diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b444581e62d0..3570d9fcf2d6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,3 +9,4 @@ updates: directory: "/" # Location of package manifests schedule: interval: "daily" + open-pull-requests-limit: 0 # Disable non-security version updates diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3b4d4001147b..cef348721ffb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: Tests on: pull_request: - branches: [chain4travel, dev] + branches: [chain4travel, dev, evlekht/new-dev-c19] workflow_dispatch: merge_group: types: [checks_requested] @@ -69,6 +69,13 @@ jobs: - name: Run e2e tests shell: bash run: E2E_SERIAL=1 ./scripts/tests.e2e.sh + - name: Upload tmpnet network dir + uses: actions/upload-artifact@v4 + if: always() + with: + name: e2e-tmpnet-data + path: ${{ env.tmpnet_data_path }} + if-no-files-found: error e2e_existing_network: runs-on: ubuntu-latest steps: @@ -84,7 +91,14 @@ jobs: run: ./scripts/build.sh -r - name: Run e2e tests with existing network shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.persistent.sh + run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh + - name: Upload tmpnet network dir + uses: actions/upload-artifact@v4 + if: always() + with: + name: e2e-existing-network-tmpnet-data + path: ${{ env.tmpnet_data_path }} + if-no-files-found: error Upgrade: runs-on: ubuntu-latest steps: @@ -99,6 +113,13 @@ jobs: - name: Run e2e tests shell: bash run: ./scripts/tests.upgrade.sh + - name: Upload tmpnet network dir + uses: actions/upload-artifact@v4 + if: always() + with: + name: upgrade-tmpnet-data + path: ${{ env.tmpnet_data_path }} + if-no-files-found: error Lint: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/cnr-build-test-release.yml b/.github/workflows/cnr-build-test-release.yml deleted file mode 100644 index 92efd7aad941..000000000000 --- a/.github/workflows/cnr-build-test-release.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: CNR - Build + test + release - -on: - push: - branches: - - chain4travel - - dev - pull_request: - branches: - - chain4travel - - dev - -permissions: - contents: write - -jobs: - lint_test: - name: Lint tests - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: update dependencies - run: git submodule update --init --recursive - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.19' - - name: Run static analysis tests - working-directory: ./tools/camino-network-runner - shell: bash - run: scripts/lint.sh - - unit_test: - name: Unit tests - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: update dependencies - run: git submodule update --init --recursive - - uses: actions/setup-go@v3 - with: - go-version: '1.19' - - - name: run unit tests - run: go test -v -timeout 10m -race ./... 
- working-directory: ./tools/camino-network-runner - e2e_test: - name: e2e tests - runs-on: ubuntu-latest - steps: - - name: Git checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: update dependencies - run: git submodule update --init --recursive - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.20.10' - - - name: build camino node - shell: bash - run: scripts/build.sh - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.19' - - - name: build cnr - working-directory: ./tools/camino-network-runner - shell: bash - run: scripts/build.sh - - - name: Run cnr e2e tests - working-directory: ./tools/camino-network-runner - shell: bash - run: scripts/tests.e2e.sh ${{ github.workspace }}/build/caminogo \ No newline at end of file diff --git a/.github/workflows/cnr-codeql-analysis.yml b/.github/workflows/cnr-codeql-analysis.yml deleted file mode 100644 index cc73c30ac181..000000000000 --- a/.github/workflows/cnr-codeql-analysis.yml +++ /dev/null @@ -1,78 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CNR-CodeQL" - -on: - push: - branches: [ chain4travel, dev ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ chain4travel, dev ] - schedule: - - cron: '44 11 * * 4' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'go' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://git.io/codeql-language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: update dependencies - run: git submodule update --init --recursive - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - working-directory: ./tools/camino-network-runner - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - queries: security-extended - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - with: - working-directory: ./tools/camino-network-runner - - # ℹī¸ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 - with: - working-directory: ./tools/camino-network-runner diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 8a98cefb657d..6db622e3acdb 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -19,8 +19,8 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Run fuzz tests shell: bash - run: ./scripts/build_fuzz.sh 30 # Run each fuzz test 30 seconds + run: ./scripts/build_fuzz.sh 180 # Run each fuzz test 180 seconds diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index d4badeaebdb0..000000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "tools/camino-network-runner"] - path = tools/camino-network-runner - url = ../camino-network-runner.git diff --git a/.golangci.yml b/.golangci.yml index ee5af99af536..fad97cb63712 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -19,8 +19,6 @@ run: # Allowed values: readonly|vendor|mod # By default, it isn't set. modules-download-mode: readonly - skip-dirs: - - "tools/camino-network-runner" output: # Make issues output unique by line. @@ -69,6 +67,7 @@ linters: - nolintlint - perfsprint - prealloc + - predeclared - revive - staticcheck - stylecheck diff --git a/Dockerfile b/Dockerfile index bcd0ea32e859..a2301587dea1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.20.10-bullseye AS builder +FROM golang:1.20.12-bullseye AS builder WORKDIR /build # Copy and download caminogo dependencies using go mod diff --git a/LICENSE b/LICENSE index c9be72c59aa5..6178f77a85af 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (C) 2019-2023, Ava Labs, Inc. +Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/README.md b/README.md index 019c1bcbc5c3..341c51b14111 100644 --- a/README.md +++ b/README.md @@ -15,13 +15,14 @@ The minimum recommended hardware specification for nodes connected to Mainnet is - CPU: Equivalent of 8 AWS vCPU - RAM: 16 GiB -- Storage: 512 GiB +- Storage: 1 TiB + - Nodes running for very long periods of time or nodes with custom configurations may observe higher storage requirements. - OS: Ubuntu 20.04/22.04 or macOS >= 12 - Network: Reliable IPv4 or IPv6 network connection, with an open public port. If you plan to build Camino-Node from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.20.10 +- [Go](https://golang.org/doc/install) version >= 1.20.12 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/api/admin/client.go b/api/admin/client.go index 6c3d158439cd..4e43eea6f893 100644 --- a/api/admin/client.go +++ b/api/admin/client.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -17,7 +17,9 @@ import ( "context" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/rpc" ) @@ -35,9 +37,10 @@ type Client interface { GetChainAliases(ctx context.Context, chainID string, options ...rpc.Option) ([]string, error) Stacktrace(context.Context, ...rpc.Option) error LoadVMs(context.Context, ...rpc.Option) (map[ids.ID][]string, map[ids.ID]string, error) - SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) error + SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetLoggerLevel(ctx context.Context, loggerName string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error) + DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) GetNodeSigner(ctx context.Context, _ string, options ...rpc.Option) (*GetNodeSignerReply, error) } @@ -111,7 +114,7 @@ func (c *client) SetLoggerLevel( logLevel, displayLevel string, options ...rpc.Option, -) error { +) (map[string]LogAndDisplayLevels, error) { var ( logLevelArg logging.Level displayLevelArg logging.Level @@ -120,21 +123,23 @@ func (c *client) SetLoggerLevel( if len(logLevel) > 0 { logLevelArg, err = logging.ToLevel(logLevel) if err != nil { - return err + return nil, err } } if len(displayLevel) > 0 { displayLevelArg, err = logging.ToLevel(displayLevel) if err != nil { - return err + return nil, err } } - return c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ + res := &LoggerLevelReply{} + err = c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ Secret: Secret{c.secret}, LoggerName: loggerName, LogLevel: &logLevelArg, DisplayLevel: &displayLevelArg, - }, &api.EmptyReply{}, options...) + }, res, options...) + return res.LoggerLevels, err } func (c *client) GetLoggerLevel( @@ -142,7 +147,7 @@ func (c *client) GetLoggerLevel( loggerName string, options ...rpc.Option, ) (map[string]LogAndDisplayLevels, error) { - res := &GetLoggerLevelReply{} + res := &LoggerLevelReply{} err := c.requester.SendRequest(ctx, "admin.getLoggerLevel", &GetLoggerLevelArgs{ Secret: Secret{c.secret}, LoggerName: loggerName, @@ -161,3 +166,23 @@ func (c *client) GetNodeSigner(ctx context.Context, _ string, options ...rpc.Opt err := c.requester.SendRequest(ctx, "getNodeSigner", Secret{c.secret}, res, options...) return res, err } + +func (c *client) DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) { + keyStr, err := formatting.Encode(formatting.HexNC, key) + if err != nil { + return nil, err + } + + res := &DBGetReply{} + err = c.requester.SendRequest(ctx, "admin.dbGet", &DBGetArgs{ + Key: keyStr, + }, res, options...) + if err != nil { + return nil, err + } + + if err := rpcdb.ErrEnumToError[res.ErrorCode]; err != nil { + return nil, err + } + return formatting.Decode(formatting.HexNC, res.Value) +} diff --git a/api/admin/client_test.go b/api/admin/client_test.go index 4302bd5350fa..ed352e1b2b35 100644 --- a/api/admin/client_test.go +++ b/api/admin/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package admin @@ -16,24 +16,23 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var errTest = errors.New("non-nil error") +var ( + errTest = errors.New("non-nil error") -// SuccessResponseTest defines the expected result of an API call that returns SuccessResponse -type SuccessResponseTest struct { - Err error -} - -// GetSuccessResponseTests returns a list of possible SuccessResponseTests -func GetSuccessResponseTests() []SuccessResponseTest { - return []SuccessResponseTest{ + SuccessResponseTests = []struct { + name string + expectedErr error + }{ { - Err: nil, + name: "no error", + expectedErr: nil, }, { - Err: errTest, + name: "error", + expectedErr: errTest, }, } -} +) type mockClient struct { response interface{} @@ -63,8 +62,8 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re case *LoadVMsReply: response := mc.response.(*LoadVMsReply) *p = *response - case *GetLoggerLevelReply: - response := mc.response.(*GetLoggerLevelReply) + case *LoggerLevelReply: + response := mc.response.(*LoggerLevelReply) *p = *response case *interface{}: response := mc.response.(*interface{}) @@ -76,74 +75,62 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re } func TestStartCPUProfiler(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.StartCPUProfiler(context.Background()) - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.StartCPUProfiler(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestStopCPUProfiler(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.StopCPUProfiler(context.Background()) - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.StopCPUProfiler(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestMemoryProfile(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.MemoryProfile(context.Background()) - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.MemoryProfile(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestLockProfile(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.LockProfile(context.Background()) - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := 
mockClient.LockProfile(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestAlias(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.Alias(context.Background(), "alias", "alias2") - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.Alias(context.Background(), "alias", "alias2") + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestAliasChain(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.AliasChain(context.Background(), "chain", "chain-alias") - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.AliasChain(context.Background(), "chain", "chain-alias") + require.ErrorIs(t, err, test.expectedErr) + }) } } @@ -169,14 +156,12 @@ func TestGetChainAliases(t *testing.T) { } func TestStacktrace(t *testing.T) { - require := require.New(t) - - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.Stacktrace(context.Background()) - require.ErrorIs(err, test.Err) + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.Stacktrace(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } @@ -212,54 +197,72 @@ func TestReloadInstalledVMs(t *testing.T) { func TestSetLoggerLevel(t *testing.T) { type test struct { - name string - logLevel string - displayLevel string - serviceErr error - clientErr error + name string + logLevel string + displayLevel string + serviceResponse map[string]LogAndDisplayLevels + serviceErr error + clientErr error } tests := []test{ { name: "Happy path", logLevel: "INFO", displayLevel: "INFO", - serviceErr: nil, - clientErr: nil, + serviceResponse: map[string]LogAndDisplayLevels{ + "Happy path": {LogLevel: logging.Info, DisplayLevel: logging.Info}, + }, + serviceErr: nil, + clientErr: nil, }, { - name: "Service errors", - logLevel: "INFO", - displayLevel: "INFO", - serviceErr: errTest, - clientErr: errTest, + name: "Service errors", + logLevel: "INFO", + displayLevel: "INFO", + serviceResponse: nil, + serviceErr: errTest, + clientErr: errTest, }, { - name: "Invalid log level", - logLevel: "invalid", - displayLevel: "INFO", - serviceErr: nil, - clientErr: logging.ErrUnknownLevel, + name: "Invalid log level", + logLevel: "invalid", + displayLevel: "INFO", + serviceResponse: nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, { - name: "Invalid display level", - logLevel: "INFO", - displayLevel: "invalid", - serviceErr: nil, - clientErr: logging.ErrUnknownLevel, + name: "Invalid display level", + logLevel: "INFO", + displayLevel: "invalid", + serviceResponse: nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := 
require.New(t) + c := client{ - requester: NewMockClient(&api.EmptyReply{}, tt.serviceErr), + requester: NewMockClient( + &LoggerLevelReply{ + LoggerLevels: tt.serviceResponse, + }, + tt.serviceErr, + ), } - err := c.SetLoggerLevel( + res, err := c.SetLoggerLevel( context.Background(), "", tt.logLevel, tt.displayLevel, ) - require.ErrorIs(t, err, tt.clientErr) + require.ErrorIs(err, tt.clientErr) + if tt.clientErr != nil { + return + } + require.Equal(tt.serviceResponse, res) }) } } @@ -296,7 +299,7 @@ func TestGetLoggerLevel(t *testing.T) { c := client{ requester: NewMockClient( - &GetLoggerLevelReply{ + &LoggerLevelReply{ LoggerLevels: tt.serviceResponse, }, tt.serviceErr, diff --git a/api/admin/key_value_reader.go b/api/admin/key_value_reader.go new file mode 100644 index 000000000000..bfc7b2cced06 --- /dev/null +++ b/api/admin/key_value_reader.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package admin + +import ( + "context" + + "github.com/ava-labs/avalanchego/database" +) + +var _ database.KeyValueReader = (*KeyValueReader)(nil) + +type KeyValueReader struct { + client Client +} + +func NewKeyValueReader(client Client) *KeyValueReader { + return &KeyValueReader{ + client: client, + } +} + +func (r *KeyValueReader) Has(key []byte) (bool, error) { + _, err := r.client.DBGet(context.Background(), key) + if err == database.ErrNotFound { + return false, nil + } + return err == nil, err +} + +func (r *KeyValueReader) Get(key []byte) ([]byte, error) { + return r.client.DBGet(context.Background(), key) +} diff --git a/api/admin/service.go b/api/admin/service.go index 24c177cee7b9..09628e53ed8c 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -27,11 +27,14 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" @@ -39,6 +42,8 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" + + rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) const ( @@ -59,6 +64,7 @@ type Config struct { ProfileDir string LogFactory logging.Factory NodeConfig interface{} + DB database.Database ChainManager chains.Manager HTTPServer server.PathAdderWithReadLock VMRegistry registry.VMRegistry @@ -265,7 +271,6 @@ func (a *Admin) Stacktrace(_ *http.Request, args *Secret, _ *api.EmptyReply) err return perms.WriteFile(stacktraceFile, stacktrace, perms.ReadWrite) } -// See SetLoggerLevel type SetLoggerLevelArgs struct { Secret LoggerName string `json:"loggerName"` @@ -273,6 +278,15 @@ type SetLoggerLevelArgs struct { DisplayLevel *logging.Level `json:"displayLevel"` } +type LogAndDisplayLevels struct { + LogLevel logging.Level `json:"logLevel"` + DisplayLevel logging.Level `json:"displayLevel"` +} + +type LoggerLevelReply struct { + LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` +} + // SetLoggerLevel sets the log level and/or display level for loggers. // If len([args.LoggerName]) == 0, sets the log/display level of all loggers. // Otherwise, sets the log/display level of the loggers named in that argument. @@ -282,7 +296,7 @@ type SetLoggerLevelArgs struct { // Sets the display level of these loggers to args.LogLevel. // If args.DisplayLevel == nil, doesn't set the display level of these loggers. // If args.DisplayLevel != nil, must be a valid string representation of a log level. 
-func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api.EmptyReply) error { +func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "setLoggerLevel"), @@ -298,14 +312,7 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api a.lock.Lock() defer a.lock.Unlock() - var loggerNames []string - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - // Empty name means all loggers - loggerNames = a.LogFactory.GetLoggerNames() - } - + loggerNames := a.getLoggerNames(args.LoggerName) for _, name := range loggerNames { if args.LogLevel != nil { if err := a.LogFactory.SetLogLevel(name, *args.LogLevel); err != nil { @@ -318,27 +325,19 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api } } } - return nil -} -type LogAndDisplayLevels struct { - LogLevel logging.Level `json:"logLevel"` - DisplayLevel logging.Level `json:"displayLevel"` + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } -// See GetLoggerLevel type GetLoggerLevelArgs struct { Secret LoggerName string `json:"loggerName"` } -// See GetLoggerLevel -type GetLoggerLevelReply struct { - LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` -} - // GetLogLevel returns the log level and display level of all loggers. -func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *GetLoggerLevelReply) error { +func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "getLoggerLevels"), @@ -348,30 +347,11 @@ func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply a.lock.RLock() defer a.lock.RUnlock() - reply.LoggerLevels = make(map[string]LogAndDisplayLevels) - var loggerNames []string - // Empty name means all loggers - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - loggerNames = a.LogFactory.GetLoggerNames() - } + loggerNames := a.getLoggerNames(args.LoggerName) - for _, name := range loggerNames { - logLevel, err := a.LogFactory.GetLogLevel(name) - if err != nil { - return err - } - displayLevel, err := a.LogFactory.GetDisplayLevel(name) - if err != nil { - return err - } - reply.LoggerLevels[name] = LogAndDisplayLevels{ - LogLevel: logLevel, - DisplayLevel: displayLevel, - } - } - return nil + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } // GetConfig returns the config that the node was started with. 
@@ -403,7 +383,7 @@ func (a *Admin) LoadVMs(r *http.Request, args *Secret, reply *LoadVMsReply) erro defer a.lock.Unlock() ctx := r.Context() - loadedVMs, failedVMs, err := a.VMRegistry.ReloadWithReadLock(ctx) + loadedVMs, failedVMs, err := a.VMRegistry.Reload(ctx) if err != nil { return err } @@ -419,6 +399,65 @@ func (a *Admin) LoadVMs(r *http.Request, args *Secret, reply *LoadVMsReply) erro return err } +func (a *Admin) getLoggerNames(loggerName string) []string { + if len(loggerName) == 0 { + // Empty name means all loggers + return a.LogFactory.GetLoggerNames() + } + return []string{loggerName} +} + +func (a *Admin) getLogLevels(loggerNames []string) (map[string]LogAndDisplayLevels, error) { + loggerLevels := make(map[string]LogAndDisplayLevels) + for _, name := range loggerNames { + logLevel, err := a.LogFactory.GetLogLevel(name) + if err != nil { + return nil, err + } + displayLevel, err := a.LogFactory.GetDisplayLevel(name) + if err != nil { + return nil, err + } + loggerLevels[name] = LogAndDisplayLevels{ + LogLevel: logLevel, + DisplayLevel: displayLevel, + } + } + return loggerLevels, nil +} + +type DBGetArgs struct { + Key string `json:"key"` +} + +type DBGetReply struct { + Value string `json:"value"` + ErrorCode rpcdbpb.Error `json:"errorCode"` +} + +//nolint:stylecheck // renaming this method to DBGet would change the API method from "dbGet" to "dBGet" +func (a *Admin) DbGet(_ *http.Request, args *DBGetArgs, reply *DBGetReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "dbGet"), + logging.UserString("key", args.Key), + ) + + key, err := formatting.Decode(formatting.HexNC, args.Key) + if err != nil { + return err + } + + value, err := a.DB.Get(key) + if err != nil { + reply.ErrorCode = rpcdb.ErrorToErrEnum[err] + return rpcdb.ErrorToRPCError(err) + } + + reply.Value, err = formatting.Encode(formatting.HexNC, value) + return err +} + // See GetNodeSigner type GetNodeSignerReply struct { PrivateKey string `json:"privateKey"` diff --git a/api/admin/service_test.go b/api/admin/service_test.go index 09665a52c9d9..a1309a213f60 100644 --- a/api/admin/service_test.go +++ b/api/admin/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -11,16 +11,19 @@ import ( "go.uber.org/mock/gomock" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" + + rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) type loadVMsTest struct { admin *Admin ctrl *gomock.Controller - mockLog *logging.MockLogger mockVMManager *vms.MockManager mockVMRegistry *registry.MockVMRegistry } @@ -28,18 +31,16 @@ type loadVMsTest struct { func initLoadVMsTest(t *testing.T) *loadVMsTest { ctrl := gomock.NewController(t) - mockLog := logging.NewMockLogger(ctrl) mockVMRegistry := registry.NewMockVMRegistry(ctrl) mockVMManager := vms.NewMockManager(ctrl) return &loadVMsTest{ admin: &Admin{Config: Config{ - Log: mockLog, + Log: logging.NoLog{}, VMRegistry: mockVMRegistry, VMManager: mockVMManager, }}, ctrl: ctrl, - mockLog: mockLog, mockVMManager: mockVMManager, mockVMRegistry: mockVMRegistry, } @@ -67,8 +68,7 @@ func TestLoadVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) @@ -84,9 +84,8 @@ func TestLoadVMsReloadFails(t *testing.T) { resources := initLoadVMsTest(t) - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) // Reload fails - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(nil, nil, errTest) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(nil, nil, errTest) reply := LoadVMsReply{} err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) @@ -108,8 +107,7 @@ func TestLoadVMsGetAliasesFails(t *testing.T) { // every vm is at least aliased to itself. 
alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) @@ -117,3 +115,56 @@ func TestLoadVMsGetAliasesFails(t *testing.T) { err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) require.ErrorIs(err, errTest) } + +func TestServiceDBGet(t *testing.T) { + a := &Admin{Config: Config{ + Log: logging.NoLog{}, + DB: memdb.New(), + }} + + helloBytes := []byte("hello") + helloHex, err := formatting.Encode(formatting.HexNC, helloBytes) + require.NoError(t, err) + + worldBytes := []byte("world") + worldHex, err := formatting.Encode(formatting.HexNC, worldBytes) + require.NoError(t, err) + + require.NoError(t, a.DB.Put(helloBytes, worldBytes)) + + tests := []struct { + name string + key string + expectedValue string + expectedErrorCode rpcdbpb.Error + }{ + { + name: "key exists", + key: helloHex, + expectedValue: worldHex, + expectedErrorCode: rpcdbpb.Error_ERROR_UNSPECIFIED, + }, + { + name: "key doesn't exist", + key: "", + expectedValue: "", + expectedErrorCode: rpcdbpb.Error_ERROR_NOT_FOUND, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + reply := &DBGetReply{} + require.NoError(a.DbGet( + nil, + &DBGetArgs{ + Key: test.key, + }, + reply, + )) + require.Equal(test.expectedValue, reply.Value) + require.Equal(test.expectedErrorCode, reply.ErrorCode) + }) + } +} diff --git a/api/auth/auth.go b/api/auth/auth.go index 0f78192cb111..f01b1d2fcd73 100644 --- a/api/auth/auth.go +++ b/api/auth/auth.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth @@ -41,7 +41,7 @@ const ( var ( errNoToken = errors.New("auth token not provided") errAuthHeaderNotParsable = fmt.Errorf( - "couldn't parse auth token. Header \"%s\" should be \"%sTOKEN.GOES.HERE\"", + `couldn't parse auth token. Header "%s" should be "%sTOKEN.GOES.HERE"`, headerKey, headerValStart, ) diff --git a/api/auth/auth_test.go b/api/auth/auth_test.go index d8b7a4cca59b..caf921ca2a26 100644 --- a/api/auth/auth_test.go +++ b/api/auth/auth_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/api/auth/claims.go b/api/auth/claims.go index e2bf55d3078b..1cdda3d4a224 100644 --- a/api/auth/claims.go +++ b/api/auth/claims.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/api/auth/response.go b/api/auth/response.go index e87065c71501..eca4b39da9b8 100644 --- a/api/auth/response.go +++ b/api/auth/response.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package auth diff --git a/api/auth/service.go b/api/auth/service.go index 77517c174a5c..badb544c5ccb 100644 --- a/api/auth/service.go +++ b/api/auth/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/api/common_args_responses.go b/api/common_args_responses.go index 90ba898dc661..a6402ef1488b 100644 --- a/api/common_args_responses.go +++ b/api/common_args_responses.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api diff --git a/api/health/checker.go b/api/health/checker.go index efc895177ed3..b30e450660b8 100644 --- a/api/health/checker.go +++ b/api/health/checker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/client.go b/api/health/client.go index 7c615757f0ce..59daa555cdda 100644 --- a/api/health/client.go +++ b/api/health/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/client_test.go b/api/health/client_test.go index e42be5dbe852..e019829e68e4 100644 --- a/api/health/client_test.go +++ b/api/health/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/handler.go b/api/health/handler.go index a8bd8269a158..a95c66a322c0 100644 --- a/api/health/handler.go +++ b/api/health/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/health.go b/api/health/health.go index 14f1bb7b4b4a..80012cf8c02e 100644 --- a/api/health/health.go +++ b/api/health/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/health_test.go b/api/health/health_test.go index 432cefdf6194..64661c710929 100644 --- a/api/health/health_test.go +++ b/api/health/health_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/metrics.go b/api/health/metrics.go index d567bc483d71..fdb7b2ed813b 100644 --- a/api/health/metrics.go +++ b/api/health/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package health diff --git a/api/health/result.go b/api/health/result.go index df9edb3419cc..e243cba1466d 100644 --- a/api/health/result.go +++ b/api/health/result.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/service.go b/api/health/service.go index 368d986c52bb..7b48507075b2 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/service_test.go b/api/health/service_test.go index 0e60d467000a..b25e6dccc017 100644 --- a/api/health/service_test.go +++ b/api/health/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/health/worker.go b/api/health/worker.go index f0e7a71ed13d..e42e77a4d52c 100644 --- a/api/health/worker.go +++ b/api/health/worker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/api/info/camino_service_test.go b/api/info/camino_service_test.go index e0715ddd856f..5706f2f759bf 100644 --- a/api/info/camino_service_test.go +++ b/api/info/camino_service_test.go @@ -7,19 +7,13 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/utils/logging" ) func TestGetGenesisBytes(t *testing.T) { - mockLog := logging.NewMockLogger(gomock.NewController(t)) - service := Info{log: mockLog} - - mockLog.EXPECT().Debug(gomock.Any()).Times(1) - + service := Info{log: logging.NoLog{}} service.GenesisBytes = []byte("some random bytes") - reply := GetGenesisBytesReply{} require.NoError(t, service.GetGenesisBytes(nil, nil, &reply)) require.Equal(t, GetGenesisBytesReply{GenesisBytes: service.GenesisBytes}, reply) diff --git a/api/info/client.go b/api/info/client.go index e77441373cc9..1fecb7580404 100644 --- a/api/info/client.go +++ b/api/info/client.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info diff --git a/api/info/client_test.go b/api/info/client_test.go index 292a1841bd3c..7923ff94aff8 100644 --- a/api/info/client_test.go +++ b/api/info/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info diff --git a/api/info/service.go b/api/info/service.go index 61d8a60760af..75acb3ed1a1c 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package info @@ -27,13 +27,18 @@ import ( "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/snow/networking/benchlist" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var errNoChainProvided = errors.New("argument 'chain' not given") @@ -42,6 +47,7 @@ var errNoChainProvided = errors.New("argument 'chain' not given") type Info struct { Parameters log logging.Logger + validators validators.Manager myIP ips.DynamicIPPort networking network.Network chainManager chains.Manager @@ -72,6 +78,7 @@ type Parameters struct { func NewService( parameters Parameters, log logging.Logger, + validators validators.Manager, chainManager chains.Manager, vmManager vms.Manager, myIP ips.DynamicIPPort, @@ -86,6 +93,7 @@ func NewService( &Info{ Parameters: parameters, log: log, + validators: validators, chainManager: chainManager, vmManager: vmManager, myIP: myIP, @@ -223,7 +231,7 @@ type PeersArgs struct { type Peer struct { peer.Info - Benched []ids.ID `json:"benched"` + Benched []string `json:"benched"` } // PeersReply are the results from calling Peers @@ -244,9 +252,18 @@ func (i *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error peers := i.networking.PeerInfo(args.NodeIDs) peerInfo := make([]Peer, len(peers)) for index, peer := range peers { + benchedIDs := i.benchlist.GetBenched(peer.ID) + benchedAliases := make([]string, len(benchedIDs)) + for idx, id := range benchedIDs { + alias, err := i.chainManager.PrimaryAlias(id) + if err != nil { + return fmt.Errorf("failed to get primary alias for chain ID %s: %w", id, err) + } + benchedAliases[idx] = alias + } peerInfo[index] = Peer{ Info: peer, - Benched: i.benchlist.GetBenched(peer.ID), + Benched: benchedAliases, } } @@ -325,6 +342,64 @@ func (i *Info) Uptime(_ *http.Request, args *UptimeRequest, reply *UptimeRespons return nil } +type ACP struct { + SupportWeight json.Uint64 `json:"supportWeight"` + Supporters set.Set[ids.NodeID] `json:"supporters"` + ObjectWeight json.Uint64 `json:"objectWeight"` + Objectors set.Set[ids.NodeID] `json:"objectors"` + AbstainWeight json.Uint64 `json:"abstainWeight"` +} + +type ACPsReply struct { + ACPs map[uint32]*ACP `json:"acps"` +} + +func (a *ACPsReply) getACP(acpNum uint32) *ACP { + acp, ok := a.ACPs[acpNum] + if !ok { + acp = &ACP{} + a.ACPs[acpNum] = acp + } + return acp +} + +func (i *Info) Acps(_ *http.Request, _ *struct{}, reply *ACPsReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "acps"), + ) + + reply.ACPs = make(map[uint32]*ACP, constants.CurrentACPs.Len()) + peers := i.networking.PeerInfo(nil) + for _, peer := range peers { + weight := json.Uint64(i.validators.GetWeight(constants.PrimaryNetworkID, peer.ID)) + if weight == 0 { + continue + } + + for acpNum := range peer.SupportedACPs { + acp := reply.getACP(acpNum) + acp.Supporters.Add(peer.ID) + acp.SupportWeight += weight + } + for acpNum := range peer.ObjectedACPs 
{ + acp := reply.getACP(acpNum) + acp.Objectors.Add(peer.ID) + acp.ObjectWeight += weight + } + } + + totalWeight, err := i.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return err + } + for acpNum := range constants.CurrentACPs { + acp := reply.getACP(acpNum) + acp.AbstainWeight = json.Uint64(totalWeight) - acp.SupportWeight - acp.ObjectWeight + } + return nil +} + type GetTxFeeResponse struct { TxFee json.Uint64 `json:"txFee"` CreateAssetTxFee json.Uint64 `json:"createAssetTxFee"` @@ -359,6 +434,7 @@ func (i *Info) GetTxFee(_ *http.Request, _ *struct{}, reply *GetTxFeeResponse) e // GetVMsReply contains the response metadata for GetVMs type GetVMsReply struct { VMs map[ids.ID][]string `json:"vms"` + Fxs map[ids.ID]string `json:"fxs"` } // GetVMs lists the virtual machines installed on the node @@ -375,5 +451,10 @@ func (i *Info) GetVMs(_ *http.Request, _ *struct{}, reply *GetVMsReply) error { } reply.VMs, err = ids.GetRelevantAliases(i.VMManager, vmIDs) + reply.Fxs = map[ids.ID]string{ + secp256k1fx.ID: secp256k1fx.Name, + nftfx.ID: nftfx.Name, + propertyfx.ID: propertyfx.Name, + } return err } diff --git a/api/info/service_test.go b/api/info/service_test.go index 312d8182ea83..b91f87354d1d 100644 --- a/api/info/service_test.go +++ b/api/info/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info @@ -21,24 +21,20 @@ var errTest = errors.New("non-nil error") type getVMsTest struct { info *Info ctrl *gomock.Controller - mockLog *logging.MockLogger mockVMManager *vms.MockManager } func initGetVMsTest(t *testing.T) *getVMsTest { ctrl := gomock.NewController(t) - - service := Info{} - mockLog := logging.NewMockLogger(ctrl) mockVMManager := vms.NewMockManager(ctrl) - - service.log = mockLog - service.VMManager = mockVMManager - return &getVMsTest{ - info: &service, + info: &Info{ + Parameters: Parameters{ + VMManager: mockVMManager, + }, + log: logging.NoLog{}, + }, ctrl: ctrl, - mockLog: mockLog, mockVMManager: mockVMManager, } } @@ -62,7 +58,6 @@ func TestGetVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) @@ -76,7 +71,6 @@ func TestGetVMsSuccess(t *testing.T) { func TestGetVMsVMsListFactoriesFails(t *testing.T) { resources := initGetVMsTest(t) - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(nil, errTest) reply := GetVMsReply{} @@ -93,7 +87,6 @@ func TestGetVMsGetAliasesFails(t *testing.T) { vmIDs := []ids.ID{id1, id2} alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) diff --git a/api/ipcs/client.go b/api/ipcs/client.go index 95391f0f4469..121c1855bc8f 100644 --- a/api/ipcs/client.go +++ b/api/ipcs/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs diff --git a/api/ipcs/service.go b/api/ipcs/service.go index b9bb90479ce9..efe6f2e7280b 100644 --- a/api/ipcs/service.go +++ b/api/ipcs/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs diff --git a/api/keystore/blockchain_keystore.go b/api/keystore/blockchain_keystore.go index 4c163b9627b7..31a3bdc59109 100644 --- a/api/keystore/blockchain_keystore.go +++ b/api/keystore/blockchain_keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/api/keystore/client.go b/api/keystore/client.go index 43442ace79fe..9d12ea0d1df9 100644 --- a/api/keystore/client.go +++ b/api/keystore/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/api/keystore/codec.go b/api/keystore/codec.go index ebb196ccbfff..b925747c44ec 100644 --- a/api/keystore/codec.go +++ b/api/keystore/codec.go @@ -1,27 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" ) const ( - maxPackerSize = 1 * units.GiB // max size, in bytes, of something being marshalled by Marshal() - maxSliceLength = linearcodec.DefaultMaxSliceLength + CodecVersion = 0 - codecVersion = 0 + maxPackerSize = 1 * units.GiB // max size, in bytes, of something being marshalled by Marshal() ) -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(maxSliceLength) - c = codec.NewManager(maxPackerSize) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(maxPackerSize) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/api/keystore/gkeystore/keystore_client.go b/api/keystore/gkeystore/keystore_client.go index 6bbfc6f92c1e..87527a640412 100644 --- a/api/keystore/gkeystore/keystore_client.go +++ b/api/keystore/gkeystore/keystore_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gkeystore diff --git a/api/keystore/gkeystore/keystore_server.go b/api/keystore/gkeystore/keystore_server.go index 9244939de3b9..65e6e90e99d9 100644 --- a/api/keystore/gkeystore/keystore_server.go +++ b/api/keystore/gkeystore/keystore_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gkeystore diff --git a/api/keystore/keystore.go b/api/keystore/keystore.go index cd7f0b8a8f21..ed3c9d21e57e 100644 --- a/api/keystore/keystore.go +++ b/api/keystore/keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -188,7 +188,7 @@ func (ks *keystore) CreateUser(username, pw string) error { return err } - passwordBytes, err := c.Marshal(codecVersion, passwordHash) + passwordBytes, err := Codec.Marshal(CodecVersion, passwordHash) if err != nil { return err } @@ -288,14 +288,14 @@ func (ks *keystore) ImportUser(username, pw string, userBytes []byte) error { } userData := user{} - if _, err := c.Unmarshal(userBytes, &userData); err != nil { + if _, err := Codec.Unmarshal(userBytes, &userData); err != nil { return err } if !userData.Hash.Check(pw) { return fmt.Errorf("%w: user %q", errIncorrectPassword, username) } - usrBytes, err := c.Marshal(codecVersion, &userData.Hash) + usrBytes, err := Codec.Marshal(CodecVersion, &userData.Hash) if err != nil { return err } @@ -355,7 +355,7 @@ func (ks *keystore) ExportUser(username, pw string) ([]byte, error) { } // Return the byte representation of the user - return c.Marshal(codecVersion, &userData) + return Codec.Marshal(CodecVersion, &userData) } func (ks *keystore) getPassword(username string) (*password.Hash, error) { @@ -377,6 +377,6 @@ func (ks *keystore) getPassword(username string) (*password.Hash, error) { } passwordHash = &password.Hash{} - _, err = c.Unmarshal(userBytes, passwordHash) + _, err = Codec.Unmarshal(userBytes, passwordHash) return passwordHash, err } diff --git a/api/keystore/service.go b/api/keystore/service.go index d4c845743bbb..aa56433ee6e7 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index 842ab7d76cc7..c011c92e78e1 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/api/metrics/gatherer_test.go b/api/metrics/gatherer_test.go index 2059c1ab584f..334c361ebcc0 100644 --- a/api/metrics/gatherer_test.go +++ b/api/metrics/gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go index ce9af54936be..45d4439622b4 100644 --- a/api/metrics/multi_gatherer.go +++ b/api/metrics/multi_gatherer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -13,6 +13,9 @@ import ( dto "github.com/prometheus/client_model/go" "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/metric" ) var ( @@ -48,23 +51,19 @@ func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) { var results []*dto.MetricFamily for namespace, gatherer := range g.gatherers { - metrics, err := gatherer.Gather() + gatheredMetrics, err := gatherer.Gather() if err != nil { return nil, err } - for _, metric := range metrics { + for _, gatheredMetric := range gatheredMetrics { var name string - if metric.Name != nil { - if len(namespace) > 0 { - name = fmt.Sprintf("%s_%s", namespace, *metric.Name) - } else { - name = *metric.Name - } + if gatheredMetric.Name != nil { + name = metric.AppendNamespace(namespace, *gatheredMetric.Name) } else { name = namespace } - metric.Name = &name - results = append(results, metric) + gatheredMetric.Name = &name + results = append(results, gatheredMetric) } } // Because we overwrite every metric's name, we are guaranteed that there @@ -91,7 +90,7 @@ func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) } func sortMetrics(m []*dto.MetricFamily) { - slices.SortFunc(m, func(i, j *dto.MetricFamily) bool { - return *i.Name < *j.Name + slices.SortFunc(m, func(i, j *dto.MetricFamily) int { + return utils.Compare(*i.Name, *j.Name) }) } diff --git a/api/metrics/multi_gatherer_test.go b/api/metrics/multi_gatherer_test.go index a2e59a90d51e..033e3e88b1e6 100644 --- a/api/metrics/multi_gatherer_test.go +++ b/api/metrics/multi_gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/api/metrics/optional_gatherer.go b/api/metrics/optional_gatherer.go index f31603281cee..686856efcc86 100644 --- a/api/metrics/optional_gatherer.go +++ b/api/metrics/optional_gatherer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/api/metrics/optional_gatherer_test.go b/api/metrics/optional_gatherer_test.go index 887029a3572b..201750701313 100644 --- a/api/metrics/optional_gatherer_test.go +++ b/api/metrics/optional_gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/api/server/allowed_hosts.go b/api/server/allowed_hosts.go index 6745f0e17565..7d2812b2782a 100644 --- a/api/server/allowed_hosts.go +++ b/api/server/allowed_hosts.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/server/allowed_hosts_test.go b/api/server/allowed_hosts_test.go index ae7a824834a9..47b1a53df0ba 100644 --- a/api/server/allowed_hosts_test.go +++ b/api/server/allowed_hosts_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package server diff --git a/api/server/metrics.go b/api/server/metrics.go index 9859494f3ae4..e3b2d76c83ea 100644 --- a/api/server/metrics.go +++ b/api/server/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/server/mock_server.go b/api/server/mock_server.go index 1d29c7054db3..769df9baa26f 100644 --- a/api/server/mock_server.go +++ b/api/server/mock_server.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/api/server (interfaces: Server) +// +// Generated by this command: +// +// mockgen -package=server -destination=api/server/mock_server.go github.com/ava-labs/avalanchego/api/server Server +// // Package server is a generated GoMock package. package server @@ -42,7 +44,7 @@ func (m *MockServer) EXPECT() *MockServerMockRecorder { // AddAliases mocks base method. func (m *MockServer) AddAliases(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -52,16 +54,16 @@ func (m *MockServer) AddAliases(arg0 string, arg1 ...string) error { } // AddAliases indicates an expected call of AddAliases. -func (mr *MockServerMockRecorder) AddAliases(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliases(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliases", reflect.TypeOf((*MockServer)(nil).AddAliases), varargs...) } // AddAliasesWithReadLock mocks base method. func (m *MockServer) AddAliasesWithReadLock(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -71,9 +73,9 @@ func (m *MockServer) AddAliasesWithReadLock(arg0 string, arg1 ...string) error { } // AddAliasesWithReadLock indicates an expected call of AddAliasesWithReadLock. -func (mr *MockServerMockRecorder) AddAliasesWithReadLock(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliasesWithReadLock(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliasesWithReadLock", reflect.TypeOf((*MockServer)(nil).AddAliasesWithReadLock), varargs...) } @@ -86,7 +88,7 @@ func (m *MockServer) AddRoute(arg0 http.Handler, arg1, arg2 string) error { } // AddRoute indicates an expected call of AddRoute. -func (mr *MockServerMockRecorder) AddRoute(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRoute(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockServer)(nil).AddRoute), arg0, arg1, arg2) } @@ -100,7 +102,7 @@ func (m *MockServer) AddRouteWithReadLock(arg0 http.Handler, arg1, arg2 string) } // AddRouteWithReadLock indicates an expected call of AddRouteWithReadLock. 
-func (mr *MockServerMockRecorder) AddRouteWithReadLock(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRouteWithReadLock(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockServer)(nil).AddRouteWithReadLock), arg0, arg1, arg2) } @@ -126,7 +128,7 @@ func (m *MockServer) RegisterChain(arg0 string, arg1 *snow.ConsensusContext, arg } // RegisterChain indicates an expected call of RegisterChain. -func (mr *MockServerMockRecorder) RegisterChain(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) RegisterChain(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockServer)(nil).RegisterChain), arg0, arg1, arg2) } diff --git a/api/server/router.go b/api/server/router.go index b37c6282c90d..6adadf608be4 100644 --- a/api/server/router.go +++ b/api/server/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/server/router_test.go b/api/server/router_test.go index cae75d2c97bd..f6676a3727a3 100644 --- a/api/server/router_test.go +++ b/api/server/router_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/server/server.go b/api/server/server.go index 0f676e0cef7f..a2364b6a17cd 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/server/server_test.go b/api/server/server_test.go index 9f6f3732e591..584ad24a7862 100644 --- a/api/server/server_test.go +++ b/api/server/server_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" ) func TestRejectMiddleware(t *testing.T) { @@ -58,7 +59,8 @@ func TestRejectMiddleware(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := &snow.ConsensusContext{} + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) ctx.State.Set(snow.EngineState{ State: tt.state, }) diff --git a/api/server/wrapper.go b/api/server/wrapper.go index e467dc968065..b6cca85c731e 100644 --- a/api/server/wrapper.go +++ b/api/server/wrapper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/api/traced_handler.go b/api/traced_handler.go index 149be8208edc..9543c2ebbd15 100644 --- a/api/traced_handler.go +++ b/api/traced_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package api diff --git a/app/app.go b/app/app.go index fe5f58a8acf4..c443d76b3709 100644 --- a/app/app.go +++ b/app/app.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package app @@ -24,10 +24,7 @@ import ( "golang.org/x/sync/errgroup" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/ulimit" @@ -44,12 +41,7 @@ const ( \____||__|__||___|___||____||__|__| \___/ ` ) -var ( - stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) - httpPortName = fmt.Sprintf("%s-http", constants.AppName) - - _ App = (*app)(nil) -) +var _ App = (*app)(nil) type App interface { // Start kicks off the application and returns immediately. @@ -66,11 +58,44 @@ type App interface { ExitCode() (int, error) } -func New(config node.Config) App { - return &app{ - config: config, - node: &node.Node{}, +func New(config node.Config) (App, error) { + // Set the data directory permissions to be read write. + if err := perms.ChmodR(config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) + } + if err := perms.ChmodR(config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) + } + + logFactory := logging.NewFactory(config.LoggingConfig) + log, err := logFactory.Make("main") + if err != nil { + logFactory.Close() + return nil, fmt.Errorf("failed to initialize log: %w", err) + } + + // update fd limit + fdLimit := config.FdLimit + if err := ulimit.Set(fdLimit, log); err != nil { + log.Fatal("failed to set fd-limit", + zap.Error(err), + ) + logFactory.Close() + return nil, err } + + n, err := node.New(&config, logFactory, log) + if err != nil { + log.Stop() + logFactory.Close() + return nil, fmt.Errorf("failed to initialize node: %w", err) + } + + return &app{ + node: n, + log: log, + logFactory: logFactory, + }, nil } func Run(app App) int { @@ -111,133 +136,16 @@ func Run(app App) int { // app is a wrapper around a node that runs in this process type app struct { - config node.Config - node *node.Node - exitWG sync.WaitGroup + node *node.Node + log logging.Logger + logFactory logging.Factory + exitWG sync.WaitGroup } // Start the business logic of the node (as opposed to config reading, etc). // Does not block until the node is done. Errors returned from this method // are not logged. func (a *app) Start() error { - // Set the data directory permissions to be read write. 
- if err := perms.ChmodR(a.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) - } - if err := perms.ChmodR(a.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) - } - - // we want to create the logger after the plugin has started the app - logFactory := logging.NewFactory(a.config.LoggingConfig) - log, err := logFactory.Make("main") - if err != nil { - logFactory.Close() - return err - } - - // update fd limit - fdLimit := a.config.FdLimit - if err := ulimit.Set(fdLimit, log); err != nil { - log.Fatal("failed to set fd-limit", - zap.Error(err), - ) - logFactory.Close() - return err - } - - // Track if sybil control is enforced - if !a.config.SybilProtectionEnabled { - log.Warn("sybil control is not enforced") - } - - // TODO move this to config - // SupportsNAT() for NoRouter is false. - // Which means we tried to perform a NAT activity but we were not successful. - if a.config.AttemptedNATTraversal && !a.config.Nat.SupportsNAT() { - log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + - "Please confirm the settings in your router") - } - - if ip := a.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { - log.Warn("P2P IP is private, you will not be publicly discoverable", - zap.Stringer("ip", ip), - ) - } - - // An empty host is treated as a wildcard to match all addresses, so it is - // considered public. - hostIsPublic := a.config.HTTPHost == "" - if !hostIsPublic { - ip, err := ips.Lookup(a.config.HTTPHost) - if err != nil { - log.Fatal("failed to lookup HTTP host", - zap.String("host", a.config.HTTPHost), - zap.Error(err), - ) - logFactory.Close() - return err - } - hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() - - log.Debug("finished HTTP host lookup", - zap.String("host", a.config.HTTPHost), - zap.Stringer("ip", ip), - zap.Bool("isPublic", hostIsPublic), - ) - } - - mapper := nat.NewPortMapper(log, a.config.Nat) - - // Open staking port we want for NAT traversal to have the external port - // (config.IP.Port) to connect to our internal listening port - // (config.InternalStakingPort) which should be the same in most cases. - if port := a.config.IPPort.IPPort().Port; port != 0 { - mapper.Map( - port, - port, - stakingPortName, - a.config.IPPort, - a.config.IPResolutionFreq, - ) - } - - // Don't open the HTTP port if the HTTP server is private - if hostIsPublic { - log.Warn("HTTP server is binding to a potentially public host. "+ - "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", - zap.String("host", a.config.HTTPHost), - ) - - // For NAT traversal we want to route from the external port - // (config.ExternalHTTPPort) to our internal port (config.HTTPPort). - if a.config.HTTPPort != 0 { - mapper.Map( - a.config.HTTPPort, - a.config.HTTPPort, - httpPortName, - nil, - a.config.IPResolutionFreq, - ) - } - } - - // Regularly update our public IP. - // Note that if the node config said to not dynamically resolve and - // update our public IP, [p.config.IPUdater] is a no-op implementation. 
- go a.config.IPUpdater.Dispatch(log) - - if err := a.node.Initialize(&a.config, log, logFactory); err != nil { - log.Fatal("error initializing node", - zap.Error(err), - ) - mapper.UnmapAllPorts() - a.config.IPUpdater.Stop() - log.Stop() - logFactory.Close() - return err - } - // [p.ExitCode] will block until [p.exitWG.Done] is called a.exitWG.Add(1) go func() { @@ -245,22 +153,19 @@ func (a *app) Start() error { if r := recover(); r != nil { fmt.Println("caught panic", r) } - log.Stop() - logFactory.Close() + a.log.Stop() + a.logFactory.Close() a.exitWG.Done() }() defer func() { - mapper.UnmapAllPorts() - a.config.IPUpdater.Stop() - // If [p.node.Dispatch()] panics, then we should log the panic and // then re-raise the panic. This is why the above defer is broken // into two parts. - log.StopOnPanic() + a.log.StopOnPanic() }() err := a.node.Dispatch() - log.Debug("dispatch returned", + a.log.Debug("dispatch returned", zap.Error(err), ) }() diff --git a/cache/cache.go b/cache/cache.go index 3d7206e79050..10ecad2c502f 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/empty_cache.go b/cache/empty_cache.go index 767cf5b74266..3a70ea91fe5d 100644 --- a/cache/empty_cache.go +++ b/cache/empty_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/lru_cache.go b/cache/lru_cache.go index 84d342db9f01..2a8a7ebe6d80 100644 --- a/cache/lru_cache.go +++ b/cache/lru_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/lru_cache_benchmark_test.go b/cache/lru_cache_benchmark_test.go index d8e4f4185933..3ddf03cb06f7 100644 --- a/cache/lru_cache_benchmark_test.go +++ b/cache/lru_cache_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/lru_cache_test.go b/cache/lru_cache_test.go index 9ae277299d94..e8f0b2883c1c 100644 --- a/cache/lru_cache_test.go +++ b/cache/lru_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/lru_sized_cache.go b/cache/lru_sized_cache.go index 6d093e033195..5dc9b5fdec01 100644 --- a/cache/lru_sized_cache.go +++ b/cache/lru_sized_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/lru_sized_cache_test.go b/cache/lru_sized_cache_test.go index 65dbcf8c8ab7..ad1c8b403362 100644 --- a/cache/lru_sized_cache_test.go +++ b/cache/lru_sized_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
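For the app package refactor earlier in this hunk set, New now performs the permission fixes, logger creation, fd-limit setup and node construction itself and returns (App, error). A minimal sketch of the new call site; the config plumbing and exit handling here are assumptions, not taken from this diff:

package main

import (
	"fmt"
	"os"

	"github.com/ava-labs/avalanchego/app"
	"github.com/ava-labs/avalanchego/node"
)

func run(cfg node.Config) int {
	// New can now fail before Start is ever called, so the error must be
	// handled at construction time.
	a, err := app.New(cfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, "couldn't create app:", err)
		return 1
	}
	// Run starts the app and blocks until it exits, returning the exit code.
	return app.Run(a)
}

func main() {
	os.Exit(run(node.Config{})) // a real caller would populate this from flags
}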
package cache diff --git a/cache/metercacher/cache.go b/cache/metercacher/cache.go index 6b6fcd909c81..c2ff666f25e7 100644 --- a/cache/metercacher/cache.go +++ b/cache/metercacher/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metercacher diff --git a/cache/metercacher/cache_test.go b/cache/metercacher/cache_test.go index 7ef1676c0874..3f575acdc1d4 100644 --- a/cache/metercacher/cache_test.go +++ b/cache/metercacher/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metercacher diff --git a/cache/metercacher/metrics.go b/cache/metercacher/metrics.go index a65e31805934..f08082e1be71 100644 --- a/cache/metercacher/metrics.go +++ b/cache/metercacher/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metercacher diff --git a/cache/mock_cacher.go b/cache/mock_cacher.go index bdbcdab15fb0..7b8f0bd24f9a 100644 --- a/cache/mock_cacher.go +++ b/cache/mock_cacher.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/cache (interfaces: Cacher) +// Source: cache/cache.go +// +// Generated by this command: +// +// mockgen -source=cache/cache.go -destination=cache/mock_cacher.go -package=cache -exclude_interfaces= +// // Package cache is a generated GoMock package. package cache @@ -37,15 +39,15 @@ func (m *MockCacher[K, V]) EXPECT() *MockCacherMockRecorder[K, V] { } // Evict mocks base method. -func (m *MockCacher[K, V]) Evict(arg0 K) { +func (m *MockCacher[K, V]) Evict(key K) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Evict", arg0) + m.ctrl.Call(m, "Evict", key) } // Evict indicates an expected call of Evict. -func (mr *MockCacherMockRecorder[K, V]) Evict(arg0 interface{}) *gomock.Call { +func (mr *MockCacherMockRecorder[K, V]) Evict(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Evict", reflect.TypeOf((*MockCacher[K, V])(nil).Evict), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Evict", reflect.TypeOf((*MockCacher[K, V])(nil).Evict), key) } // Flush mocks base method. @@ -60,6 +62,21 @@ func (mr *MockCacherMockRecorder[K, V]) Flush() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockCacher[K, V])(nil).Flush)) } +// Get mocks base method. +func (m *MockCacher[K, V]) Get(key K) (V, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].(V) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockCacherMockRecorder[K, V]) Get(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockCacher[K, V])(nil).Get), key) +} + // Len mocks base method. 
func (m *MockCacher[K, V]) Len() int { m.ctrl.T.Helper() @@ -74,43 +91,126 @@ func (mr *MockCacherMockRecorder[K, V]) Len() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockCacher[K, V])(nil).Len)) } -// Get mocks base method. -func (m *MockCacher[K, V]) Get(arg0 K) (V, bool) { +// PortionFilled mocks base method. +func (m *MockCacher[K, V]) PortionFilled() float64 { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(V) - ret1, _ := ret[1].(bool) - return ret0, ret1 + ret := m.ctrl.Call(m, "PortionFilled") + ret0, _ := ret[0].(float64) + return ret0 } -// Get indicates an expected call of Get. -func (mr *MockCacherMockRecorder[K, V]) Get(arg0 interface{}) *gomock.Call { +// PortionFilled indicates an expected call of PortionFilled. +func (mr *MockCacherMockRecorder[K, V]) PortionFilled() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockCacher[K, V])(nil).Get), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortionFilled", reflect.TypeOf((*MockCacher[K, V])(nil).PortionFilled)) } // Put mocks base method. -func (m *MockCacher[K, V]) Put(arg0 K, arg1 V) { +func (m *MockCacher[K, V]) Put(key K, value V) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Put", arg0, arg1) + m.ctrl.Call(m, "Put", key, value) } // Put indicates an expected call of Put. -func (mr *MockCacherMockRecorder[K, V]) Put(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockCacherMockRecorder[K, V]) Put(key, value any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockCacher[K, V])(nil).Put), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockCacher[K, V])(nil).Put), key, value) } -// PortionFilled mocks base method. -func (m *MockCacher[K, V]) PortionFilled() float64 { +// MockEvictable is a mock of Evictable interface. +type MockEvictable[K comparable] struct { + ctrl *gomock.Controller + recorder *MockEvictableMockRecorder[K] +} + +// MockEvictableMockRecorder is the mock recorder for MockEvictable. +type MockEvictableMockRecorder[K comparable] struct { + mock *MockEvictable[K] +} + +// NewMockEvictable creates a new mock instance. +func NewMockEvictable[K comparable](ctrl *gomock.Controller) *MockEvictable[K] { + mock := &MockEvictable[K]{ctrl: ctrl} + mock.recorder = &MockEvictableMockRecorder[K]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEvictable[K]) EXPECT() *MockEvictableMockRecorder[K] { + return m.recorder +} + +// Evict mocks base method. +func (m *MockEvictable[K]) Evict() { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PortionFilled") - ret0, _ := ret[0].(float64) + m.ctrl.Call(m, "Evict") +} + +// Evict indicates an expected call of Evict. +func (mr *MockEvictableMockRecorder[K]) Evict() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Evict", reflect.TypeOf((*MockEvictable[K])(nil).Evict)) +} + +// Key mocks base method. +func (m *MockEvictable[K]) Key() K { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Key") + ret0, _ := ret[0].(K) return ret0 } -// PortionFilled indicates an expected call of PortionFilled. -func (mr *MockCacherMockRecorder[K, V]) PortionFilled() *gomock.Call { +// Key indicates an expected call of Key. 
+func (mr *MockEvictableMockRecorder[K]) Key() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortionFilled", reflect.TypeOf((*MockCacher[K, V])(nil).PortionFilled)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockEvictable[K])(nil).Key)) +} + +// MockDeduplicator is a mock of Deduplicator interface. +type MockDeduplicator[K comparable, V Evictable[K]] struct { + ctrl *gomock.Controller + recorder *MockDeduplicatorMockRecorder[K, V] +} + +// MockDeduplicatorMockRecorder is the mock recorder for MockDeduplicator. +type MockDeduplicatorMockRecorder[K comparable, V Evictable[K]] struct { + mock *MockDeduplicator[K, V] +} + +// NewMockDeduplicator creates a new mock instance. +func NewMockDeduplicator[K comparable, V Evictable[K]](ctrl *gomock.Controller) *MockDeduplicator[K, V] { + mock := &MockDeduplicator[K, V]{ctrl: ctrl} + mock.recorder = &MockDeduplicatorMockRecorder[K, V]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeduplicator[K, V]) EXPECT() *MockDeduplicatorMockRecorder[K, V] { + return m.recorder +} + +// Deduplicate mocks base method. +func (m *MockDeduplicator[K, V]) Deduplicate(arg0 V) V { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Deduplicate", arg0) + ret0, _ := ret[0].(V) + return ret0 +} + +// Deduplicate indicates an expected call of Deduplicate. +func (mr *MockDeduplicatorMockRecorder[K, V]) Deduplicate(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deduplicate", reflect.TypeOf((*MockDeduplicator[K, V])(nil).Deduplicate), arg0) +} + +// Flush mocks base method. +func (m *MockDeduplicator[K, V]) Flush() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Flush") +} + +// Flush indicates an expected call of Flush. +func (mr *MockDeduplicatorMockRecorder[K, V]) Flush() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockDeduplicator[K, V])(nil).Flush)) } diff --git a/cache/test_cacher.go b/cache/test_cacher.go index 1b029bcb4b21..2e85502e4a55 100644 --- a/cache/test_cacher.go +++ b/cache/test_cacher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/unique_cache.go b/cache/unique_cache.go index 24052d79355e..b958b1f3a870 100644 --- a/cache/unique_cache.go +++ b/cache/unique_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/cache/unique_cache_test.go b/cache/unique_cache_test.go index 3f0d40f8dc0d..199bdc87c081 100644 --- a/cache/unique_cache_test.go +++ b/cache/unique_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/chains/atomic/codec.go b/chains/atomic/codec.go index bc2e93c27213..290713b3c258 100644 --- a/chains/atomic/codec.go +++ b/chains/atomic/codec.go @@ -1,22 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
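The regenerated cache mocks above keep their generic signatures, so test code instantiates them the same way as before; only the recorder parameters switch from interface{} to any. A sketch of typical usage, assuming the go.uber.org/mock module that current mockgen output targets (swap the import if the repo still vendors github.com/golang/mock):

package cache_test

import (
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/ava-labs/avalanchego/cache"
)

func TestMockCacherSketch(t *testing.T) {
	ctrl := gomock.NewController(t)

	// K and V are still pinned explicitly when constructing the generic mock.
	c := cache.NewMockCacher[string, int](ctrl)
	c.EXPECT().Put("key", 1)
	c.EXPECT().Get("key").Return(1, true)

	c.Put("key", 1)
	if v, ok := c.Get("key"); !ok || v != 1 {
		t.Fatalf("unexpected value: %d", v)
	}
}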
package atomic import ( + "math" + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const codecVersion = 0 +const CodecVersion = 0 -// codecManager is used to marshal and unmarshal dbElements and chain IDs. -var codecManager codec.Manager +// Codec is used to marshal and unmarshal dbElements and chain IDs. +var Codec codec.Manager func init() { - linearCodec := linearcodec.NewDefault() - codecManager = codec.NewDefaultManager() - if err := codecManager.RegisterCodec(codecVersion, linearCodec); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/chains/atomic/gsharedmemory/filtered_batch.go b/chains/atomic/gsharedmemory/filtered_batch.go index df63e8df2d16..a6ba81251f57 100644 --- a/chains/atomic/gsharedmemory/filtered_batch.go +++ b/chains/atomic/gsharedmemory/filtered_batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/chains/atomic/gsharedmemory/shared_memory_client.go b/chains/atomic/gsharedmemory/shared_memory_client.go index 649503a0313c..096a8117e7a1 100644 --- a/chains/atomic/gsharedmemory/shared_memory_client.go +++ b/chains/atomic/gsharedmemory/shared_memory_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/chains/atomic/gsharedmemory/shared_memory_server.go b/chains/atomic/gsharedmemory/shared_memory_server.go index 3e2d0d38940e..0aaa71c01f8e 100644 --- a/chains/atomic/gsharedmemory/shared_memory_server.go +++ b/chains/atomic/gsharedmemory/shared_memory_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/chains/atomic/gsharedmemory/shared_memory_test.go b/chains/atomic/gsharedmemory/shared_memory_test.go index 0ce546c94f77..02dfb7324a78 100644 --- a/chains/atomic/gsharedmemory/shared_memory_test.go +++ b/chains/atomic/gsharedmemory/shared_memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go index a8aa703fe217..76f5b6451d97 100644 --- a/chains/atomic/memory.go +++ b/chains/atomic/memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -107,7 +107,7 @@ func sharedID(id1, id2 ids.ID) ids.ID { id1, id2 = id2, id1 } - combinedBytes, err := codecManager.Marshal(codecVersion, [2]ids.ID{id1, id2}) + combinedBytes, err := Codec.Marshal(CodecVersion, [2]ids.ID{id1, id2}) if err != nil { panic(err) } diff --git a/chains/atomic/memory_test.go b/chains/atomic/memory_test.go index 5acdb5233af4..7ca02e6d7275 100644 --- a/chains/atomic/memory_test.go +++ b/chains/atomic/memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/chains/atomic/mock_shared_memory.go b/chains/atomic/mock_shared_memory.go index d22bd0f995ff..0e63179314da 100644 --- a/chains/atomic/mock_shared_memory.go +++ b/chains/atomic/mock_shared_memory.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/chains/atomic (interfaces: SharedMemory) +// +// Generated by this command: +// +// mockgen -package=atomic -destination=chains/atomic/mock_shared_memory.go github.com/ava-labs/avalanchego/chains/atomic SharedMemory +// // Package atomic is a generated GoMock package. package atomic @@ -41,7 +43,7 @@ func (m *MockSharedMemory) EXPECT() *MockSharedMemoryMockRecorder { // Apply mocks base method. func (m *MockSharedMemory) Apply(arg0 map[ids.ID]*Requests, arg1 ...database.Batch) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -51,9 +53,9 @@ func (m *MockSharedMemory) Apply(arg0 map[ids.ID]*Requests, arg1 ...database.Bat } // Apply indicates an expected call of Apply. -func (mr *MockSharedMemoryMockRecorder) Apply(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Apply(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockSharedMemory)(nil).Apply), varargs...) } @@ -67,7 +69,7 @@ func (m *MockSharedMemory) Get(arg0 ids.ID, arg1 [][]byte) ([][]byte, error) { } // Get indicates an expected call of Get. -func (mr *MockSharedMemoryMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Get(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSharedMemory)(nil).Get), arg0, arg1) } @@ -84,7 +86,7 @@ func (m *MockSharedMemory) Indexed(arg0 ids.ID, arg1 [][]byte, arg2, arg3 []byte } // Indexed indicates an expected call of Indexed. -func (mr *MockSharedMemoryMockRecorder) Indexed(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Indexed(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Indexed", reflect.TypeOf((*MockSharedMemory)(nil).Indexed), arg0, arg1, arg2, arg3, arg4) } diff --git a/chains/atomic/prefixes.go b/chains/atomic/prefixes.go index 08927384317e..adc21c36e2b0 100644 --- a/chains/atomic/prefixes.go +++ b/chains/atomic/prefixes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/chains/atomic/shared_memory.go b/chains/atomic/shared_memory.go index 7b2f8a562c82..d90c5685fa35 100644 --- a/chains/atomic/shared_memory.go +++ b/chains/atomic/shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
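With the shared-memory codec now exported as Codec and CodecVersion, callers outside chains/atomic can produce the same encoding that sharedID and the state database use. A small illustrative sketch; the generated IDs are placeholders:

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/chains/atomic"
	"github.com/ava-labs/avalanchego/ids"
)

func main() {
	id1, id2 := ids.GenerateTestID(), ids.GenerateTestID()

	// Marshal a pair of chain IDs with the exported codec, as memory.go now
	// does when deriving the shared database ID.
	b, err := atomic.Codec.Marshal(atomic.CodecVersion, [2]ids.ID{id1, id2})
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(b))
}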
package atomic diff --git a/chains/atomic/shared_memory_test.go b/chains/atomic/shared_memory_test.go index bb3266d80602..1597d662131a 100644 --- a/chains/atomic/shared_memory_test.go +++ b/chains/atomic/shared_memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/chains/atomic/state.go b/chains/atomic/state.go index cd7a3f2a0faa..a9e9bbd05cd8 100644 --- a/chains/atomic/state.go +++ b/chains/atomic/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -6,17 +6,22 @@ package atomic import ( "bytes" "errors" + "fmt" + + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedOperation = errors.New("duplicated operation on provided value") +var ( + errDuplicatePut = errors.New("duplicate put") + errDuplicateRemove = errors.New("duplicate remove") +) type dbElement struct { // Present indicates the value was removed before existing. @@ -86,7 +91,7 @@ func (s *state) SetValue(e *Element) error { } // This key was written twice, which is invalid - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x Value=0x%x", errDuplicatePut, e.Key, e.Value) } if err != database.ErrNotFound { // An unexpected error occurred, so we should propagate that error @@ -107,7 +112,7 @@ func (s *state) SetValue(e *Element) error { Traits: e.Traits, } - valueBytes, err := codecManager.Marshal(codecVersion, &dbElem) + valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) if err != nil { return err } @@ -151,7 +156,7 @@ func (s *state) RemoveValue(key []byte) error { // The value doesn't exist, so we should optimistically delete it dbElem := dbElement{Present: false} - valueBytes, err := codecManager.Marshal(codecVersion, &dbElem) + valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) if err != nil { return err } @@ -160,7 +165,7 @@ func (s *state) RemoveValue(key []byte) error { // Don't allow the removal of something that was already removed. if !value.Present { - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x", errDuplicateRemove, key) } // Remove [key] from the indexDB for each trait that has indexed this key. @@ -184,7 +189,7 @@ func (s *state) loadValue(key []byte) (*dbElement, error) { // The key was in the database value := &dbElement{} - _, err = codecManager.Unmarshal(valueBytes, value) + _, err = Codec.Unmarshal(valueBytes, value) return value, err } @@ -203,7 +208,7 @@ func (s *state) getKeys(traits [][]byte, startTrait, startKey []byte, limit int) lastKey := startKey // Iterate over the traits in order appending all of the keys that possess // the given [traits]. 
- utils.SortBytes(traits) + slices.SortFunc(traits, bytes.Compare) for _, trait := range traits { switch bytes.Compare(trait, startTrait) { case -1: diff --git a/chains/atomic/test_shared_memory.go b/chains/atomic/test_shared_memory.go index d89940c31c2f..82b1cbeff3a5 100644 --- a/chains/atomic/test_shared_memory.go +++ b/chains/atomic/test_shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/chains/atomic/writer.go b/chains/atomic/writer.go index d117218fe87d..6bcdd86b00b4 100644 --- a/chains/atomic/writer.go +++ b/chains/atomic/writer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/chains/linearizable_vm.go b/chains/linearizable_vm.go index f4fc93f7a696..97fe9eb4d1f4 100644 --- a/chains/linearizable_vm.go +++ b/chains/linearizable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains diff --git a/chains/manager.go b/chains/manager.go index 85472e424222..8edab5351159 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains @@ -50,13 +50,18 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" "github.com/ava-labs/avalanchego/vms/metervm" + "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/proposervm" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/vms/tracedvm" timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" @@ -78,16 +83,16 @@ const ( var ( // Commonly shared VM DB prefix - vmDBPrefix = []byte("vm") + VMDBPrefix = []byte("vm") // Bootstrapping prefixes for LinearizableVMs - vertexDBPrefix = []byte("vertex") - vertexBootstrappingDBPrefix = []byte("vertex_bs") - txBootstrappingDBPrefix = []byte("tx_bs") - blockBootstrappingDBPrefix = []byte("block_bs") + VertexDBPrefix = []byte("vertex") + VertexBootstrappingDBPrefix = []byte("vertex_bs") + TxBootstrappingDBPrefix = []byte("tx_bs") + BlockBootstrappingDBPrefix = []byte("block_bs") // Bootstrapping prefixes for ChainVMs - bootstrappingDB = []byte("bs") + ChainBootstrappingDBPrefix = []byte("bs") errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM") @@ -95,6 +100,12 @@ var ( errNoPrimaryNetworkConfig = errors.New("no subnet config for primary network found") errPartialSyncAsAValidator = errors.New("partial sync should not be configured for a validator") + fxs = 
map[ids.ID]fx.Factory{ + secp256k1fx.ID: &secp256k1fx.Factory{}, + nftfx.ID: &nftfx.Factory{}, + propertyfx.ID: &propertyfx.Factory{}, + } + _ Manager = (*manager)(nil) ) @@ -107,9 +118,6 @@ var ( type Manager interface { ids.Aliaser - // Return the router this Manager is using to route consensus messages to chains - Router() router.Router - // Queues a chain to be created in the future after chain creator is unblocked. // This is only called from the P-chain thread to create other chains // Queued chains are created only after P-chain is bootstrapped. @@ -174,41 +182,39 @@ type ManagerConfig struct { StakingBLSKey *bls.SecretKey TracingEnabled bool // Must not be used unless [TracingEnabled] is true as this may be nil. - Tracer trace.Tracer - Log logging.Logger - LogFactory logging.Factory - VMManager vms.Manager // Manage mappings from vm ID --> vm - BlockAcceptorGroup snow.AcceptorGroup - TxAcceptorGroup snow.AcceptorGroup - VertexAcceptorGroup snow.AcceptorGroup - DB database.Database - MsgCreator message.OutboundMsgBuilder // message creator, shared with network - Router router.Router // Routes incoming messages to the appropriate chain - Net network.Network // Sends consensus messages to other validators - Validators validators.Manager // Validators validating on this chain - NodeID ids.NodeID // The ID of this node - NetworkID uint32 // ID of the network this node is connected to - PartialSyncPrimaryNetwork bool - Server server.Server // Handles HTTP API calls - Keystore keystore.Keystore - AtomicMemory *atomic.Memory - AVAXAssetID ids.ID - XChainID ids.ID // ID of the X-Chain, - CChainID ids.ID // ID of the C-Chain, - CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully - TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators - Health health.Registerer - RetryBootstrap bool // Should Bootstrap be retried - RetryBootstrapWarnFrequency int // Max number of times to retry bootstrap before warning the node operator - SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig - ChainConfigs map[string]ChainConfig // alias -> ChainConfig + Tracer trace.Tracer + Log logging.Logger + LogFactory logging.Factory + VMManager vms.Manager // Manage mappings from vm ID --> vm + BlockAcceptorGroup snow.AcceptorGroup + TxAcceptorGroup snow.AcceptorGroup + VertexAcceptorGroup snow.AcceptorGroup + DB database.Database + MsgCreator message.OutboundMsgBuilder // message creator, shared with network + Router router.Router // Routes incoming messages to the appropriate chain + Net network.Network // Sends consensus messages to other validators + Validators validators.Manager // Validators validating on this chain + NodeID ids.NodeID // The ID of this node + NetworkID uint32 // ID of the network this node is connected to + PartialSyncPrimaryNetwork bool + Server server.Server // Handles HTTP API calls + Keystore keystore.Keystore + AtomicMemory *atomic.Memory + AVAXAssetID ids.ID + XChainID ids.ID // ID of the X-Chain, + CChainID ids.ID // ID of the C-Chain, + CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully + TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators + Health health.Registerer + SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig + ChainConfigs map[string]ChainConfig // alias -> ChainConfig // ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node ShutdownNodeFunc func(exitCode int) MeterVMEnabled bool // Should each VM be 
wrapped with a MeterVM Metrics metrics.MultiGatherer - AcceptedFrontierGossipFrequency time.Duration - ConsensusAppConcurrency int + FrontierPollFrequency time.Duration + ConsensusAppConcurrency int // Max Time to spend fetching a container and its // ancestors when responding to a GetAncestors @@ -266,23 +272,22 @@ type manager struct { } // New returns a new Manager -func New(config *ManagerConfig) Manager { +func New(config *ManagerConfig) (Manager, error) { + cert, err := staking.CertificateFromX509(config.StakingTLSCert.Leaf) + if err != nil { + return nil, err + } return &manager{ Aliaser: ids.NewAliaser(), ManagerConfig: *config, stakingSigner: config.StakingTLSCert.PrivateKey.(crypto.Signer), - stakingCert: staking.CertificateFromX509(config.StakingTLSCert.Leaf), + stakingCert: cert, subnets: make(map[ids.ID]subnets.Subnet), chains: make(map[ids.ID]handler.Handler), chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), unblockChainCreatorCh: make(chan struct{}), chainCreatorShutdownCh: make(chan struct{}), - } -} - -// Router that this chain manager is using to route consensus messages to chains -func (m *manager) Router() router.Router { - return m.ManagerConfig.Router + }, nil } // QueueChainCreation queues a chain creation request @@ -370,7 +375,7 @@ func (m *manager) createChain(chainParams ChainParameters) { // created or not. This attempts to notify the node operator that their // node may not be properly validating the subnet they expect to be // validating. - healthCheckErr := fmt.Errorf("failed to create chain on subnet: %s", chainParams.SubnetID) + healthCheckErr := fmt.Errorf("failed to create chain on subnet %s: %w", chainParams.SubnetID, err) err := m.Health.RegisterHealthCheck( chainAlias, health.CheckerFunc(func(context.Context) (interface{}, error) { @@ -449,7 +454,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } consensusMetrics := prometheus.NewRegistry() - chainNamespace := fmt.Sprintf("%s_%s", constants.PlatformName, primaryAlias) + chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil { return nil, fmt.Errorf("error while registering chain's metrics %w", err) } @@ -458,13 +463,13 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that // there are no conflicts when registering the Snowman consensus metrics. 
avalancheConsensusMetrics := prometheus.NewRegistry() - avalancheDAGNamespace := fmt.Sprintf("%s_avalanche", chainNamespace) + avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { return nil, fmt.Errorf("error while registering DAG metrics %w", err) } vmMetrics := metrics.NewOptionalGatherer() - vmNamespace := fmt.Sprintf("%s_vm", chainNamespace) + vmNamespace := metric.AppendNamespace(chainNamespace, "vm") if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { return nil, fmt.Errorf("error while registering vm's metrics %w", err) } @@ -512,23 +517,16 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } // TODO: Shutdown VM if an error occurs - fxs := make([]*common.Fx, len(chainParams.FxIDs)) + chainFxs := make([]*common.Fx, len(chainParams.FxIDs)) for i, fxID := range chainParams.FxIDs { - // Get a factory for the fx we want to use on our chain - fxFactory, err := m.VMManager.GetFactory(fxID) - if err != nil { - return nil, fmt.Errorf("error while getting fxFactory: %w", err) - } - - fx, err := fxFactory.New(chainLog) - if err != nil { - return nil, fmt.Errorf("error while creating fx: %w", err) + fxFactory, ok := fxs[fxID] + if !ok { + return nil, fmt.Errorf("fx %s not found", fxID) } - // Create the fx - fxs[i] = &common.Fx{ + chainFxs[i] = &common.Fx{ ID: fxID, - Fx: fx, + Fx: fxFactory.New(), } } @@ -540,7 +538,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c chainParams.GenesisData, m.Validators, vm, - fxs, + chainFxs, sb, ) if err != nil { @@ -558,7 +556,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c m.Validators, beacons, vm, - fxs, + chainFxs, sb, ) if err != nil { @@ -602,11 +600,11 @@ func (m *manager) createAvalancheChain( return nil, err } prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) - vmDB := prefixdb.New(vmDBPrefix, prefixDB) - vertexDB := prefixdb.New(vertexDBPrefix, prefixDB) - vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB) - txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB) - blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB) + vmDB := prefixdb.New(VMDBPrefix, prefixDB) + vertexDB := prefixdb.New(VertexDBPrefix, prefixDB) + vertexBootstrappingDB := prefixdb.New(VertexBootstrappingDBPrefix, prefixDB) + txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB) + blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB) vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) if err != nil { @@ -771,12 +769,15 @@ func (m *manager) createAvalancheChain( // using. 
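	// proposervm.New is now configured through a proposervm.Config struct
	// instead of positional arguments. DurangoTime is the newly added field,
	// sourced from version.GetDurangoTime for this network; the remaining
	// fields map one-to-one onto the old parameters (activation time, minimum
	// P-chain height, block delay, historical blocks, staking signer and
	// certificate leaf).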
var vmWrappingProposerVM block.ChainVM = proposervm.New( vmWrappedInsideProposerVM, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - numHistoricalBlocks, - m.stakingSigner, - m.stakingCert, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + DurangoTime: version.GetDurangoTime(m.NetworkID), + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -826,7 +827,7 @@ func (m *manager) createAvalancheChain( ctx, vdrs, msgChan, - m.AcceptedFrontierGossipFrequency, + m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector @@ -841,23 +842,14 @@ func (m *manager) createAvalancheChain( startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) - snowmanCommonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: sampleK, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - StartupTracker: startupTracker, - Sender: snowmanMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - snowGetHandler, err := snowgetter.New(vmWrappingProposerVM, snowmanCommonCfg) + snowGetHandler, err := snowgetter.New( + vmWrappingProposerVM, + snowmanMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -870,13 +862,14 @@ func (m *manager) createAvalancheChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized snowmanEngineConfig := smeng.Config{ - Ctx: snowmanCommonCfg.Ctx, - AllGetsServer: snowGetHandler, - VM: vmWrappingProposerVM, - Sender: snowmanCommonCfg.Sender, - Validators: vdrs, - Params: consensusParams, - Consensus: snowmanConsensus, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vmWrappingProposerVM, + Sender: snowmanMessageSender, + Validators: vdrs, + ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: snowmanConsensus, } snowmanEngine, err := smeng.New(snowmanEngineConfig) if err != nil { @@ -889,12 +882,20 @@ func (m *manager) createAvalancheChain( // create bootstrap gear bootstrapCfg := smbootstrap.Config{ - Config: snowmanCommonCfg, - AllGetsServer: snowGetHandler, - Blocked: blockBlocker, - VM: vmWrappingProposerVM, + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: vdrs, + SampleK: sampleK, + StartupTracker: startupTracker, + Sender: snowmanMessageSender, + BootstrapTracker: sb, + Timer: h, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + Blocked: blockBlocker, + VM: vmWrappingProposerVM, } - snowmanBootstrapper, err := smbootstrap.New( + var snowmanBootstrapper common.BootstrapableEngine + snowmanBootstrapper, err = smbootstrap.New( bootstrapCfg, snowmanEngine.Start, ) @@ -906,24 +907,14 @@ func (m *manager) createAvalancheChain( snowmanBootstrapper = 
common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer) } - avalancheCommonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - Sender: avalancheMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := avagetter.New(vtxManager, avalancheCommonCfg) + avaGetHandler, err := avagetter.New( + vtxManager, + avalancheMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.AvalancheRegisterer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) } @@ -935,16 +926,20 @@ func (m *manager) createAvalancheChain( } // create bootstrap gear - _, specifiedLinearizationTime := version.CortinaTimes[ctx.NetworkID] - specifiedLinearizationTime = specifiedLinearizationTime && ctx.ChainID == m.XChainID avalancheBootstrapperConfig := avbootstrap.Config{ - Config: avalancheCommonCfg, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: vtxManager, - VM: linearizableVM, - LinearizeOnStartup: !specifiedLinearizationTime, + AllGetsServer: avaGetHandler, + Ctx: ctx, + Beacons: vdrs, + StartupTracker: startupTracker, + Sender: avalancheMessageSender, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: vtxManager, + VM: linearizableVM, + } + if ctx.ChainID == m.XChainID { + avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID] } avalancheBootstrapper, err := avbootstrap.New( @@ -1008,8 +1003,8 @@ func (m *manager) createSnowmanChain( return nil, err } prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) - vmDB := prefixdb.New(vmDBPrefix, prefixDB) - bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB) + vmDB := prefixdb.New(VMDBPrefix, prefixDB) + bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) if err != nil { @@ -1120,12 +1115,15 @@ func (m *manager) createSnowmanChain( vm = proposervm.New( vm, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - numHistoricalBlocks, - m.stakingSigner, - m.stakingCert, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + DurangoTime: version.GetDurangoTime(m.NetworkID), + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -1175,7 +1173,7 @@ func (m *manager) createSnowmanChain( ctx, vdrs, msgChan, - m.AcceptedFrontierGossipFrequency, + m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, subnetConnector, @@ -1190,24 +1188,14 @@ func (m *manager) createSnowmanChain( startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: beacons, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: 
bootstrapWeight/2 + 1, // must be > 50% - Sender: messageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := snowgetter.New(vm, commonCfg) + snowGetHandler, err := snowgetter.New( + vm, + messageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -1220,14 +1208,15 @@ func (m *manager) createSnowmanChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized engineConfig := smeng.Config{ - Ctx: commonCfg.Ctx, - AllGetsServer: snowGetHandler, - VM: vm, - Sender: commonCfg.Sender, - Validators: vdrs, - Params: consensusParams, - Consensus: consensus, - PartialSync: m.PartialSyncPrimaryNetwork && commonCfg.Ctx.ChainID == constants.PlatformChainID, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vm, + Sender: messageSender, + Validators: vdrs, + ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: consensus, + PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } engine, err := smeng.New(engineConfig) if err != nil { @@ -1240,13 +1229,21 @@ func (m *manager) createSnowmanChain( // create bootstrap gear bootstrapCfg := smbootstrap.Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocked, - VM: vm, - Bootstrapped: bootstrapFunc, + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: beacons, + SampleK: sampleK, + StartupTracker: startupTracker, + Sender: messageSender, + BootstrapTracker: sb, + Timer: h, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + Blocked: blocked, + VM: vm, + Bootstrapped: bootstrapFunc, } - bootstrapper, err := smbootstrap.New( + var bootstrapper common.BootstrapableEngine + bootstrapper, err = smbootstrap.New( bootstrapCfg, engine.Start, ) @@ -1260,9 +1257,14 @@ func (m *manager) createSnowmanChain( // create state sync gear stateSyncCfg, err := syncer.NewConfig( - commonCfg, - m.StateSyncBeacons, snowGetHandler, + ctx, + startupTracker, + messageSender, + beacons, + sampleK, + bootstrapWeight/2+1, // must be > 50% + m.StateSyncBeacons, vm, ) if err != nil { diff --git a/chains/registrant.go b/chains/registrant.go index 3a2137048c1b..cd3aa6e9c0bf 100644 --- a/chains/registrant.go +++ b/chains/registrant.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains diff --git a/chains/test_manager.go b/chains/test_manager.go index e4dabea426f9..f7b98b29b587 100644 --- a/chains/test_manager.go +++ b/chains/test_manager.go @@ -1,12 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/networking/router" -) +import "github.com/ava-labs/avalanchego/ids" // TestManager implements Manager but does nothing. 
Always returns nil error. // To be used only in tests @@ -14,10 +11,6 @@ var TestManager Manager = testManager{} type testManager struct{} -func (testManager) Router() router.Router { - return nil -} - func (testManager) QueueChainCreation(ChainParameters) {} func (testManager) ForceCreateChain(ChainParameters) {} diff --git a/codec/codec.go b/codec/codec.go index 413b6d174be2..7aacb9085848 100644 --- a/codec/codec.go +++ b/codec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/codec/general_codec.go b/codec/general_codec.go index ac32b84e6a87..3688065a021f 100644 --- a/codec/general_codec.go +++ b/codec/general_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/codec/hierarchycodec/codec.go b/codec/hierarchycodec/codec.go index d1d03d879275..db2ffed0425d 100644 --- a/codec/hierarchycodec/codec.go +++ b/codec/hierarchycodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hierarchycodec @@ -7,9 +7,11 @@ import ( "fmt" "reflect" "sync" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -42,28 +44,26 @@ type typeID struct { type hierarchyCodec struct { codec.Codec - lock sync.RWMutex - currentGroupID uint16 - nextTypeID uint16 - typeIDToType map[typeID]reflect.Type - typeToTypeID map[reflect.Type]typeID + lock sync.RWMutex + currentGroupID uint16 + nextTypeID uint16 + registeredTypes *bimap.BiMap[typeID, reflect.Type] } // New returns a new, concurrency-safe codec -func New(tagNames []string, maxSliceLen uint32) Codec { +func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { hCodec := &hierarchyCodec{ - currentGroupID: 0, - nextTypeID: 0, - typeIDToType: map[typeID]reflect.Type{}, - typeToTypeID: map[reflect.Type]typeID{}, + currentGroupID: 0, + nextTypeID: 0, + registeredTypes: bimap.New[typeID, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) return hCodec } // NewDefault returns a new codec with reasonable default values -func NewDefault() Codec { - return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +func NewDefault(durangoTime time.Time) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) } // SkipRegistrations some number of type IDs @@ -88,7 +88,7 @@ func (c *hierarchyCodec) RegisterType(val interface{}) error { defer c.lock.Unlock() valType := reflect.TypeOf(val) - if _, exists := c.typeToTypeID[valType]; exists { + if c.registeredTypes.HasValue(valType) { return fmt.Errorf("%w: %v", codec.ErrDuplicateType, valType) } @@ -98,8 +98,7 @@ func (c *hierarchyCodec) RegisterType(val interface{}) error { } c.nextTypeID++ - c.typeIDToType[valTypeID] = valType - c.typeToTypeID[valType] = valTypeID + c.registeredTypes.Put(valTypeID, valType) return nil } @@ -112,7 +111,7 @@ func (c *hierarchyCodec) PackPrefix(p *wrappers.Packer, valueType 
reflect.Type) c.lock.RLock() defer c.lock.RUnlock() - typeID, ok := c.typeToTypeID[valueType] // Get the type ID of the value being marshaled + typeID, ok := c.registeredTypes.GetKey(valueType) // Get the type ID of the value being marshaled if !ok { return fmt.Errorf("can't marshal unregistered type %q", valueType) } @@ -136,7 +135,7 @@ func (c *hierarchyCodec) UnpackPrefix(p *wrappers.Packer, valueType reflect.Type typeID: typeIDShort, } // Get a type that implements the interface - implementingType, ok := c.typeIDToType[t] + implementingType, ok := c.registeredTypes.GetValue(t) if !ok { return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: unknown type ID %+v", t) } diff --git a/codec/hierarchycodec/codec_test.go b/codec/hierarchycodec/codec_test.go index c4c71d76571c..8149cdcc65e2 100644 --- a/codec/hierarchycodec/codec_test.go +++ b/codec/hierarchycodec/codec_test.go @@ -1,29 +1,45 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hierarchycodec import ( "testing" + "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault() + c := NewDefault(mockable.MaxTime) test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New([]string{"tag1", "tag2"}, defaultMaxSliceLength) + c := New(mockable.MaxTime, []string{"tag1", "tag2"}, defaultMaxSliceLength) + test(c, t) + } +} + +func TestEnforceSliceLen(t *testing.T) { + for _, test := range codec.EnforceSliceLenTests { + c := NewDefault(mockable.MaxTime) + test(c, t) + } +} + +func TestIgnoreSliceLen(t *testing.T) { + for _, test := range codec.IgnoreSliceLenTests { + c := NewDefault(time.Time{}) test(c, t) } } func FuzzStructUnmarshalHierarchyCodec(f *testing.F) { - c := NewDefault() + c := NewDefault(mockable.MaxTime) codec.FuzzStructUnmarshal(c, f) } diff --git a/codec/linearcodec/camino_codec.go b/codec/linearcodec/camino_codec.go index 462685276d03..69d1c07ca681 100644 --- a/codec/linearcodec/camino_codec.go +++ b/codec/linearcodec/camino_codec.go @@ -6,9 +6,11 @@ package linearcodec import ( "fmt" "reflect" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils/bimap" ) const ( @@ -35,27 +37,26 @@ type caminoLinearCodec struct { nextCustomTypeID uint32 } -func NewCamino(tagNames []string, maxSliceLen uint32) CaminoCodec { +func NewCamino(durangoTime time.Time, tagNames []string, maxSliceLen uint32) CaminoCodec { hCodec := &caminoLinearCodec{ linearCodec: linearCodec{ - nextTypeID: 0, - typeIDToType: map[uint32]reflect.Type{}, - typeToTypeID: map[reflect.Type]uint32{}, + nextTypeID: 0, + registeredTypes: bimap.New[uint32, reflect.Type](), }, nextCustomTypeID: firstCustomTypeID, } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) return hCodec } // NewDefault is a convenience constructor; it returns a new codec with reasonable default values -func NewCaminoDefault() CaminoCodec { - return NewCamino([]string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) +func NewCaminoDefault(durangoTime time.Time) CaminoCodec { + return NewCamino(durangoTime, []string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) } // NewCustomMaxLength is a 
convenience constructor; it returns a new codec with custom max length and default tags -func NewCaminoCustomMaxLength(maxSliceLen uint32) CaminoCodec { - return NewCamino([]string{reflectcodec.DefaultTagName}, maxSliceLen) +func NewCaminoCustomMaxLength(durangoTime time.Time, maxSliceLen uint32) CaminoCodec { + return NewCamino(durangoTime, []string{reflectcodec.DefaultTagName}, maxSliceLen) } // RegisterCustomType is used to register custom types that may be @@ -66,12 +67,10 @@ func (c *caminoLinearCodec) RegisterCustomType(val interface{}) error { defer c.lock.Unlock() valType := reflect.TypeOf(val) - if _, exists := c.typeToTypeID[valType]; exists { - return fmt.Errorf("type %v has already been registered", valType) + if c.registeredTypes.HasValue(valType) { + return fmt.Errorf("%w: %v", codec.ErrDuplicateType, valType) } - - c.typeIDToType[c.nextCustomTypeID] = valType - c.typeToTypeID[valType] = c.nextCustomTypeID + c.registeredTypes.Put(c.nextCustomTypeID, valType) c.nextCustomTypeID++ return nil } diff --git a/codec/linearcodec/camino_codec_test.go b/codec/linearcodec/camino_codec_test.go index 5830884c81bb..925e8505b6a9 100644 --- a/codec/linearcodec/camino_codec_test.go +++ b/codec/linearcodec/camino_codec_test.go @@ -7,25 +7,26 @@ import ( "testing" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectorsCamino(t *testing.T) { for _, test := range codec.Tests { - c := NewCaminoDefault() + c := NewCaminoDefault(mockable.MaxTime) test(c, t) } } func TestMultipleTagsCamino(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := NewCamino([]string{"tag1", "tag2"}, DefaultMaxSliceLength) + c := NewCamino(mockable.MaxTime, []string{"tag1", "tag2"}, DefaultMaxSliceLength) test(c, t) } } func TestVersionCamino(t *testing.T) { for _, test := range codec.VersionTests { - c := NewCaminoDefault() + c := NewCaminoDefault(mockable.MaxTime) test(c, t) } } diff --git a/codec/linearcodec/codec.go b/codec/linearcodec/codec.go index 677c331b0366..6ad36b8a197d 100644 --- a/codec/linearcodec/codec.go +++ b/codec/linearcodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linearcodec @@ -7,9 +7,11 @@ import ( "fmt" "reflect" "sync" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -36,32 +38,30 @@ type Codec interface { type linearCodec struct { codec.Codec - lock sync.RWMutex - nextTypeID uint32 - typeIDToType map[uint32]reflect.Type - typeToTypeID map[reflect.Type]uint32 + lock sync.RWMutex + nextTypeID uint32 + registeredTypes *bimap.BiMap[uint32, reflect.Type] } // New returns a new, concurrency-safe codec; it allow to specify // both tagNames and maxSlicelenght -func New(tagNames []string, maxSliceLen uint32) Codec { +func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { hCodec := &linearCodec{ - nextTypeID: 0, - typeIDToType: map[uint32]reflect.Type{}, - typeToTypeID: map[reflect.Type]uint32{}, + nextTypeID: 0, + registeredTypes: bimap.New[uint32, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) return hCodec } // NewDefault is a convenience constructor; it returns a new codec with reasonable default values -func NewDefault() Codec { - return New([]string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) +func NewDefault(durangoTime time.Time) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) } // NewCustomMaxLength is a convenience constructor; it returns a new codec with custom max length and default tags -func NewCustomMaxLength(maxSliceLen uint32) Codec { - return New([]string{reflectcodec.DefaultTagName}, maxSliceLen) +func NewCustomMaxLength(durangoTime time.Time, maxSliceLen uint32) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, maxSliceLen) } // Skip some number of type IDs @@ -78,12 +78,11 @@ func (c *linearCodec) RegisterType(val interface{}) error { defer c.lock.Unlock() valType := reflect.TypeOf(val) - if _, exists := c.typeToTypeID[valType]; exists { + if c.registeredTypes.HasValue(valType) { return fmt.Errorf("%w: %v", codec.ErrDuplicateType, valType) } - c.typeIDToType[c.nextTypeID] = valType - c.typeToTypeID[valType] = c.nextTypeID + c.registeredTypes.Put(c.nextTypeID, valType) c.nextTypeID++ return nil } @@ -97,7 +96,7 @@ func (c *linearCodec) PackPrefix(p *wrappers.Packer, valueType reflect.Type) err c.lock.RLock() defer c.lock.RUnlock() - typeID, ok := c.typeToTypeID[valueType] // Get the type ID of the value being marshaled + typeID, ok := c.registeredTypes.GetKey(valueType) // Get the type ID of the value being marshaled if !ok { return fmt.Errorf("can't marshal unregistered type %q", valueType) } @@ -114,7 +113,7 @@ func (c *linearCodec) UnpackPrefix(p *wrappers.Packer, valueType reflect.Type) ( return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %w", p.Err) } // Get a type that implements the interface - implementingType, ok := c.typeIDToType[typeID] + implementingType, ok := c.registeredTypes.GetValue(typeID) if !ok { return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: unknown type ID %d", typeID) } diff --git a/codec/linearcodec/codec_test.go b/codec/linearcodec/codec_test.go index db8a4e720dd6..3d2f3efff68a 100644 --- a/codec/linearcodec/codec_test.go +++ b/codec/linearcodec/codec_test.go @@ -1,29 +1,45 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package linearcodec import ( "testing" + "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault() + c := NewDefault(mockable.MaxTime) test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New([]string{"tag1", "tag2"}, DefaultMaxSliceLength) + c := New(mockable.MaxTime, []string{"tag1", "tag2"}, DefaultMaxSliceLength) + test(c, t) + } +} + +func TestEnforceSliceLen(t *testing.T) { + for _, test := range codec.EnforceSliceLenTests { + c := NewDefault(mockable.MaxTime) + test(c, t) + } +} + +func TestIgnoreSliceLen(t *testing.T) { + for _, test := range codec.IgnoreSliceLenTests { + c := NewDefault(time.Time{}) test(c, t) } } func FuzzStructUnmarshalLinearCodec(f *testing.F) { - c := NewDefault() + c := NewDefault(mockable.MaxTime) codec.FuzzStructUnmarshal(c, f) } diff --git a/codec/manager.go b/codec/manager.go index 3a5e9eb174fc..6fb48aaad9f8 100644 --- a/codec/manager.go +++ b/codec/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/codec/mock_manager.go b/codec/mock_manager.go index 91961806bf4f..36bbae57e96f 100644 --- a/codec/mock_manager.go +++ b/codec/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/codec (interfaces: Manager) +// +// Generated by this command: +// +// mockgen -package=codec -destination=codec/mock_manager.go github.com/ava-labs/avalanchego/codec Manager +// // Package codec is a generated GoMock package. package codec @@ -37,7 +39,7 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // Marshal mocks base method. -func (m *MockManager) Marshal(arg0 uint16, arg1 interface{}) ([]byte, error) { +func (m *MockManager) Marshal(arg0 uint16, arg1 any) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Marshal", arg0, arg1) ret0, _ := ret[0].([]byte) @@ -46,7 +48,7 @@ func (m *MockManager) Marshal(arg0 uint16, arg1 interface{}) ([]byte, error) { } // Marshal indicates an expected call of Marshal. -func (mr *MockManagerMockRecorder) Marshal(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Marshal(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockManager)(nil).Marshal), arg0, arg1) } @@ -60,13 +62,13 @@ func (m *MockManager) RegisterCodec(arg0 uint16, arg1 Codec) error { } // RegisterCodec indicates an expected call of RegisterCodec. -func (mr *MockManagerMockRecorder) RegisterCodec(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterCodec(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCodec", reflect.TypeOf((*MockManager)(nil).RegisterCodec), arg0, arg1) } // Size mocks base method. 
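Note on the registry refactor in the hierarchycodec and linearcodec hunks above: the paired typeIDToType/typeToTypeID maps are collapsed into a single bimap.BiMap, so the forward and reverse lookups can no longer drift apart. A minimal sketch of the resulting pattern, assuming only the bimap calls already used in those hunks (New, HasValue, Put, GetKey, GetValue); it is not part of the patch.

package main

import (
	"fmt"
	"reflect"

	"github.com/ava-labs/avalanchego/utils/bimap"
)

func main() {
	registeredTypes := bimap.New[uint32, reflect.Type]()

	valType := reflect.TypeOf(struct{ A int }{})
	if registeredTypes.HasValue(valType) {
		fmt.Println("duplicate type")
		return
	}
	registeredTypes.Put(0, valType) // a single call keeps both directions in sync

	id, _ := registeredTypes.GetKey(valType) // marshal path: concrete type -> type ID
	typ, _ := registeredTypes.GetValue(id)   // unmarshal path: type ID -> concrete type
	fmt.Println(id, typ)
}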
-func (m *MockManager) Size(arg0 uint16, arg1 interface{}) (int, error) { +func (m *MockManager) Size(arg0 uint16, arg1 any) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Size", arg0, arg1) ret0, _ := ret[0].(int) @@ -75,13 +77,13 @@ func (m *MockManager) Size(arg0 uint16, arg1 interface{}) (int, error) { } // Size indicates an expected call of Size. -func (mr *MockManagerMockRecorder) Size(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Size(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockManager)(nil).Size), arg0, arg1) } // Unmarshal mocks base method. -func (m *MockManager) Unmarshal(arg0 []byte, arg1 interface{}) (uint16, error) { +func (m *MockManager) Unmarshal(arg0 []byte, arg1 any) (uint16, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Unmarshal", arg0, arg1) ret0, _ := ret[0].(uint16) @@ -90,7 +92,7 @@ func (m *MockManager) Unmarshal(arg0 []byte, arg1 interface{}) (uint16, error) { } // Unmarshal indicates an expected call of Unmarshal. -func (mr *MockManagerMockRecorder) Unmarshal(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Unmarshal(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmarshal", reflect.TypeOf((*MockManager)(nil).Unmarshal), arg0, arg1) } diff --git a/codec/reflectcodec/camino.go b/codec/reflectcodec/camino.go new file mode 100644 index 000000000000..58da5154e169 --- /dev/null +++ b/codec/reflectcodec/camino.go @@ -0,0 +1,31 @@ +// Copyright (C) 2022, Chain4Travel AG. All rights reserved. +// See the file LICENSE for licensing terms. + +package reflectcodec + +import "reflect" + +const ( + upgradeVersionTagName = "upgradeVersion" + UpgradeVersionIDFieldName = "UpgradeVersionID" +) + +func checkUpgrade(t reflect.Type, numFields int) (bool, int) { + if numFields > 0 && + t.Field(0).Type.Kind() == reflect.Uint64 && + t.Field(0).Name == UpgradeVersionIDFieldName { + return true, 1 + } + return false, 0 +} + +type SerializedFields struct { + Fields []FieldDesc + CheckUpgrade bool + MaxUpgradeVersion uint16 +} + +type FieldDesc struct { + Index int + UpgradeVersion uint16 +} diff --git a/codec/reflectcodec/struct_fielder.go b/codec/reflectcodec/struct_fielder.go index 70f2e17cac81..a40d86b4df30 100644 --- a/codec/reflectcodec/struct_fielder.go +++ b/codec/reflectcodec/struct_fielder.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reflectcodec @@ -22,112 +22,65 @@ import ( "github.com/ava-labs/avalanchego/codec" ) -const ( - // SliceLenTagName that specifies the length of a slice. - SliceLenTagName = "len" - - // TagValue is the value the tag must have to be serialized. - TagValue = "true" - - // TagValue is the value the tag must have to be serialized, this variant - // includes the nullable option - TagWithNullableValue = "true,nullable" - - UpgradeVersionIDFieldName = "UpgradeVersionID" - UpgradeVersionTagName = "upgradeVersion" -) +// TagValue is the value the tag must have to be serialized. 
+const TagValue = "true" var _ StructFielder = (*structFielder)(nil) -type FieldDesc struct { - Index int - MaxSliceLen uint32 - Nullable bool - UpgradeVersion uint16 -} - -type SerializedFields struct { - Fields []FieldDesc - CheckUpgrade bool - MaxUpgradeVersion uint16 -} - // StructFielder handles discovery of serializable fields in a struct. type StructFielder interface { // Returns the fields that have been marked as serializable in [t], which is - // a struct type. Additionally, returns the custom maximum length slice that - // may be serialized into the field, if any. + // a struct type. // Returns an error if a field has tag "[tagName]: [TagValue]" but the field // is un-exported. // GetSerializedField(Foo) --> [1,5,8] means Foo.Field(1), Foo.Field(5), // Foo.Field(8) are to be serialized/deserialized. - GetSerializedFields(t reflect.Type) (*SerializedFields, error) + GetSerializedFields(t reflect.Type) (SerializedFields, error) } -func NewStructFielder(tagNames []string, maxSliceLen uint32) StructFielder { +func NewStructFielder(tagNames []string) StructFielder { return &structFielder{ tags: tagNames, - maxSliceLen: maxSliceLen, - serializedFieldIndices: make(map[reflect.Type]*SerializedFields), + serializedFieldIndices: make(map[reflect.Type]SerializedFields), } } type structFielder struct { - lock sync.Mutex + lock sync.RWMutex // multiple tags per field can be specified. A field is serialized/deserialized // if it has at least one of the specified tags. tags []string - maxSliceLen uint32 - // Key: a struct type // Value: Slice where each element is index in the struct type of a field // that is serialized/deserialized e.g. Foo --> [1,5,8] means Foo.Field(1), // etc. are to be serialized/deserialized. We assume this cache is pretty // small (a few hundred keys at most) and doesn't take up much memory. - serializedFieldIndices map[reflect.Type]*SerializedFields + serializedFieldIndices map[reflect.Type]SerializedFields } -func (s *structFielder) GetSerializedFields(t reflect.Type) (*SerializedFields, error) { +func (s *structFielder) GetSerializedFields(t reflect.Type) (SerializedFields, error) { + if serializedFields, ok := s.getCachedSerializedFields(t); ok { // use pre-computed result + return serializedFields, nil + } + s.lock.Lock() defer s.lock.Unlock() - if s.serializedFieldIndices == nil { - s.serializedFieldIndices = make(map[reflect.Type]*SerializedFields) - } - if serializedFields, ok := s.serializedFieldIndices[t]; ok { // use pre-computed result - return serializedFields, nil - } numFields := t.NumField() - checkUpgrade := false - startIndex := 0 - if numFields > 0 && t.Field(0).Type.Kind() == reflect.Uint64 && - t.Field(0).Name == UpgradeVersionIDFieldName { - checkUpgrade = true - startIndex = 1 - } - serializedFields := &SerializedFields{Fields: make([]FieldDesc, 0, numFields), CheckUpgrade: checkUpgrade} - maxUpgradeVersion := uint16(0) + checkUpgrade, startIndex := checkUpgrade(t, numFields) + serializedFields := SerializedFields{Fields: make([]FieldDesc, 0, numFields), CheckUpgrade: checkUpgrade} for i := startIndex; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) // Multiple tags per fields can be specified. 
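The new codec/reflectcodec/camino.go above keeps Camino's upgrade-versioned serialization: checkUpgrade treats a struct whose first field is UpgradeVersionID uint64 as upgrade-aware, and the struct_fielder.go changes here record a per-field UpgradeVersion parsed from the upgradeVersion tag so that newer fields are skipped for older encodings. A hypothetical struct shape this targets; the type and field names are illustrative only and do not come from the patch.

package example

type upgradeAwareTx struct {
	// A first field with exactly this name and uint64 kind makes checkUpgrade
	// treat the struct as upgrade-versioned (see UpgradeVersionIDFieldName).
	UpgradeVersionID uint64

	// Plain serialized field, present for every upgrade version.
	Amount uint64 `serialize:"true"`

	// Recorded with FieldDesc.UpgradeVersion = 1: skipped by marshal/unmarshal
	// while the value's upgrade version is still below 1.
	Memo []byte `serialize:"true" upgradeVersion:"1"`
}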
// Serialize/Deserialize field if it has // any tag with the right value - var ( - captureField bool - nullable bool - ) + var captureField bool for _, tag := range s.tags { - switch field.Tag.Get(tag) { - case TagValue: - captureField = true - case TagWithNullableValue: + if field.Tag.Get(tag) == TagValue { captureField = true - nullable = true - } - if captureField { break } } @@ -135,36 +88,36 @@ func (s *structFielder) GetSerializedFields(t reflect.Type) (*SerializedFields, continue } if !field.IsExported() { // Can only marshal exported fields - return nil, fmt.Errorf("can not marshal %w: %s", + return SerializedFields{}, fmt.Errorf("can not marshal %w: %s", codec.ErrUnexportedField, field.Name, ) } - upgradeVersionTag := field.Tag.Get(UpgradeVersionTagName) + upgradeVersionTag := field.Tag.Get(upgradeVersionTagName) upgradeVersion := uint16(0) if upgradeVersionTag != "" { v, err := strconv.ParseUint(upgradeVersionTag, 10, 8) if err != nil { - return nil, fmt.Errorf("can't parse %s (%s)", UpgradeVersionTagName, upgradeVersionTag) + return SerializedFields{}, fmt.Errorf("can't parse %s (%s)", upgradeVersionTagName, upgradeVersionTag) } upgradeVersion = uint16(v) - maxUpgradeVersion = upgradeVersion + serializedFields.MaxUpgradeVersion = upgradeVersion } - sliceLenField := field.Tag.Get(SliceLenTagName) - maxSliceLen := s.maxSliceLen - if newLen, err := strconv.ParseUint(sliceLenField, 10, 31); err == nil { - maxSliceLen = uint32(newLen) - } serializedFields.Fields = append(serializedFields.Fields, FieldDesc{ Index: i, - MaxSliceLen: maxSliceLen, - Nullable: nullable, UpgradeVersion: upgradeVersion, }) } - serializedFields.MaxUpgradeVersion = maxUpgradeVersion s.serializedFieldIndices[t] = serializedFields // cache result return serializedFields, nil } + +func (s *structFielder) getCachedSerializedFields(t reflect.Type) (SerializedFields, bool) { + s.lock.RLock() + defer s.lock.RUnlock() + + cachedFields, ok := s.serializedFieldIndices[t] + return cachedFields, ok +} diff --git a/codec/reflectcodec/type_codec.go b/codec/reflectcodec/type_codec.go index f5f6eef6e8d1..f21545913156 100644 --- a/codec/reflectcodec/type_codec.go +++ b/codec/reflectcodec/type_codec.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reflectcodec @@ -19,6 +19,7 @@ import ( "fmt" "math" "reflect" + "time" "golang.org/x/exp/slices" @@ -82,16 +83,18 @@ type TypeCodec interface { // 7. 
nil slices are marshaled as empty slices type genericCodec struct { typer TypeCodec + durangoTime time.Time // Time after which [maxSliceLen] will be ignored maxSliceLen uint32 fielder StructFielder } // New returns a new, concurrency-safe codec -func New(typer TypeCodec, tagNames []string, maxSliceLen uint32) codec.Codec { +func New(typer TypeCodec, tagNames []string, durangoTime time.Time, maxSliceLen uint32) codec.Codec { return &genericCodec{ typer: typer, + durangoTime: durangoTime, maxSliceLen: maxSliceLen, - fielder: NewStructFielder(tagNames, maxSliceLen), + fielder: NewStructFielder(tagNames), } } @@ -100,16 +103,14 @@ func (c *genericCodec) Size(value interface{}) (int, error) { return 0, errMarshalNil // can't marshal nil } - size, _, err := c.size(reflect.ValueOf(value), false /*=nullable*/, nil /*=typeStack*/) + size, _, err := c.size(reflect.ValueOf(value), nil /*=typeStack*/) return size, err } // size returns the size of the value along with whether the value is constant -// sized. This function takes into account a `nullable` property which allows -// pointers and interfaces to serialize nil values +// sized. func (c *genericCodec) size( value reflect.Value, - nullable bool, typeStack set.Set[reflect.Type], ) (int, bool, error) { switch valueKind := value.Kind(); valueKind { @@ -135,24 +136,14 @@ func (c *genericCodec) size( return wrappers.StringLen(value.String()), false, nil case reflect.Ptr: if value.IsNil() { - if !nullable { - return 0, false, errMarshalNil - } - return wrappers.BoolLen, false, nil + return 0, false, errMarshalNil } - size, constSize, err := c.size(value.Elem(), false /*=nullable*/, typeStack) - if nullable { - return wrappers.BoolLen + size, false, err - } - return size, constSize, err + return c.size(value.Elem(), typeStack) case reflect.Interface: if value.IsNil() { - if !nullable { - return 0, false, errMarshalNil - } - return wrappers.BoolLen, false, nil + return 0, false, errMarshalNil } underlyingValue := value.Interface() @@ -163,12 +154,9 @@ func (c *genericCodec) size( typeStack.Add(underlyingType) prefixSize := c.typer.PrefixSize(underlyingType) - valueSize, _, err := c.size(value.Elem(), false /*=nullable*/, typeStack) + valueSize, _, err := c.size(value.Elem(), typeStack) typeStack.Remove(underlyingType) - if nullable { - return wrappers.BoolLen + prefixSize + valueSize, false, err - } return prefixSize + valueSize, false, err case reflect.Slice: @@ -177,7 +165,7 @@ func (c *genericCodec) size( return wrappers.IntLen, false, nil } - size, constSize, err := c.size(value.Index(0), nullable, typeStack) + size, constSize, err := c.size(value.Index(0), typeStack) if err != nil { return 0, false, err } @@ -189,7 +177,7 @@ func (c *genericCodec) size( } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i), nullable, typeStack) + innerSize, _, err := c.size(value.Index(i), typeStack) if err != nil { return 0, false, err } @@ -203,7 +191,7 @@ func (c *genericCodec) size( return 0, true, nil } - size, constSize, err := c.size(value.Index(0), nullable, typeStack) + size, constSize, err := c.size(value.Index(0), typeStack) if err != nil { return 0, false, err } @@ -215,7 +203,7 @@ func (c *genericCodec) size( } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i), nullable, typeStack) + innerSize, _, err := c.size(value.Index(i), typeStack) if err != nil { return 0, false, err } @@ -246,7 +234,7 @@ func (c *genericCodec) size( if fieldDesc.UpgradeVersion > upgradeVersion { break } - innerSize, 
innerConstSize, err := c.size(value.Field(fieldDesc.Index), fieldDesc.Nullable, typeStack) + innerSize, innerConstSize, err := c.size(value.Field(fieldDesc.Index), typeStack) if err != nil { return 0, false, err } @@ -261,11 +249,11 @@ func (c *genericCodec) size( return wrappers.IntLen, false, nil } - keySize, keyConstSize, err := c.size(iter.Key(), false /*=nullable*/, typeStack) + keySize, keyConstSize, err := c.size(iter.Key(), typeStack) if err != nil { return 0, false, err } - valueSize, valueConstSize, err := c.size(iter.Value(), nullable, typeStack) + valueSize, valueConstSize, err := c.size(iter.Value(), typeStack) if err != nil { return 0, false, err } @@ -280,7 +268,7 @@ func (c *genericCodec) size( totalValueSize = valueSize ) for iter.Next() { - valueSize, _, err := c.size(iter.Value(), nullable, typeStack) + valueSize, _, err := c.size(iter.Value(), typeStack) if err != nil { return 0, false, err } @@ -294,7 +282,7 @@ func (c *genericCodec) size( totalKeySize = keySize ) for iter.Next() { - keySize, _, err := c.size(iter.Key(), false /*=nullable*/, typeStack) + keySize, _, err := c.size(iter.Key(), typeStack) if err != nil { return 0, false, err } @@ -305,11 +293,11 @@ func (c *genericCodec) size( default: totalSize := wrappers.IntLen + keySize + valueSize for iter.Next() { - keySize, _, err := c.size(iter.Key(), false /*=nullable*/, typeStack) + keySize, _, err := c.size(iter.Key(), typeStack) if err != nil { return 0, false, err } - valueSize, _, err := c.size(iter.Value(), nullable, typeStack) + valueSize, _, err := c.size(iter.Value(), typeStack) if err != nil { return 0, false, err } @@ -329,7 +317,7 @@ func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error return errMarshalNil // can't marshal nil } - return c.marshal(reflect.ValueOf(value), p, c.maxSliceLen, false /*=nullable*/, nil /*=typeStack*/) + return c.marshal(reflect.ValueOf(value), p, nil /*=typeStack*/) } // marshal writes the byte representation of [value] to [p] @@ -338,8 +326,6 @@ func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error func (c *genericCodec) marshal( value reflect.Value, p *wrappers.Packer, - maxSliceLen uint32, - nullable bool, typeStack set.Set[reflect.Type], ) error { switch valueKind := value.Kind(); valueKind { @@ -374,25 +360,13 @@ func (c *genericCodec) marshal( p.PackBool(value.Bool()) return p.Err case reflect.Ptr: - isNil := value.IsNil() - if nullable { - p.PackBool(isNil) - if isNil || p.Err != nil { - return p.Err - } - } else if isNil { + if value.IsNil() { return errMarshalNil } - return c.marshal(value.Elem(), p, c.maxSliceLen, false /*=nullable*/, typeStack) + return c.marshal(value.Elem(), p, typeStack) case reflect.Interface: - isNil := value.IsNil() - if nullable { - p.PackBool(isNil) - if isNil || p.Err != nil { - return p.Err - } - } else if isNil { + if value.IsNil() { return errMarshalNil } @@ -405,18 +379,25 @@ func (c *genericCodec) marshal( if err := c.typer.PackPrefix(p, underlyingType); err != nil { return err } - if err := c.marshal(value.Elem(), p, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + if err := c.marshal(value.Elem(), p, typeStack); err != nil { return err } typeStack.Remove(underlyingType) return p.Err case reflect.Slice: numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. 
- if uint32(numElts) > maxSliceLen { + if numElts > math.MaxInt32 { return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", codec.ErrMaxSliceLenExceeded, numElts, - maxSliceLen, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { + return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts, + c.maxSliceLen, ) } p.PackInt(uint32(numElts)) // pack # elements @@ -436,27 +417,20 @@ func (c *genericCodec) marshal( return p.Err } for i := 0; i < numElts; i++ { // Process each element in the slice - if err := c.marshal(value.Index(i), p, c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.marshal(value.Index(i), p, typeStack); err != nil { return err } } return nil case reflect.Array: - numElts := value.Len() if elemKind := value.Type().Kind(); elemKind == reflect.Uint8 { sliceVal := value.Convert(reflect.TypeOf([]byte{})) p.PackFixedBytes(sliceVal.Bytes()) return p.Err } - if uint32(numElts) > c.maxSliceLen { - return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", - codec.ErrMaxSliceLenExceeded, - numElts, - c.maxSliceLen, - ) - } + numElts := value.Len() for i := 0; i < numElts; i++ { // Process each element in the array - if err := c.marshal(value.Index(i), p, c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.marshal(value.Index(i), p, typeStack); err != nil { return err } } @@ -481,7 +455,7 @@ func (c *genericCodec) marshal( if fieldDesc.UpgradeVersion > upgradeVersion { break } - if err := c.marshal(value.Field(fieldDesc.Index), p, fieldDesc.MaxSliceLen, fieldDesc.Nullable, typeStack); err != nil { // Serialize the field and write to byte array + if err := c.marshal(value.Field(fieldDesc.Index), p, typeStack); err != nil { // Serialize the field and write to byte array return err } } @@ -489,11 +463,18 @@ func (c *genericCodec) marshal( case reflect.Map: keys := value.MapKeys() numElts := len(keys) - if uint32(numElts) > maxSliceLen { + if numElts > math.MaxInt32 { + return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", codec.ErrMaxSliceLenExceeded, numElts, - maxSliceLen, + c.maxSliceLen, ) } p.PackInt(uint32(numElts)) // pack # elements @@ -512,7 +493,7 @@ func (c *genericCodec) marshal( startOffset := p.Offset endOffset := p.Offset for i, key := range keys { - if err := c.marshal(key, p, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + if err := c.marshal(key, p, typeStack); err != nil { return err } if p.Err != nil { @@ -526,10 +507,10 @@ func (c *genericCodec) marshal( endOffset = p.Offset } - slices.SortFunc(sortedKeys, func(a, b keyTuple) bool { + slices.SortFunc(sortedKeys, func(a, b keyTuple) int { aBytes := p.Bytes[a.startIndex:a.endIndex] bBytes := p.Bytes[b.startIndex:b.endIndex] - return bytes.Compare(aBytes, bBytes) < 0 + return bytes.Compare(aBytes, bBytes) }) allKeyBytes := slices.Clone(p.Bytes[startOffset:p.Offset]) @@ -545,7 +526,7 @@ func (c *genericCodec) marshal( } // serialize and pack value - if err := c.marshal(value.MapIndex(key.key), p, c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.marshal(value.MapIndex(key.key), p, typeStack); err != nil { return err } } @@ -570,7 +551,7 @@ func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if 
destPtr.Kind() != reflect.Ptr { return errNeedPointer } - if err := c.unmarshal(&p, destPtr.Elem(), c.maxSliceLen, false /*=nullable*/, nil /*=typeStack*/); err != nil { + if err := c.unmarshal(&p, destPtr.Elem(), nil /*=typeStack*/); err != nil { return err } if p.Offset != len(bytes) { @@ -585,16 +566,10 @@ func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { // Unmarshal from p.Bytes into [value]. [value] must be addressable. // -// The [nullable] property affects how pointers and interfaces are unmarshalled, -// as an extra byte would be used to unmarshal nil values for pointers and -// interaces -// // c.lock should be held for the duration of this function func (c *genericCodec) unmarshal( p *wrappers.Packer, value reflect.Value, - maxSliceLen uint32, - nullable bool, typeStack set.Set[reflect.Type], ) error { switch value.Kind() { @@ -657,18 +632,18 @@ func (c *genericCodec) unmarshal( if p.Err != nil { return fmt.Errorf("couldn't unmarshal slice: %w", p.Err) } - if numElts32 > maxSliceLen { + if numElts32 > math.MaxInt32 { return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", codec.ErrMaxSliceLenExceeded, numElts32, - maxSliceLen, + math.MaxInt32, ) } - if numElts32 > math.MaxInt32 { + if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", codec.ErrMaxSliceLenExceeded, numElts32, - math.MaxInt32, + c.maxSliceLen, ) } numElts := int(numElts32) @@ -687,7 +662,7 @@ func (c *genericCodec) unmarshal( zeroValue := reflect.Zero(innerType) for i := 0; i < numElts; i++ { value.Set(reflect.Append(value, zeroValue)) - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.unmarshal(p, value.Index(i), typeStack); err != nil { return err } } @@ -705,7 +680,7 @@ func (c *genericCodec) unmarshal( return nil } for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.unmarshal(p, value.Index(i), typeStack); err != nil { return err } } @@ -717,13 +692,6 @@ func (c *genericCodec) unmarshal( } return nil case reflect.Interface: - if nullable { - isNil := p.UnpackBool() - if isNil || p.Err != nil { - return p.Err - } - } - intfImplementor, err := c.typer.UnpackPrefix(p, value.Type()) if err != nil { return err @@ -735,7 +703,7 @@ func (c *genericCodec) unmarshal( typeStack.Add(intfImplementorType) // Unmarshal into the struct - if err := c.unmarshal(p, intfImplementor, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + if err := c.unmarshal(p, intfImplementor, typeStack); err != nil { return err } @@ -769,25 +737,18 @@ func (c *genericCodec) unmarshal( if fieldDesc.UpgradeVersion > upgradeVersion { break } - if err := c.unmarshal(p, value.Field(fieldDesc.Index), fieldDesc.MaxSliceLen, fieldDesc.Nullable, typeStack); err != nil { + if err := c.unmarshal(p, value.Field(fieldDesc.Index), typeStack); err != nil { return err } } return nil case reflect.Ptr: - if nullable { - isNil := p.UnpackBool() - if isNil || p.Err != nil { - return p.Err - } - } - // Get the type this pointer points to t := value.Type().Elem() // Create a new pointer to a new value of the underlying type v := reflect.New(t) // Fill the value - if err := c.unmarshal(p, v.Elem(), c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + if err := c.unmarshal(p, v.Elem(), typeStack); err != nil { return err } // Assign to the top-level struct's member @@ -798,7 +759,14 @@ func 
(c *genericCodec) unmarshal( if p.Err != nil { return fmt.Errorf("couldn't unmarshal map: %w", p.Err) } - if numElts32 > c.maxSliceLen { + if numElts32 > math.MaxInt32 { + return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts32, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", codec.ErrMaxSliceLenExceeded, numElts32, @@ -822,7 +790,7 @@ func (c *genericCodec) unmarshal( keyStartOffset := p.Offset - if err := c.unmarshal(p, mapKey, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + if err := c.unmarshal(p, mapKey, typeStack); err != nil { return err } @@ -840,7 +808,7 @@ func (c *genericCodec) unmarshal( // Get the value mapValue := reflect.New(mapValueType).Elem() - if err := c.unmarshal(p, mapValue, c.maxSliceLen, nullable, typeStack); err != nil { + if err := c.unmarshal(p, mapValue, typeStack); err != nil { return err } diff --git a/codec/reflectcodec/type_codec_test.go b/codec/reflectcodec/type_codec_test.go deleted file mode 100644 index 42b256c4a6c9..000000000000 --- a/codec/reflectcodec/type_codec_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package reflectcodec - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSizeWithNil(t *testing.T) { - require := require.New(t) - var x *int32 - y := int32(1) - c := genericCodec{} - _, _, err := c.size(reflect.ValueOf(x), false /*=nullable*/, nil /*=typeStack*/) - require.ErrorIs(err, errMarshalNil) - len, _, err := c.size(reflect.ValueOf(x), true /*=nullable*/, nil /*=typeStack*/) - require.Empty(err) - require.Equal(1, len) - x = &y - len, _, err = c.size(reflect.ValueOf(y), true /*=nullable*/, nil /*=typeStack*/) - require.Empty(err) - require.Equal(4, len) - len, _, err = c.size(reflect.ValueOf(x), true /*=nullable*/, nil /*=typeStack*/) - require.Empty(err) - require.Equal(5, len) -} diff --git a/codec/registry.go b/codec/registry.go index f0f1c2ff8157..de87e1a9b2aa 100644 --- a/codec/registry.go +++ b/codec/registry.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/codec/test_codec.go b/codec/test_codec.go index 341912a823af..d58e2d818f9e 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
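The type_codec.go hunks above drop the per-field len tag and instead gate the codec-wide maxSliceLen on durangoTime: while time.Now() is before durangoTime the configured limit is still enforced for slices and maps, and afterwards only the hard math.MaxInt32 bound remains. A rough sketch of how the new constructor argument selects the behaviour, mirroring the EnforceSliceLen/IgnoreSliceLen tests added below; the expected outcomes in the comments are assumptions based on those tests, not part of the patch.

package main

import (
	"fmt"
	"math"
	"time"

	"github.com/ava-labs/avalanchego/codec"
	"github.com/ava-labs/avalanchego/codec/linearcodec"
	"github.com/ava-labs/avalanchego/utils/timer/mockable"
)

func main() {
	data := make([]uint16, 1_000_000) // longer than the default max slice length

	// durangoTime far in the future: the configured max slice length is still enforced.
	enforcing := codec.NewManager(math.MaxInt)
	_ = enforcing.RegisterCodec(0, linearcodec.NewDefault(mockable.MaxTime))
	_, err := enforcing.Marshal(0, data)
	fmt.Println(err) // expected: max slice length exceeded

	// durangoTime already passed (zero time): only the math.MaxInt32 hard cap applies.
	ignoring := codec.NewManager(math.MaxInt)
	_ = ignoring.RegisterCodec(0, linearcodec.NewDefault(time.Time{}))
	_, err = ignoring.Marshal(0, data)
	fmt.Println(err) // expected: nil
}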
package codec @@ -8,6 +8,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -23,7 +25,6 @@ var ( TestBigArray, TestPointerToStruct, TestSliceOfStruct, - TestStructWithNullable, TestInterface, TestSliceOfInterface, TestArrayOfInterface, @@ -40,7 +41,6 @@ var ( TestNegativeNumbers, TestTooLargeUnmarshal, TestUnmarshalInvalidInterface, - TestRestrictedSlice, TestExtraSpace, TestSliceLengthOverflow, TestMap, @@ -49,6 +49,15 @@ var ( MultipleTagsTests = []func(c GeneralCodec, t testing.TB){ TestMultipleTags, } + + EnforceSliceLenTests = []func(c GeneralCodec, t testing.TB){ + TestCanNotMarshalLargeSlices, + TestCanNotUnmarshalLargeSlices, + } + + IgnoreSliceLenTests = []func(c GeneralCodec, t testing.TB){ + TestCanMarshalLargeSlices, + } ) // The below structs and interfaces exist @@ -64,8 +73,7 @@ type Foo interface { } type MyInnerStruct struct { - Str string `serialize:"true"` - NumberNotProvided *int32 `serialize:"true,nullable"` + Str string `serialize:"true"` } func (*MyInnerStruct) Foo() int { @@ -88,15 +96,6 @@ type MyInnerStruct3 struct { F Foo `serialize:"true"` } -type MyStructWithNullable struct { - Interface any `serialize:"true,nullable"` - Int32 *int32 `serialize:"true,nullable"` - Int64 *int64 `serialize:"true,nullable"` - Int32Slice []*int32 `serialize:"true,nullable"` - Int32Array [2]*int32 `serialize:"true,nullable"` - Int32Map map[int32]*int32 `serialize:"true,nullable"` -} - type myStruct struct { InnerStruct MyInnerStruct `serialize:"true"` InnerStruct2 *MyInnerStruct `serialize:"true"` @@ -156,23 +155,21 @@ func TestStruct(codec GeneralCodec, t testing.TB) { myMap7["key"] = "value" myMap7[int32(1)] = int32(2) - number := int32(8) - myStructInstance := myStruct{ - InnerStruct: MyInnerStruct{"hello", nil}, - InnerStruct2: &MyInnerStruct{"yello", nil}, + InnerStruct: MyInnerStruct{"hello"}, + InnerStruct2: &MyInnerStruct{"yello"}, Member1: 1, Member2: 2, MySlice: []byte{1, 2, 3, 4}, MySlice2: []string{"one", "two", "three"}, - MySlice3: []MyInnerStruct{{"abc", nil}, {"ab", &number}, {"c", nil}}, + MySlice3: []MyInnerStruct{{"abc"}, {"ab"}, {"c"}}, MySlice4: []*MyInnerStruct2{{true}, {}}, MySlice5: []Foo{&MyInnerStruct2{true}, &MyInnerStruct2{}}, MyArray: [4]byte{5, 6, 7, 8}, MyArray2: [5]string{"four", "five", "six", "seven"}, - MyArray3: [3]MyInnerStruct{{"d", nil}, {"e", nil}, {"f", nil}}, + MyArray3: [3]MyInnerStruct{{"d"}, {"e"}, {"f"}}, MyArray4: [2]*MyInnerStruct2{{}, {true}}, - MyInterface: &MyInnerStruct{"yeet", &number}, + MyInterface: &MyInnerStruct{"yeet"}, InnerStruct3: MyInnerStruct3{ Str: "str", M1: MyInnerStruct{ @@ -427,79 +424,19 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { require.Equal(myPtr, myPtrUnmarshaled) } -func TestStructWithNullable(codec GeneralCodec, t testing.TB) { - require := require.New(t) - n1 := int32(5) - n2 := int64(10) - struct1 := MyStructWithNullable{ - Interface: nil, - Int32: &n1, - Int64: &n2, - Int32Slice: []*int32{ - nil, - nil, - &n1, - }, - Int32Array: [2]*int32{ - nil, - &n1, - }, - Int32Map: map[int32]*int32{ - 1: nil, - 2: &n1, - }, - } - - require.NoError(codec.RegisterType(&MyStructWithNullable{})) - manager := NewDefaultManager() - require.NoError(manager.RegisterCodec(0, codec)) - - bytes, err := manager.Marshal(0, struct1) - require.NoError(err) - - bytesLen, err := manager.Size(0, struct1) - require.NoError(err) - require.Len(bytes, bytesLen) - - var struct1Unmarshaled MyStructWithNullable - version, err := 
manager.Unmarshal(bytes, &struct1Unmarshaled) - require.NoError(err) - require.Zero(version) - require.Equal(struct1, struct1Unmarshaled) - - struct1 = MyStructWithNullable{ - Int32Slice: []*int32{}, - Int32Map: map[int32]*int32{}, - } - bytes, err = manager.Marshal(0, struct1) - require.NoError(err) - - bytesLen, err = manager.Size(0, struct1) - require.NoError(err) - require.Len(bytes, bytesLen) - - var struct1Unmarshaled2 MyStructWithNullable - version, err = manager.Unmarshal(bytes, &struct1Unmarshaled2) - require.NoError(err) - require.Zero(version) - require.Equal(struct1, struct1Unmarshaled2) -} - // Test marshalling a slice of structs func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { require := require.New(t) - n1 := int32(-1) - n2 := int32(0xff) mySlice := []MyInnerStruct3{ { Str: "One", - M1: MyInnerStruct{"Two", &n1}, - F: &MyInnerStruct{"Three", &n2}, + M1: MyInnerStruct{"Two"}, + F: &MyInnerStruct{"Three"}, }, { Str: "Four", - M1: MyInnerStruct{"Five", nil}, - F: &MyInnerStruct{"Six", nil}, + M1: MyInnerStruct{"Five"}, + F: &MyInnerStruct{"Six"}, }, } require.NoError(codec.RegisterType(&MyInnerStruct{})) @@ -947,27 +884,6 @@ func TestUnmarshalInvalidInterface(codec GeneralCodec, t testing.TB) { } } -// Ensure deserializing slices that have been length restricted errors correctly -func TestRestrictedSlice(codec GeneralCodec, t testing.TB) { - require := require.New(t) - - type inner struct { - Bytes []byte `serialize:"true" len:"2"` - } - bytes := []byte{0, 0, 0, 0, 0, 3, 0, 1, 2} - - manager := NewDefaultManager() - require.NoError(manager.RegisterCodec(0, codec)) - - s := inner{} - _, err := manager.Unmarshal(bytes, &s) - require.ErrorIs(err, ErrMaxSliceLenExceeded) - - s.Bytes = []byte{0, 1, 2} - _, err = manager.Marshal(0, s) - require.ErrorIs(err, ErrMaxSliceLenExceeded) -} - // Test unmarshaling something with extra data func TestExtraSpace(codec GeneralCodec, t testing.TB) { require := require.New(t) @@ -982,12 +898,12 @@ func TestExtraSpace(codec GeneralCodec, t testing.TB) { require.ErrorIs(err, ErrExtraSpace) } -// Ensure deserializing slices that have been length restricted errors correctly +// Ensure deserializing slices whose lengths exceed MaxInt32 error correctly func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { require := require.New(t) type inner struct { - Vals []uint32 `serialize:"true" len:"2"` + Vals []uint32 `serialize:"true"` } bytes := []byte{ // Codec Version: @@ -1115,6 +1031,52 @@ func TestMap(codec GeneralCodec, t testing.TB) { require.Len(outerArrayBytes, outerArraySize) } +func TestCanNotMarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + data := make([]uint16, 1_000_000) + + manager := NewManager(math.MaxInt) + require.NoError(manager.RegisterCodec(0, codec)) + + _, err := manager.Marshal(0, data) + require.ErrorIs(err, ErrMaxSliceLenExceeded) +} + +func TestCanNotUnmarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + writer := wrappers.Packer{ + Bytes: make([]byte, 2+4+2_000_000), + } + writer.PackShort(0) + writer.PackInt(1_000_000) + + manager := NewManager(math.MaxInt) + require.NoError(manager.RegisterCodec(0, codec)) + + var data []uint16 + _, err := manager.Unmarshal(writer.Bytes, &data) + require.ErrorIs(err, ErrMaxSliceLenExceeded) +} + +func TestCanMarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + data := make([]uint16, 1_000_000) + + manager := NewManager(math.MaxInt) + 
require.NoError(manager.RegisterCodec(0, codec)) + + bytes, err := manager.Marshal(0, data) + require.NoError(err) + + var unmarshalledData []uint16 + _, err = manager.Unmarshal(bytes, &unmarshalledData) + require.NoError(err) + require.Equal(data, unmarshalledData) +} + func FuzzStructUnmarshal(codec GeneralCodec, f *testing.F) { manager := NewDefaultManager() // Register the types that may be unmarshaled into interfaces diff --git a/config/config.go b/config/config.go index 2d72c3381f08..4c4e4db203ac 100644 --- a/config/config.go +++ b/config/config.go @@ -8,13 +8,12 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config import ( - "context" "crypto/tls" "encoding/base64" "encoding/json" @@ -22,7 +21,6 @@ import ( "fmt" "io/fs" "math" - "net" "os" "path/filepath" "strings" @@ -35,7 +33,6 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/throttling" @@ -50,7 +47,6 @@ import ( "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/password" @@ -68,22 +64,41 @@ const ( chainConfigFileName = "config" chainUpgradeFileName = "upgrade" subnetConfigFileExt = ".json" - ipResolutionTimeout = 30 * time.Second - ipcDeprecationMsg = "IPC API is deprecated" - keystoreDeprecationMsg = "keystore API is deprecated" + ipcDeprecationMsg = "IPC API is deprecated" + keystoreDeprecationMsg = "keystore API is deprecated" + acceptedFrontierGossipDeprecationMsg = "push-based accepted frontier gossip is deprecated" + peerListPushGossipDeprecationMsg = "push-based peer list gossip is deprecated" ) var ( // Deprecated key --> deprecation message (i.e. 
which key replaces it) // TODO: deprecate "BootstrapIDsKey" and "BootstrapIPsKey" - deprecatedKeys = map[string]string{ - IpcAPIEnabledKey: ipcDeprecationMsg, - IpcsChainIDsKey: ipcDeprecationMsg, - IpcsPathKey: ipcDeprecationMsg, + commitThresholdDeprecationMsg = fmt.Sprintf("use --%s instead", SnowCommitThresholdKey) + deprecatedKeys = map[string]string{ + IpcAPIEnabledKey: ipcDeprecationMsg, + IpcsChainIDsKey: ipcDeprecationMsg, + IpcsPathKey: ipcDeprecationMsg, + KeystoreAPIEnabledKey: keystoreDeprecationMsg, + + ConsensusGossipAcceptedFrontierValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipAcceptedFrontierNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipAcceptedFrontierPeerSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptPeerSizeKey: acceptedFrontierGossipDeprecationMsg, + + NetworkPeerListValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListNonValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListPeersGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListGossipFreqKey: peerListPushGossipDeprecationMsg, + + SnowRogueCommitThresholdKey: commitThresholdDeprecationMsg, + SnowVirtuousCommitThresholdKey: commitThresholdDeprecationMsg, } + errConflictingACPOpinion = errors.New("supporting and objecting to the same ACP") errSybilProtectionDisabledStakerWeights = errors.New("sybil protection disabled weights must be positive") errSybilProtectionDisabledOnPublicNetwork = errors.New("sybil protection disabled on public network") errAuthPasswordTooWeak = errors.New("API auth password is not strong enough") @@ -104,21 +119,16 @@ var ( errCannotReadDirectory = errors.New("cannot read directory") errUnmarshalling = errors.New("unmarshalling failed") errFileDoesNotExist = errors.New("file does not exist") + errGzipDeprecatedMsg = errors.New("gzip compression is not supported, use zstd or no compression") ) func getConsensusConfig(v *viper.Viper) snowball.Parameters { p := snowball.Parameters{ - K: v.GetInt(SnowSampleSizeKey), - AlphaPreference: v.GetInt(SnowPreferenceQuorumSizeKey), - AlphaConfidence: v.GetInt(SnowConfidenceQuorumSizeKey), - // During the X-chain linearization we require BetaVirtuous and - // BetaRogue to be equal. Therefore we use the more conservative - // BetaRogue value for both BetaVirtuous and BetaRogue. - // - // TODO: After the X-chain linearization use the - // SnowVirtuousCommitThresholdKey as before. 
- BetaVirtuous: v.GetInt(SnowRogueCommitThresholdKey), - BetaRogue: v.GetInt(SnowRogueCommitThresholdKey), + K: v.GetInt(SnowSampleSizeKey), + AlphaPreference: v.GetInt(SnowPreferenceQuorumSizeKey), + AlphaConfidence: v.GetInt(SnowConfidenceQuorumSizeKey), + BetaVirtuous: v.GetInt(SnowCommitThresholdKey), + BetaRogue: v.GetInt(SnowCommitThresholdKey), ConcurrentRepolls: v.GetInt(SnowConcurrentRepollsKey), OptimalProcessing: v.GetInt(SnowOptimalProcessingKey), MaxOutstandingItems: v.GetInt(SnowMaxProcessingKey), @@ -128,6 +138,10 @@ func getConsensusConfig(v *viper.Viper) snowball.Parameters { p.AlphaPreference = v.GetInt(SnowQuorumSizeKey) p.AlphaConfidence = p.AlphaPreference } + if v.IsSet(SnowRogueCommitThresholdKey) { + p.BetaVirtuous = v.GetInt(SnowRogueCommitThresholdKey) + p.BetaRogue = v.GetInt(SnowRogueCommitThresholdKey) + } return p } @@ -343,12 +357,34 @@ func getNetworkConfig( if err != nil { return network.Config{}, err } + if compressionType == compression.TypeGzip { + return network.Config{}, errGzipDeprecatedMsg + } allowPrivateIPs := !constants.ProductionNetworkIDs.Contains(networkID) if v.IsSet(NetworkAllowPrivateIPsKey) { allowPrivateIPs = v.GetBool(NetworkAllowPrivateIPsKey) } + var supportedACPs set.Set[uint32] + for _, acp := range v.GetIntSlice(ACPSupportKey) { + if acp < 0 || acp > math.MaxInt32 { + return network.Config{}, fmt.Errorf("invalid ACP: %d", acp) + } + supportedACPs.Add(uint32(acp)) + } + + var objectedACPs set.Set[uint32] + for _, acp := range v.GetIntSlice(ACPObjectKey) { + if acp < 0 || acp > math.MaxInt32 { + return network.Config{}, fmt.Errorf("invalid ACP: %d", acp) + } + objectedACPs.Add(uint32(acp)) + } + if supportedACPs.Overlaps(objectedACPs) { + return network.Config{}, errConflictingACPOpinion + } + config := network.Config{ ThrottlerConfig: network.ThrottlerConfig{ MaxInboundConnsPerSec: maxInboundConnsPerSec, @@ -414,6 +450,8 @@ func getNetworkConfig( PeerListNonValidatorGossipSize: v.GetUint32(NetworkPeerListNonValidatorGossipSizeKey), PeerListPeersGossipSize: v.GetUint32(NetworkPeerListPeersGossipSizeKey), PeerListGossipFreq: v.GetDuration(NetworkPeerListGossipFreqKey), + PeerListPullGossipFreq: v.GetDuration(NetworkPeerListPullGossipFreqKey), + PeerListBloomResetFreq: v.GetDuration(NetworkPeerListBloomResetFreqKey), }, DelayConfig: network.DelayConfig{ @@ -428,6 +466,9 @@ func getNetworkConfig( UptimeMetricFreq: v.GetDuration(UptimeMetricFreqKey), MaximumInboundMessageTimeout: v.GetDuration(NetworkMaximumInboundTimeoutKey), + SupportedACPs: supportedACPs, + ObjectedACPs: objectedACPs, + RequireValidatorToConnect: v.GetBool(NetworkRequireValidatorToConnectKey), PeerReadBufferSize: int(v.GetUint(NetworkPeerReadBufferSizeKey)), PeerWriteBufferSize: int(v.GetUint(NetworkPeerWriteBufferSizeKey)), @@ -446,6 +487,10 @@ func getNetworkConfig( return network.Config{}, fmt.Errorf("%q must be >= 0", NetworkOutboundConnectionTimeoutKey) case config.PeerListGossipFreq < 0: return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListGossipFreqKey) + case config.PeerListPullGossipFreq < 0: + return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListPullGossipFreqKey) + case config.PeerListBloomResetFreq < 0: + return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListBloomResetFreqKey) case config.ThrottlerConfig.InboundMsgThrottlerConfig.CPUThrottlerConfig.MaxRecheckDelay < constants.MinInboundThrottlerMaxRecheckDelay: return network.Config{}, fmt.Errorf("%s must be >= %d", InboundThrottlerCPUMaxRecheckDelayKey, 
constants.MinInboundThrottlerMaxRecheckDelay) case config.ThrottlerConfig.InboundMsgThrottlerConfig.DiskThrottlerConfig.MaxRecheckDelay < constants.MinInboundThrottlerMaxRecheckDelay: @@ -531,8 +576,6 @@ func getStateSyncConfig(v *viper.Viper) (node.StateSyncConfig, error) { func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, error) { config := node.BootstrapConfig{ - RetryBootstrap: v.GetBool(RetryBootstrapKey), - RetryBootstrapWarnFrequency: v.GetInt(RetryBootstrapWarnFrequencyKey), BootstrapBeaconConnectionTimeout: v.GetDuration(BootstrapBeaconConnectionTimeoutKey), BootstrapMaxTimeGetAncestors: v.GetDuration(BootstrapMaxTimeGetAncestorsKey), BootstrapAncestorsMaxContainersSent: int(v.GetUint(BootstrapAncestorsMaxContainersSentKey)), @@ -598,64 +641,19 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, } func getIPConfig(v *viper.Viper) (node.IPConfig, error) { - ipResolutionService := v.GetString(PublicIPResolutionServiceKey) - ipResolutionFreq := v.GetDuration(PublicIPResolutionFreqKey) - if ipResolutionFreq <= 0 { - return node.IPConfig{}, fmt.Errorf("%q must be > 0", PublicIPResolutionFreqKey) - } - - stakingPort := uint16(v.GetUint(StakingPortKey)) - publicIP := v.GetString(PublicIPKey) - if publicIP != "" && ipResolutionService != "" { - return node.IPConfig{}, fmt.Errorf("only one of --%s and --%s can be given", PublicIPKey, PublicIPResolutionServiceKey) - } - - // Define default configuration ipConfig := node.IPConfig{ - IPUpdater: dynamicip.NewNoUpdater(), - IPResolutionFreq: ipResolutionFreq, - Nat: nat.NewNoRouter(), - ListenHost: v.GetString(StakingHostKey), + PublicIP: v.GetString(PublicIPKey), + PublicIPResolutionService: v.GetString(PublicIPResolutionServiceKey), + PublicIPResolutionFreq: v.GetDuration(PublicIPResolutionFreqKey), + ListenHost: v.GetString(StakingHostKey), + ListenPort: uint16(v.GetUint(StakingPortKey)), } - - if publicIP != "" { - // User specified a specific public IP to use. - ip := net.ParseIP(publicIP) - if ip == nil { - return node.IPConfig{}, fmt.Errorf("invalid IP Address %s", publicIP) - } - ipConfig.IPPort = ips.NewDynamicIPPort(ip, stakingPort) - return ipConfig, nil - } - if ipResolutionService != "" { - // User specified to use dynamic IP resolution. - resolver, err := dynamicip.NewResolver(ipResolutionService) - if err != nil { - return node.IPConfig{}, fmt.Errorf("couldn't create IP resolver: %w", err) - } - - // Use that to resolve our public IP. - ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) - defer cancel() - ip, err := resolver.Resolve(ctx) - if err != nil { - return node.IPConfig{}, fmt.Errorf("couldn't resolve public IP: %w", err) - } - ipConfig.IPPort = ips.NewDynamicIPPort(ip, stakingPort) - ipConfig.IPUpdater = dynamicip.NewUpdater(ipConfig.IPPort, resolver, ipResolutionFreq) - return ipConfig, nil + if ipConfig.PublicIPResolutionFreq <= 0 { + return node.IPConfig{}, fmt.Errorf("%q must be > 0", PublicIPResolutionFreqKey) } - - // User didn't specify a public IP to use, and they didn't specify a public IP resolution - // service to use. Try to resolve public IP with NAT traversal. 
- nat := nat.GetRouter() - ip, err := nat.ExternalIP() - if err != nil { - return node.IPConfig{}, fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) + if ipConfig.PublicIP != "" && ipConfig.PublicIPResolutionService != "" { + return node.IPConfig{}, fmt.Errorf("only one of --%s and --%s can be given", PublicIPKey, PublicIPResolutionServiceKey) } - ipConfig.IPPort = ips.NewDynamicIPPort(ip, stakingPort) - ipConfig.Nat = nat - ipConfig.AttemptedNATTraversal = true return ipConfig, nil } @@ -945,7 +943,8 @@ func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, e } return node.DatabaseConfig{ - Name: v.GetString(DBTypeKey), + Name: v.GetString(DBTypeKey), + ReadOnly: v.GetBool(DBReadOnlyKey), Path: filepath.Join( GetExpandedArg(v, DBPathKey), constants.NetworkName(networkID), @@ -1338,9 +1337,9 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { } // Gossiping - nodeConfig.AcceptedFrontierGossipFrequency = v.GetDuration(ConsensusAcceptedFrontierGossipFrequencyKey) - if nodeConfig.AcceptedFrontierGossipFrequency < 0 { - return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusAcceptedFrontierGossipFrequencyKey) + nodeConfig.FrontierPollFrequency = v.GetDuration(ConsensusFrontierPollFrequencyKey) + if nodeConfig.FrontierPollFrequency < 0 { + return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusFrontierPollFrequencyKey) } // App handling diff --git a/config/config_test.go b/config/config_test.go index 037b8ac450cc..4c64e448ac13 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config diff --git a/config/flags.go b/config/flags.go index ea86e13c19d5..5515f5110fd6 100644 --- a/config/flags.go +++ b/config/flags.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -31,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ulimit" "github.com/ava-labs/avalanchego/utils/units" ) @@ -103,6 +104,10 @@ func addNodeFlags(fs *pflag.FlagSet) { // Network ID fs.String(NetworkNameKey, constants.CaminoName, "Network ID this node will connect to") + // ACP flagging + fs.IntSlice(ACPSupportKey, nil, "ACPs to support adoption") + fs.IntSlice(ACPObjectKey, nil, "ACPs to object adoption") + // AVAX fees fs.Uint64(TxFeeKey, genesis.LocalParams.TxFee, "Transaction fee, in nAVAX") fs.Uint64(CreateAssetTxFeeKey, genesis.LocalParams.CreateAssetTxFee, "Transaction fee, in nAVAX, for transactions that create new assets") @@ -116,6 +121,7 @@ func addNodeFlags(fs *pflag.FlagSet) { // Database fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. 
May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. Ignored if %s is specified", DBConfigContentKey)) fs.String(DBConfigContentKey, "", "Specifies base64 encoded database config content") @@ -137,11 +143,13 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, constants.DefaultNetworkPeerListNonValidatorGossipSize, "Number of non-validators that the node will gossip peer list to") fs.Uint(NetworkPeerListPeersGossipSizeKey, constants.DefaultNetworkPeerListPeersGossipSize, "Number of total peers (including non-validators and validators) that the node will gossip peer list to") fs.Duration(NetworkPeerListGossipFreqKey, constants.DefaultNetworkPeerListGossipFreq, "Frequency to gossip peers to other nodes") + fs.Duration(NetworkPeerListPullGossipFreqKey, constants.DefaultNetworkPeerListPullGossipFreq, "Frequency to request peers from other nodes") + fs.Duration(NetworkPeerListBloomResetFreqKey, constants.DefaultNetworkPeerListBloomResetFreq, "Frequency to recalculate the bloom filter used to request new peers from other nodes") // Public IP Resolution - fs.String(PublicIPKey, "", "Public IP of this node for P2P communication. If empty, try to discover with NAT") + fs.String(PublicIPKey, "", "Public IP of this node for P2P communication") fs.Duration(PublicIPResolutionFreqKey, 5*time.Minute, "Frequency at which this node resolves/updates its public IP and renew NAT mappings, if applicable") - fs.String(PublicIPResolutionServiceKey, "", fmt.Sprintf("Only acceptable values are 'ifconfigco', 'opendns' or 'ifconfigme'. When provided, the node will use that service to periodically resolve/update its public IP. Ignored if %s is set", PublicIPKey)) + fs.String(PublicIPResolutionServiceKey, "", fmt.Sprintf("Only acceptable values are %q, %q or %q. When provided, the node will use that service to periodically resolve/update its public IP", dynamicip.OpenDNSName, dynamicip.IFConfigCoName, dynamicip.IFConfigMeName)) // Inbound Connection Throttling fs.Duration(NetworkInboundConnUpgradeThrottlerCooldownKey, constants.DefaultInboundConnUpgradeThrottlerCooldown, "Upgrade an inbound connection from a given IP at most once per this duration. If 0, don't rate-limit inbound connection upgrades") @@ -160,7 +168,7 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(NetworkPingTimeoutKey, constants.DefaultPingPongTimeout, "Timeout value for Ping-Pong with a peer") fs.Duration(NetworkPingFrequencyKey, constants.DefaultPingFrequency, "Frequency of pinging other peers") - fs.String(NetworkCompressionTypeKey, constants.DefaultNetworkCompressionType.String(), fmt.Sprintf("Compression type for outbound messages. Must be one of [%s, %s, %s]", compression.TypeGzip, compression.TypeZstd, compression.TypeNone)) + fs.String(NetworkCompressionTypeKey, constants.DefaultNetworkCompressionType.String(), fmt.Sprintf("Compression type for outbound messages. 
Must be one of [%s, %s]", compression.TypeZstd, compression.TypeNone)) fs.Duration(NetworkMaxClockDifferenceKey, constants.DefaultNetworkMaxClockDifference, "Max allowed clock difference value between this node and peers") // Note: The default value is set to false here because the default @@ -187,9 +195,9 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(BenchlistMinFailingDurationKey, constants.DefaultBenchlistMinFailingDuration, "Minimum amount of time messages to a peer must be failing before the peer is benched") // Router - fs.Duration(ConsensusAcceptedFrontierGossipFrequencyKey, constants.DefaultAcceptedFrontierGossipFrequency, "Frequency of gossiping accepted frontiers") fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain") fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain") + fs.Duration(ConsensusFrontierPollFrequencyKey, constants.DefaultFrontierPollFrequency, "Frequency of polling for new consensus frontiers") fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier") fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier") fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, constants.DefaultConsensusGossipAcceptedFrontierPeerSize, "Number of peers to gossip to when gossiping accepted frontier") @@ -302,8 +310,6 @@ func addNodeFlags(fs *pflag.FlagSet) { // TODO: combine "BootstrapIPsKey" and "BootstrapIDsKey" into one flag fs.String(BootstrapIPsKey, "", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") fs.String(BootstrapIDsKey, "", "Comma separated list of bootstrap peer ids to connect to. Example: NodeID-JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,NodeID-8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") - fs.Bool(RetryBootstrapKey, true, "Specifies whether bootstrap should be retried") - fs.Int(RetryBootstrapWarnFrequencyKey, 50, "Specifies how many times bootstrap should be retried before warning the operator") fs.Duration(BootstrapBeaconConnectionTimeoutKey, time.Minute, "Timeout before emitting a warn log when connecting to bootstrapping beacons") fs.Duration(BootstrapMaxTimeGetAncestorsKey, 50*time.Millisecond, "Max Time to spend fetching a container and its ancestors when responding to a GetAncestors") fs.Uint(BootstrapAncestorsMaxContainersSentKey, 2000, "Max number of containers in an Ancestors message sent by this node") @@ -314,10 +320,12 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Int(SnowQuorumSizeKey, snowball.DefaultParameters.AlphaConfidence, "Threshold of nodes required to update this node's preference and increase its confidence in a network poll") fs.Int(SnowPreferenceQuorumSizeKey, snowball.DefaultParameters.AlphaPreference, fmt.Sprintf("Threshold of nodes required to update this node's preference in a network poll. Ignored if %s is provided", SnowQuorumSizeKey)) fs.Int(SnowConfidenceQuorumSizeKey, snowball.DefaultParameters.AlphaConfidence, fmt.Sprintf("Threshold of nodes required to increase this node's confidence in a network poll. 
Ignored if %s is provided", SnowQuorumSizeKey)) - // TODO: Replace this temporary flag description after the X-chain - // linearization with "Beta value to use for virtuous transactions" + + fs.Int(SnowCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for transactions") + // TODO: Remove these once enough time has passed with SnowCommitThresholdKey fs.Int(SnowVirtuousCommitThresholdKey, snowball.DefaultParameters.BetaVirtuous, "This flag is temporarily ignored due to the X-chain linearization") fs.Int(SnowRogueCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for rogue transactions") + fs.Int(SnowConcurrentRepollsKey, snowball.DefaultParameters.ConcurrentRepolls, "Minimum number of concurrent polls for finalizing consensus") fs.Int(SnowOptimalProcessingKey, snowball.DefaultParameters.OptimalProcessing, "Optimal number of processing containers in consensus") fs.Int(SnowMaxProcessingKey, snowball.DefaultParameters.MaxOutstandingItems, "Maximum number of processing items to be considered healthy") diff --git a/config/keys.go b/config/keys.go index 52038889fe3a..5aef8f0a0700 100644 --- a/config/keys.go +++ b/config/keys.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -23,6 +23,8 @@ const ( GenesisFileKey = "genesis-file" GenesisFileContentKey = "genesis-file-content" NetworkNameKey = "network-id" + ACPSupportKey = "acp-support" + ACPObjectKey = "acp-object" TxFeeKey = "tx-fee" CreateAssetTxFeeKey = "create-asset-tx-fee" CreateSubnetTxFeeKey = "create-subnet-tx-fee" @@ -44,6 +46,7 @@ const ( StakeMintingPeriodKey = "stake-minting-period" StakeSupplyCapKey = "stake-supply-cap" DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" DBPathKey = "db-dir" DBConfigFileKey = "db-config-file" DBConfigContentKey = "db-config-file-content" @@ -101,6 +104,8 @@ const ( NetworkPeerListNonValidatorGossipSizeKey = "network-peer-list-non-validator-gossip-size" NetworkPeerListPeersGossipSizeKey = "network-peer-list-peers-gossip-size" NetworkPeerListGossipFreqKey = "network-peer-list-gossip-frequency" + NetworkPeerListPullGossipFreqKey = "network-peer-list-pull-gossip-frequency" + NetworkPeerListBloomResetFreqKey = "network-peer-list-bloom-reset-frequency" NetworkInitialReconnectDelayKey = "network-initial-reconnect-delay" NetworkReadHandshakeTimeoutKey = "network-read-handshake-timeout" NetworkPingTimeoutKey = "network-ping-timeout" @@ -137,6 +142,7 @@ const ( SnowConfidenceQuorumSizeKey = "snow-confidence-quorum-size" SnowVirtuousCommitThresholdKey = "snow-virtuous-commit-threshold" SnowRogueCommitThresholdKey = "snow-rogue-commit-threshold" + SnowCommitThresholdKey = "snow-commit-threshold" SnowConcurrentRepollsKey = "snow-concurrent-repolls" SnowOptimalProcessingKey = "snow-optimal-processing" SnowMaxProcessingKey = "snow-max-processing" @@ -152,8 +158,9 @@ const ( IpcsChainIDsKey = "ipcs-chain-ids" IpcsPathKey = "ipcs-path" MeterVMsEnabledKey = "meter-vms-enabled" - ConsensusAcceptedFrontierGossipFrequencyKey = "consensus-accepted-frontier-gossip-frequency" ConsensusAppConcurrencyKey = "consensus-app-concurrency" + ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" + ConsensusFrontierPollFrequencyKey = "consensus-frontier-poll-frequency" 
ConsensusGossipAcceptedFrontierValidatorSizeKey = "consensus-accepted-frontier-gossip-validator-size" ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size" ConsensusGossipAcceptedFrontierPeerSizeKey = "consensus-accepted-frontier-gossip-peer-size" @@ -163,7 +170,6 @@ const ( AppGossipValidatorSizeKey = "consensus-app-gossip-validator-size" AppGossipNonValidatorSizeKey = "consensus-app-gossip-non-validator-size" AppGossipPeerSizeKey = "consensus-app-gossip-peer-size" - ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" ProposerVMUseCurrentHeightKey = "proposervm-use-current-height" FdLimitKey = "fd-limit" IndexEnabledKey = "index-enabled" @@ -172,8 +178,6 @@ const ( RouterHealthMaxOutstandingRequestsKey = "router-health-max-outstanding-requests" HealthCheckFreqKey = "health-check-frequency" HealthCheckAveragerHalflifeKey = "health-check-averager-halflife" - RetryBootstrapKey = "bootstrap-retry-enabled" - RetryBootstrapWarnFrequencyKey = "bootstrap-retry-warn-frequency" PluginDirKey = "plugin-dir" BootstrapBeaconConnectionTimeoutKey = "bootstrap-beacon-connection-timeout" BootstrapMaxTimeGetAncestorsKey = "bootstrap-max-time-get-ancestors" diff --git a/config/viper.go b/config/viper.go index 1e236ea32001..59ecf1941687 100644 --- a/config/viper.go +++ b/config/viper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config diff --git a/database/batch.go b/database/batch.go index b097dc60eea7..8699a90c2960 100644 --- a/database/batch.go +++ b/database/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's diff --git a/database/benchmark_database.go b/database/benchmark_database.go index 949c071f84f5..43af10db1c2b 100644 --- a/database/benchmark_database.go +++ b/database/benchmark_database.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database import ( - "fmt" "math/rand" "testing" @@ -15,16 +14,16 @@ import ( var ( // Benchmarks is a list of all database benchmarks - Benchmarks = []func(b *testing.B, db Database, name string, keys, values [][]byte){ - BenchmarkGet, - BenchmarkPut, - BenchmarkDelete, - BenchmarkBatchPut, - BenchmarkBatchDelete, - BenchmarkBatchWrite, - BenchmarkParallelGet, - BenchmarkParallelPut, - BenchmarkParallelDelete, + Benchmarks = map[string]func(b *testing.B, db Database, keys, values [][]byte){ + "Get": BenchmarkGet, + "Put": BenchmarkPut, + "Delete": BenchmarkDelete, + "BatchPut": BenchmarkBatchPut, + "BatchDelete": BenchmarkBatchDelete, + "BatchWrite": BenchmarkBatchWrite, + "ParallelGet": BenchmarkParallelGet, + "ParallelPut": BenchmarkParallelPut, + "ParallelDelete": BenchmarkParallelDelete, } // BenchmarkSizes to use with each benchmark BenchmarkSizes = [][]int{ @@ -56,169 +55,150 @@ func SetupBenchmark(b *testing.B, count int, keySize, valueSize int) ([][]byte, } // BenchmarkGet measures the time it takes to get an operation from a database. 
-func BenchmarkGet(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkGet(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - require := require.New(b) + require := require.New(b) - for i, key := range keys { - value := values[i] - require.NoError(db.Put(key, value)) - } + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - // Reads b.N values from the db - for i := 0; i < b.N; i++ { - _, err := db.Get(keys[i%count]) - require.NoError(err) - } - }) + // Reads b.N values from the db + for i := 0; i < b.N; i++ { + _, err := db.Get(keys[i%count]) + require.NoError(err) + } } // BenchmarkPut measures the time it takes to write an operation to a database. -func BenchmarkPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkPut(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - // Writes b.N values to the db - for i := 0; i < b.N; i++ { - require.NoError(b, db.Put(keys[i%count], values[i%count])) - } - }) + // Writes b.N values to the db + for i := 0; i < b.N; i++ { + require.NoError(b, db.Put(keys[i%count], values[i%count])) + } } // BenchmarkDelete measures the time it takes to delete a (k, v) from a database. -func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkDelete(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - require := require.New(b) + require := require.New(b) - // Writes random values of size _size_ to the database - for i, key := range keys { - value := values[i] - require.NoError(db.Put(key, value)) - } + // Writes random values of size _size_ to the database + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - // Deletes b.N values from the db - for i := 0; i < b.N; i++ { - require.NoError(db.Delete(keys[i%count])) - } - }) + // Deletes b.N values from the db + for i := 0; i < b.N; i++ { + require.NoError(db.Delete(keys[i%count])) + } } // BenchmarkBatchPut measures the time it takes to batch put. -func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkBatchPut(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - batch := db.NewBatch() - for i := 0; i < b.N; i++ { - require.NoError(b, batch.Put(keys[i%count], values[i%count])) - } - }) + batch := db.NewBatch() + for i := 0; i < b.N; i++ { + require.NoError(b, batch.Put(keys[i%count], values[i%count])) + } } // BenchmarkBatchDelete measures the time it takes to batch delete. 
-func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkBatchDelete(b *testing.B, db Database, keys, _ [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - batch := db.NewBatch() - for i := 0; i < b.N; i++ { - require.NoError(b, batch.Delete(keys[i%count])) - } - }) + batch := db.NewBatch() + for i := 0; i < b.N; i++ { + require.NoError(b, batch.Delete(keys[i%count])) + } } // BenchmarkBatchWrite measures the time it takes to batch write. -func BenchmarkBatchWrite(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkBatchWrite(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) - count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.write", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - require := require.New(b) + require := require.New(b) - batch := db.NewBatch() - for i, key := range keys { - value := values[i] - require.NoError(batch.Put(key, value)) - } + batch := db.NewBatch() + for i, key := range keys { + value := values[i] + require.NoError(batch.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - for i := 0; i < b.N; i++ { - require.NoError(batch.Write()) - } - }) + for i := 0; i < b.N; i++ { + require.NoError(batch.Write()) + } } // BenchmarkParallelGet measures the time it takes to read in parallel. -func BenchmarkParallelGet(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelGet(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - require := require.New(b) + require := require.New(b) - for i, key := range keys { - value := values[i] - require.NoError(db.Put(key, value)) - } + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for i := 0; pb.Next(); i++ { - _, err := db.Get(keys[i%count]) - require.NoError(err) - } - }) + b.RunParallel(func(pb *testing.PB) { + for i := 0; pb.Next(); i++ { + _, err := db.Get(keys[i%count]) + require.NoError(err) + } }) } // BenchmarkParallelPut measures the time it takes to write to the db in parallel. -func BenchmarkParallelPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelPut(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - b.RunParallel(func(pb *testing.PB) { - // Write N values to the db - for i := 0; pb.Next(); i++ { - require.NoError(b, db.Put(keys[i%count], values[i%count])) - } - }) + b.RunParallel(func(pb *testing.PB) { + // Write N values to the db + for i := 0; pb.Next(); i++ { + require.NoError(b, db.Put(keys[i%count], values[i%count])) + } }) } // BenchmarkParallelDelete measures the time it takes to delete a (k, v) from the db. 
-func BenchmarkParallelDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelDelete(b *testing.B, db Database, keys, values [][]byte) { require.NotEmpty(b, keys) count := len(keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - require := require.New(b) - for i, key := range keys { - value := values[i] - require.NoError(db.Put(key, value)) + require := require.New(b) + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + // Deletes b.N values from the db + for i := 0; pb.Next(); i++ { + require.NoError(db.Delete(keys[i%count])) } - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - // Deletes b.N values from the db - for i := 0; pb.Next(); i++ { - require.NoError(db.Delete(keys[i%count])) - } - }) }) } diff --git a/database/common.go b/database/common.go index a27b0d27d4d0..651b8fe5719c 100644 --- a/database/common.go +++ b/database/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/database/corruptabledb/db.go b/database/corruptabledb/db.go index cb46953f6858..d5bd6a711353 100644 --- a/database/corruptabledb/db.go +++ b/database/corruptabledb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package corruptabledb diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go index d4c14f782986..5c7a48a64c4d 100644 --- a/database/corruptabledb/db_test.go +++ b/database/corruptabledb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package corruptabledb @@ -18,24 +18,29 @@ import ( var errTest = errors.New("non-nil error") +func newDB() *Database { + baseDB := memdb.New() + return New(baseDB) +} + func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - db := New(baseDB) - test(t, db) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + test(t, newDB()) + }) } } func FuzzKeyValue(f *testing.F) { - baseDB := memdb.New() - db := New(baseDB) - database.FuzzKeyValue(f, db) + database.FuzzKeyValue(f, newDB()) } func FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db := New(baseDB) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB()) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB()) } // TestCorruption tests to make sure corruptabledb wrapper works as expected. @@ -70,9 +75,7 @@ func TestCorruption(t *testing.T) { return err }, } - baseDB := memdb.New() - // wrap this db - corruptableDB := New(baseDB) + corruptableDB := newDB() _ = corruptableDB.handleError(errTest) for name, testFn := range tests { t.Run(name, func(tt *testing.T) { @@ -176,9 +179,7 @@ func TestIterator(t *testing.T) { ctrl := gomock.NewController(t) // Make a database - baseDB := memdb.New() - corruptableDB := New(baseDB) - + corruptableDB := newDB() // Put a key-value pair in the database. 
require.NoError(corruptableDB.Put([]byte{0}, []byte{1})) diff --git a/database/database.go b/database/database.go index d0c274131d75..938c7f631b93 100644 --- a/database/database.go +++ b/database/database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's diff --git a/database/encdb/codec.go b/database/encdb/codec.go new file mode 100644 index 000000000000..62223b4fdd2f --- /dev/null +++ b/database/encdb/codec.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package encdb + +import ( + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" +) + +const CodecVersion = 0 + +var Codec codec.Manager + +func init() { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewDefaultManager() + + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { + panic(err) + } +} diff --git a/database/encdb/db.go b/database/encdb/db.go index 42518bef9fc9..1b225b38f3cd 100644 --- a/database/encdb/db.go +++ b/database/encdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package encdb @@ -13,16 +13,10 @@ import ( "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/hashing" ) -const ( - codecVersion = 0 -) - var ( _ database.Database = (*Database)(nil) _ database.Batch = (*batch)(nil) @@ -32,7 +26,6 @@ var ( // Database encrypts all values that are provided type Database struct { lock sync.RWMutex - codec codec.Manager cipher cipher.AEAD db database.Database closed bool @@ -42,16 +35,10 @@ type Database struct { func New(password []byte, db database.Database) (*Database, error) { h := hashing.ComputeHash256(password) aead, err := chacha20poly1305.NewX(h) - if err != nil { - return nil, err - } - c := linearcodec.NewDefault() - manager := codec.NewDefaultManager() return &Database{ - codec: manager, cipher: aead, db: db, - }, manager.RegisterCodec(codecVersion, c) + }, err } func (db *Database) Has(key []byte) (bool, error) { @@ -297,7 +284,7 @@ func (db *Database) encrypt(plaintext []byte) ([]byte, error) { return nil, err } ciphertext := db.cipher.Seal(nil, nonce, plaintext, nil) - return db.codec.Marshal(codecVersion, &encryptedValue{ + return Codec.Marshal(CodecVersion, &encryptedValue{ Ciphertext: ciphertext, Nonce: nonce, }) @@ -305,7 +292,7 @@ func (db *Database) encrypt(plaintext []byte) ([]byte, error) { func (db *Database) decrypt(ciphertext []byte) ([]byte, error) { val := encryptedValue{} - if _, err := db.codec.Unmarshal(ciphertext, &val); err != nil { + if _, err := Codec.Unmarshal(ciphertext, &val); err != nil { return nil, err } return db.cipher.Open(nil, val.Nonce, val.Ciphertext, nil) diff --git a/database/encdb/db_test.go b/database/encdb/db_test.go index 177259f5c7f2..b3dfdfed68e7 100644 --- a/database/encdb/db_test.go +++ b/database/encdb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package encdb import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -15,37 +16,43 @@ import ( const testPassword = "lol totally a secure password" //nolint:gosec func TestInterface(t *testing.T) { - for _, test := range database.Tests { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(t, err) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + unencryptedDB := memdb.New() + db, err := New([]byte(testPassword), unencryptedDB) + require.NoError(t, err) - test(t, db) + test(t, db) + }) } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { unencryptedDB := memdb.New() db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func FuzzNewIteratorWithPrefix(f *testing.F) { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(b, err) - bench(b, db, "encdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("encdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + bench(b, newDB(b), keys, values) + }) } } } diff --git a/database/errors.go b/database/errors.go index ee46521b6499..24f93aa8da27 100644 --- a/database/errors.go +++ b/database/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/database/helpers.go b/database/helpers.go index d4261920dcdf..7e66c58fa770 100644 --- a/database/helpers.go +++ b/database/helpers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/database/helpers_test.go b/database/helpers_test.go index 79e37ea74b67..1ad3ccc2b20d 100644 --- a/database/helpers_test.go +++ b/database/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/database/iterator.go b/database/iterator.go index c83ceac49639..75126006d4ac 100644 --- a/database/iterator.go +++ b/database/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
// For ease of implementation, our database's interface matches Ethereum's diff --git a/database/leveldb/db.go b/database/leveldb/db.go index ab08db75a1c8..8ae825ef2fb1 100644 --- a/database/leveldb/db.go +++ b/database/leveldb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go index bf6bdeac7f27..ad8d60cbbc4f 100644 --- a/database/leveldb/db_test.go +++ b/database/leveldb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb import ( + "fmt" "testing" "github.com/prometheus/client_golang/prometheus" @@ -15,51 +16,60 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(t, err) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + folder := t.TempDir() + db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(t, err) - test(t, db) + test(t, db) - _ = db.Close() + _ = db.Close() + }) } } -func FuzzKeyValue(f *testing.F) { - folder := f.TempDir() +func newDB(t testing.TB) database.Database { + folder := t.TempDir() db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) + require.NoError(t, err) + return db +} +func FuzzKeyValue(f *testing.F) { + db := newDB(f) defer db.Close() database.FuzzKeyValue(f, db) } func FuzzNewIteratorWithPrefix(f *testing.F) { - folder := f.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) - + db := newDB(f) defer db.Close() database.FuzzNewIteratorWithPrefix(f, db) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + defer db.Close() + + database.FuzzNewIteratorWithStartAndPrefix(f, db) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - folder := b.TempDir() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("leveldb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := newDB(b) - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(b, err) - - bench(b, db, "leveldb", keys, values) + bench(b, db, keys, values) - // The database may have been closed by the test, so we don't care if it - // errors here. - _ = db.Close() + // The database may have been closed by the test, so we don't care if it + // errors here. + _ = db.Close() + }) } } } diff --git a/database/leveldb/metrics.go b/database/leveldb/metrics.go index 11bca8ddb07e..055579c8e6f4 100644 --- a/database/leveldb/metrics.go +++ b/database/leveldb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package leveldb diff --git a/database/linkeddb/codec.go b/database/linkeddb/codec.go index 7780690b2e92..f1982e1c7cfd 100644 --- a/database/linkeddb/codec.go +++ b/database/linkeddb/codec.go @@ -1,29 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const ( - codecVersion = 0 -) +const CodecVersion = 0 -// c does serialization and deserialization -var ( - c codec.Manager -) +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/database/linkeddb/linkeddb.go b/database/linkeddb/linkeddb.go index 6e62dd6015e2..2d6721626862 100644 --- a/database/linkeddb/linkeddb.go +++ b/database/linkeddb/linkeddb.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb @@ -327,7 +327,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { return node{}, err } n := node{} - _, err = c.Unmarshal(nodeBytes, &n) + _, err = Codec.Unmarshal(nodeBytes, &n) if err == nil { ldb.nodeCache.Put(keyStr, &n) } @@ -336,7 +336,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { func (ldb *linkedDB) putNode(key []byte, n node) error { ldb.updatedNodes[string(key)] = &n - nodeBytes, err := c.Marshal(codecVersion, n) + nodeBytes, err := Codec.Marshal(CodecVersion, n) if err != nil { return err } diff --git a/database/linkeddb/linkeddb_test.go b/database/linkeddb/linkeddb_test.go index 6f9cba4501de..815cac730b05 100644 --- a/database/linkeddb/linkeddb_test.go +++ b/database/linkeddb/linkeddb_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb diff --git a/database/memdb/db.go b/database/memdb/db.go index 92b687afc0a5..914739f9e206 100644 --- a/database/memdb/db.go +++ b/database/memdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package memdb diff --git a/database/memdb/db_test.go b/database/memdb/db_test.go index b0497758f5c2..90dc459f3602 100644 --- a/database/memdb/db_test.go +++ b/database/memdb/db_test.go @@ -1,17 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package memdb import ( + "fmt" "testing" "github.com/ava-labs/avalanchego/database" ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - test(t, New()) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + test(t, New()) + }) } } @@ -23,12 +26,18 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New()) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New()) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := New() - bench(b, db, "memdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("memdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := New() + bench(b, db, keys, values) + }) } } } diff --git a/database/meterdb/db.go b/database/meterdb/db.go index a2640ca2dc00..fd3b3b77d7a8 100644 --- a/database/meterdb/db.go +++ b/database/meterdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go index ddd613353946..eee3f1c23c01 100644 --- a/database/meterdb/db_test.go +++ b/database/meterdb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb import ( + "fmt" "testing" "github.com/prometheus/client_golang/prometheus" @@ -15,37 +16,43 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(t, err) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + baseDB := memdb.New() + db, err := New("", prometheus.NewRegistry(), baseDB) + require.NoError(t, err) - test(t, db) + test(t, db) + }) } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { baseDB := memdb.New() db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(b, err) - bench(b, db, "meterdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("meterdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + bench(b, newDB(b), keys, values) + }) } } } diff --git a/database/meterdb/metrics.go 
b/database/meterdb/metrics.go index a0a20e9d5f26..40cdf4566413 100644 --- a/database/meterdb/metrics.go +++ b/database/meterdb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb diff --git a/database/mock_batch.go b/database/mock_batch.go index b20ae92473f1..e3762514954f 100644 --- a/database/mock_batch.go +++ b/database/mock_batch.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/database (interfaces: Batch) +// +// Generated by this command: +// +// mockgen -package=database -destination=database/mock_batch.go github.com/ava-labs/avalanchego/database Batch +// // Package database is a generated GoMock package. package database @@ -45,7 +47,7 @@ func (m *MockBatch) Delete(arg0 []byte) error { } // Delete indicates an expected call of Delete. -func (mr *MockBatchMockRecorder) Delete(arg0 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Delete(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBatch)(nil).Delete), arg0) } @@ -73,7 +75,7 @@ func (m *MockBatch) Put(arg0, arg1 []byte) error { } // Put indicates an expected call of Put. -func (mr *MockBatchMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Put(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockBatch)(nil).Put), arg0, arg1) } @@ -87,7 +89,7 @@ func (m *MockBatch) Replay(arg0 KeyValueWriterDeleter) error { } // Replay indicates an expected call of Replay. -func (mr *MockBatchMockRecorder) Replay(arg0 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Replay(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Replay", reflect.TypeOf((*MockBatch)(nil).Replay), arg0) } diff --git a/database/mock_database.go b/database/mock_database.go index f790f2f08e37..e393b47091d7 100644 --- a/database/mock_database.go +++ b/database/mock_database.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/database (interfaces: Database) +// +// Generated by this command: +// +// mockgen -package=database -destination=database/mock_database.go github.com/ava-labs/avalanchego/database Database +// // Package database is a generated GoMock package. package database @@ -60,7 +62,7 @@ func (m *MockDatabase) Compact(arg0, arg1 []byte) error { } // Compact indicates an expected call of Compact. -func (mr *MockDatabaseMockRecorder) Compact(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) Compact(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockDatabase)(nil).Compact), arg0, arg1) } @@ -74,7 +76,7 @@ func (m *MockDatabase) Delete(arg0 []byte) error { } // Delete indicates an expected call of Delete. 
-func (mr *MockDatabaseMockRecorder) Delete(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) Delete(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDatabase)(nil).Delete), arg0) } @@ -89,7 +91,7 @@ func (m *MockDatabase) Get(arg0 []byte) ([]byte, error) { } // Get indicates an expected call of Get. -func (mr *MockDatabaseMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDatabase)(nil).Get), arg0) } @@ -104,22 +106,22 @@ func (m *MockDatabase) Has(arg0 []byte) (bool, error) { } // Has indicates an expected call of Has. -func (mr *MockDatabaseMockRecorder) Has(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) Has(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDatabase)(nil).Has), arg0) } // HealthCheck mocks base method. -func (m *MockDatabase) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockDatabase) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockDatabaseMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockDatabase)(nil).HealthCheck), arg0) } @@ -161,7 +163,7 @@ func (m *MockDatabase) NewIteratorWithPrefix(arg0 []byte) Iterator { } // NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. -func (mr *MockDatabaseMockRecorder) NewIteratorWithPrefix(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) NewIteratorWithPrefix(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockDatabase)(nil).NewIteratorWithPrefix), arg0) } @@ -175,7 +177,7 @@ func (m *MockDatabase) NewIteratorWithStart(arg0 []byte) Iterator { } // NewIteratorWithStart indicates an expected call of NewIteratorWithStart. -func (mr *MockDatabaseMockRecorder) NewIteratorWithStart(arg0 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) NewIteratorWithStart(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockDatabase)(nil).NewIteratorWithStart), arg0) } @@ -189,7 +191,7 @@ func (m *MockDatabase) NewIteratorWithStartAndPrefix(arg0, arg1 []byte) Iterator } // NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. -func (mr *MockDatabaseMockRecorder) NewIteratorWithStartAndPrefix(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) NewIteratorWithStartAndPrefix(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockDatabase)(nil).NewIteratorWithStartAndPrefix), arg0, arg1) } @@ -203,7 +205,7 @@ func (m *MockDatabase) Put(arg0, arg1 []byte) error { } // Put indicates an expected call of Put. 
-func (mr *MockDatabaseMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDatabaseMockRecorder) Put(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockDatabase)(nil).Put), arg0, arg1) } diff --git a/database/mock_iterator.go b/database/mock_iterator.go index 4fa36ae24f69..77856c92ea5e 100644 --- a/database/mock_iterator.go +++ b/database/mock_iterator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/database (interfaces: Iterator) +// +// Generated by this command: +// +// mockgen -package=database -destination=database/mock_iterator.go github.com/ava-labs/avalanchego/database Iterator +// // Package database is a generated GoMock package. package database diff --git a/database/pebble/batch.go b/database/pebble/batch.go index b6c9d283b64d..a53b962dc7be 100644 --- a/database/pebble/batch.go +++ b/database/pebble/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pebble diff --git a/database/pebble/batch_test.go b/database/pebble/batch_test.go index a84134708956..66f4075558fd 100644 --- a/database/pebble/batch_test.go +++ b/database/pebble/batch_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pebble diff --git a/database/pebble/db.go b/database/pebble/db.go index 7aa718082a35..4838ff4b0ffe 100644 --- a/database/pebble/db.go +++ b/database/pebble/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pebble diff --git a/database/pebble/db_test.go b/database/pebble/db_test.go index cba67a79a88f..5c8650dbf183 100644 --- a/database/pebble/db_test.go +++ b/database/pebble/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pebble import ( + "fmt" "testing" "github.com/prometheus/client_golang/prometheus" @@ -22,10 +23,12 @@ func newDB(t testing.TB) *Database { } func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := newDB(t) - test(t, db) - _ = db.Close() + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := newDB(t) + test(t, db) + _ = db.Close() + }) } } @@ -41,13 +44,21 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { _ = db.Close() } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db) + _ = db.Close() +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := newDB(b) - bench(b, db, "pebble", keys, values) - _ = db.Close() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("pebble_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := newDB(b) + bench(b, db, keys, values) + _ = db.Close() + }) } } } diff --git a/database/pebble/iterator.go b/database/pebble/iterator.go index 115c122e30f4..5fc73f308a13 100644 --- a/database/pebble/iterator.go +++ b/database/pebble/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pebble diff --git a/database/prefixdb/db.go b/database/prefixdb/db.go index f4ba04e31b06..d8af4b101900 100644 --- a/database/prefixdb/db.go +++ b/database/prefixdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prefixdb @@ -39,29 +39,57 @@ type Database struct { closed bool } +func newDB(prefix []byte, db database.Database) *Database { + return &Database{ + dbPrefix: prefix, + db: db, + bufferPool: sync.Pool{ + New: func() interface{} { + return make([]byte, 0, defaultBufCap) + }, + }, + } +} + // New returns a new prefixed database func New(prefix []byte, db database.Database) *Database { if prefixDB, ok := db.(*Database); ok { - simplePrefix := make([]byte, len(prefixDB.dbPrefix)+len(prefix)) - copy(simplePrefix, prefixDB.dbPrefix) - copy(simplePrefix[len(prefixDB.dbPrefix):], prefix) - return NewNested(simplePrefix, prefixDB.db) + return newDB( + JoinPrefixes(prefixDB.dbPrefix, prefix), + prefixDB.db, + ) } - return NewNested(prefix, db) + return newDB( + MakePrefix(prefix), + db, + ) } // NewNested returns a new prefixed database without attempting to compress // prefixes. 
func NewNested(prefix []byte, db database.Database) *Database { - return &Database{ - dbPrefix: hashing.ComputeHash256(prefix), - db: db, - bufferPool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufCap) - }, - }, - } + return newDB( + MakePrefix(prefix), + db, + ) +} + +func MakePrefix(prefix []byte) []byte { + return hashing.ComputeHash256(prefix) +} + +func JoinPrefixes(firstPrefix, secondPrefix []byte) []byte { + simplePrefix := make([]byte, len(firstPrefix)+len(secondPrefix)) + copy(simplePrefix, firstPrefix) + copy(simplePrefix[len(firstPrefix):], secondPrefix) + return MakePrefix(simplePrefix) +} + +func PrefixKey(prefix, key []byte) []byte { + prefixedKey := make([]byte, len(prefix)+len(key)) + copy(prefixedKey, prefix) + copy(prefixedKey[len(prefix):], key) + return prefixedKey } // Assumes that it is OK for the argument to db.db.Has diff --git a/database/prefixdb/db_test.go b/database/prefixdb/db_test.go index 065e1d7a00c8..f928d2f635a4 100644 --- a/database/prefixdb/db_test.go +++ b/database/prefixdb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prefixdb import ( + "fmt" "testing" "github.com/ava-labs/avalanchego/database" @@ -11,14 +12,16 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := memdb.New() - test(t, New([]byte("hello"), db)) - test(t, New([]byte("world"), db)) - test(t, New([]byte("wor"), New([]byte("ld"), db))) - test(t, New([]byte("ld"), New([]byte("wor"), db))) - test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) - test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := memdb.New() + test(t, New([]byte("hello"), db)) + test(t, New([]byte("world"), db)) + test(t, New([]byte("wor"), New([]byte("ld"), db))) + test(t, New([]byte("ld"), New([]byte("wor"), db))) + test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) + test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + }) } } @@ -30,12 +33,18 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New([]byte(""), memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New([]byte(""), memdb.New())) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := New([]byte("hello"), memdb.New()) - bench(b, db, "prefixdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("prefixdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := New([]byte("hello"), memdb.New()) + bench(b, db, keys, values) + }) } } } diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go index 3e92a9a8477a..c71ccd0603e6 100644 --- a/database/rpcdb/db_client.go +++ b/database/rpcdb/db_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
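A minimal usage sketch of the prefix helpers exported in the prefixdb hunks above. Only MakePrefix, JoinPrefixes and PrefixKey come from this diff; the main wrapper and the chosen byte values are illustrative assumptions, not part of the change.

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database/prefixdb"
)

func main() {
	// New(prefix, db) on a plain database hashes the prefix once.
	outer := prefixdb.MakePrefix([]byte("wor"))

	// New(prefix, prefixDB) now compresses nesting by hashing the already
	// derived prefix together with the new one (see the New hunk above).
	nested := prefixdb.JoinPrefixes(outer, []byte("ld"))

	// PrefixKey concatenates a derived prefix with a logical key.
	fmt.Printf("%x\n", prefixdb.PrefixKey(nested, []byte("my-key")))
}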
package rpcdb @@ -43,7 +43,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) { if err != nil { return false, err } - return resp.Has, errEnumToError[resp.Err] + return resp.Has, ErrEnumToError[resp.Err] } // Get attempts to return the value that was mapped to the key that was provided @@ -54,7 +54,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) { if err != nil { return nil, err } - return resp.Value, errEnumToError[resp.Err] + return resp.Value, ErrEnumToError[resp.Err] } // Put attempts to set the value this key maps to @@ -66,7 +66,7 @@ func (db *DatabaseClient) Put(key, value []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Delete attempts to remove any mapping from the key @@ -77,7 +77,7 @@ func (db *DatabaseClient) Delete(key []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // NewBatch returns a new batch @@ -120,7 +120,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Close attempts to close the database @@ -130,7 +130,7 @@ func (db *DatabaseClient) Close() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (db *DatabaseClient) HealthCheck(ctx context.Context) (interface{}, error) { @@ -175,7 +175,7 @@ func (b *batch) Write() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (b *batch) Inner() database.Batch { @@ -224,7 +224,7 @@ func (it *iterator) fetch() { if err != nil { it.setError(err) } else { - it.setError(errEnumToError[resp.Err]) + it.setError(ErrEnumToError[resp.Err]) } close(it.fetchedData) @@ -324,7 +324,7 @@ func (it *iterator) updateError() { if err != nil { it.setError(err) } else { - it.setError(errEnumToError[resp.Err]) + it.setError(ErrEnumToError[resp.Err]) } } diff --git a/database/rpcdb/db_server.go b/database/rpcdb/db_server.go index e9e135738e89..8a07a672d070 100644 --- a/database/rpcdb/db_server.go +++ b/database/rpcdb/db_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcdb @@ -50,8 +50,8 @@ func (db *DatabaseServer) Has(_ context.Context, req *rpcdbpb.HasRequest) (*rpcd has, err := db.db.Has(req.Key) return &rpcdbpb.HasResponse{ Has: has, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Get delegates the Get call to the managed database and returns the result @@ -59,34 +59,34 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbpb.GetRequest) (*rpcd value, err := db.db.Get(req.Key) return &rpcdbpb.GetResponse{ Value: value, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Put delegates the Put call to the managed database and returns the result func (db *DatabaseServer) Put(_ context.Context, req *rpcdbpb.PutRequest) (*rpcdbpb.PutResponse, error) { err := db.db.Put(req.Key, req.Value) - return &rpcdbpb.PutResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.PutResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Delete delegates the Delete call to the managed database and returns the // result func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbpb.DeleteRequest) (*rpcdbpb.DeleteResponse, error) { err := db.db.Delete(req.Key) - return &rpcdbpb.DeleteResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.DeleteResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Compact delegates the Compact call to the managed database and returns the // result func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbpb.CompactRequest) (*rpcdbpb.CompactResponse, error) { err := db.db.Compact(req.Start, req.Limit) - return &rpcdbpb.CompactResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.CompactResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Close delegates the Close call to the managed database and returns the result func (db *DatabaseServer) Close(context.Context, *rpcdbpb.CloseRequest) (*rpcdbpb.CloseResponse, error) { err := db.db.Close() - return &rpcdbpb.CloseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.CloseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // HealthCheck performs a heath check against the underlying database. 
@@ -109,22 +109,22 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbpb.WriteBatchR for _, put := range req.Puts { if err := batch.Put(put.Key, put.Value); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } for _, del := range req.Deletes { if err := batch.Delete(del.Key); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } err := batch.Write() return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator @@ -177,7 +177,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbpb.Iterator return nil, errUnknownIterator } err := it.Error() - return &rpcdbpb.IteratorErrorResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorErrorResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // IteratorRelease attempts to release the resources allocated to an iterator @@ -193,5 +193,5 @@ func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbpb.Iterat err := it.Error() it.Release() - return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorReleaseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go index 763b95b83f75..cc0cca3694b0 100644 --- a/database/rpcdb/db_test.go +++ b/database/rpcdb/db_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
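With the error maps and ErrorToRPCError exported by the rpcdb hunks above, callers outside the package can translate database errors the same way DatabaseServer does. A small sketch; the printed enum name assumes the usual generated protobuf Stringer, everything else comes from the hunks themselves.

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/rpcdb"
)

func main() {
	// Expected database errors travel in-band as protobuf enums...
	fmt.Println(rpcdb.ErrorToErrEnum[database.ErrNotFound]) // ERROR_NOT_FOUND
	// ...so ErrorToRPCError suppresses them at the gRPC layer...
	fmt.Println(rpcdb.ErrorToRPCError(database.ErrNotFound)) // <nil>
	// ...while unexpected errors still surface as real RPC errors.
	fmt.Println(rpcdb.ErrorToRPCError(fmt.Errorf("unexpected failure")))
}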
package rpcdb import ( "context" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -18,9 +19,8 @@ import ( ) type testDatabase struct { - client *DatabaseClient - server *memdb.Database - closeFn func() + client *DatabaseClient + server *memdb.Database } func setupDB(t testing.TB) *testDatabase { @@ -44,44 +44,48 @@ func setupDB(t testing.TB) *testDatabase { require.NoError(err) db.client = NewClient(rpcdbpb.NewDatabaseClient(conn)) - db.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return db } func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := setupDB(t) - test(t, db.client) - - db.closeFn() + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := setupDB(t) + test(t, db.client) + }) } } func FuzzKeyValue(f *testing.F) { db := setupDB(f) database.FuzzKeyValue(f, db.client) - - db.closeFn() } func FuzzNewIteratorWithPrefix(f *testing.F) { db := setupDB(f) database.FuzzNewIteratorWithPrefix(f, db.client) +} - db.closeFn() +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := setupDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db.client) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := setupDB(b) - bench(b, db.client, "rpcdb", keys, values) - db.closeFn() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("rpcdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := setupDB(b) + bench(b, db.client, keys, values) + }) } } } diff --git a/database/rpcdb/errors.go b/database/rpcdb/errors.go index 8a1fae2f0a1e..52788cc0a42a 100644 --- a/database/rpcdb/errors.go +++ b/database/rpcdb/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb @@ -10,18 +10,18 @@ import ( ) var ( - errEnumToError = map[rpcdbpb.Error]error{ + ErrEnumToError = map[rpcdbpb.Error]error{ rpcdbpb.Error_ERROR_CLOSED: database.ErrClosed, rpcdbpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, } - errorToErrEnum = map[error]rpcdbpb.Error{ + ErrorToErrEnum = map[error]rpcdbpb.Error{ database.ErrClosed: rpcdbpb.Error_ERROR_CLOSED, database.ErrNotFound: rpcdbpb.Error_ERROR_NOT_FOUND, } ) -func errorToRPCError(err error) error { - if _, ok := errorToErrEnum[err]; ok { +func ErrorToRPCError(err error) error { + if _, ok := ErrorToErrEnum[err]; ok { return nil } return err diff --git a/database/test_database.go b/database/test_database.go index 2e68f53341b8..792b038012cb 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package database @@ -24,44 +24,44 @@ import ( ) // Tests is a list of all database tests -var Tests = []func(t *testing.T, db Database){ - TestSimpleKeyValue, - TestOverwriteKeyValue, - TestEmptyKey, - TestKeyEmptyValue, - TestSimpleKeyValueClosed, - TestNewBatchClosed, - TestBatchPut, - TestBatchDelete, - TestBatchReset, - TestBatchReuse, - TestBatchRewrite, - TestBatchReplay, - TestBatchReplayPropagateError, - TestBatchInner, - TestBatchLargeSize, - TestIteratorSnapshot, - TestIterator, - TestIteratorStart, - TestIteratorPrefix, - TestIteratorStartPrefix, - TestIteratorMemorySafety, - TestIteratorClosed, - TestIteratorError, - TestIteratorErrorAfterRelease, - TestCompactNoPanic, - TestMemorySafetyDatabase, - TestMemorySafetyBatch, - TestAtomicClear, - TestClear, - TestAtomicClearPrefix, - TestClearPrefix, - TestModifyValueAfterPut, - TestModifyValueAfterBatchPut, - TestModifyValueAfterBatchPutReplay, - TestConcurrentBatches, - TestManySmallConcurrentKVPairBatches, - TestPutGetEmpty, +var Tests = map[string]func(t *testing.T, db Database){ + "SimpleKeyValue": TestSimpleKeyValue, + "OverwriteKeyValue": TestOverwriteKeyValue, + "EmptyKey": TestEmptyKey, + "KeyEmptyValue": TestKeyEmptyValue, + "SimpleKeyValueClosed": TestSimpleKeyValueClosed, + "NewBatchClosed": TestNewBatchClosed, + "BatchPut": TestBatchPut, + "BatchDelete": TestBatchDelete, + "BatchReset": TestBatchReset, + "BatchReuse": TestBatchReuse, + "BatchRewrite": TestBatchRewrite, + "BatchReplay": TestBatchReplay, + "BatchReplayPropagateError": TestBatchReplayPropagateError, + "BatchInner": TestBatchInner, + "BatchLargeSize": TestBatchLargeSize, + "IteratorSnapshot": TestIteratorSnapshot, + "Iterator": TestIterator, + "IteratorStart": TestIteratorStart, + "IteratorPrefix": TestIteratorPrefix, + "IteratorStartPrefix": TestIteratorStartPrefix, + "IteratorMemorySafety": TestIteratorMemorySafety, + "IteratorClosed": TestIteratorClosed, + "IteratorError": TestIteratorError, + "IteratorErrorAfterRelease": TestIteratorErrorAfterRelease, + "CompactNoPanic": TestCompactNoPanic, + "MemorySafetyDatabase": TestMemorySafetyDatabase, + "MemorySafetyBatch": TestMemorySafetyBatch, + "AtomicClear": TestAtomicClear, + "Clear": TestClear, + "AtomicClearPrefix": TestAtomicClearPrefix, + "ClearPrefix": TestClearPrefix, + "ModifyValueAfterPut": TestModifyValueAfterPut, + "ModifyValueAfterBatchPut": TestModifyValueAfterBatchPut, + "ModifyValueAfterBatchPutReplay": TestModifyValueAfterBatchPutReplay, + "ConcurrentBatches": TestConcurrentBatches, + "ManySmallConcurrentKVPairBatches": TestManySmallConcurrentKVPairBatches, + "PutGetEmpty": TestPutGetEmpty, } // TestSimpleKeyValue tests to make sure that simple Put + Get + Delete + Has @@ -1266,7 +1266,74 @@ func FuzzNewIteratorWithPrefix(f *testing.F, db Database) { require.Equal(expected[string(iter.Key())], val) numIterElts++ } - require.Equal(len(expectedList), numIterElts) + require.Len(expectedList, numIterElts) + + // Clear the database for the next fuzz iteration. 
+ require.NoError(AtomicClear(db, db)) + }) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F, db Database) { + const ( + maxKeyLen = 32 + maxValueLen = 32 + ) + + f.Fuzz(func( + t *testing.T, + randSeed int64, + start []byte, + prefix []byte, + numKeyValues uint, + ) { + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + expected := map[string][]byte{} + + // Put a bunch of key-values + for i := 0; i < int(numKeyValues); i++ { + key := make([]byte, r.Intn(maxKeyLen)) + _, _ = r.Read(key) // #nosec G404 + + value := make([]byte, r.Intn(maxValueLen)) + _, _ = r.Read(value) // #nosec G404 + + if len(value) == 0 { + // Consistently treat zero length values as nil + // so that we can compare [expected] and [got] with + // require.Equal, which treats nil and empty byte + // as being unequal, whereas the database treats + // them as being equal. + value = nil + } + + if bytes.HasPrefix(key, prefix) && bytes.Compare(key, start) >= 0 { + expected[string(key)] = value + } + + require.NoError(db.Put(key, value)) + } + + expectedList := maps.Keys(expected) + slices.Sort(expectedList) + + iter := db.NewIteratorWithStartAndPrefix(start, prefix) + defer iter.Release() + + // Assert the iterator returns the expected key-values. + numIterElts := 0 + for iter.Next() { + val := iter.Value() + if len(val) == 0 { + val = nil + } + keyStr := string(iter.Key()) + require.Equal(expectedList[numIterElts], keyStr) + require.Equal(expected[keyStr], val) + numIterElts++ + } + require.Len(expectedList, numIterElts) // Clear the database for the next fuzz iteration. require.NoError(AtomicClear(db, db)) diff --git a/database/versiondb/db.go b/database/versiondb/db.go index d65dca947502..479e8af814d1 100644 --- a/database/versiondb/db.go +++ b/database/versiondb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package versiondb diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index fdea2934b8d2..0ff801dfe0dd 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package versiondb import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -13,9 +14,11 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - test(t, New(baseDB)) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + baseDB := memdb.New() + test(t, New(baseDB)) + }) } } @@ -27,6 +30,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New(memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New(memdb.New())) +} + func TestIterate(t *testing.T) { require := require.New(t) @@ -295,11 +302,13 @@ func TestSetDatabaseClosed(t *testing.T) { func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db := New(baseDB) - bench(b, db, "versiondb", keys, values) - _ = db.Close() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("versiondb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + baseDB := memdb.New() + db := New(baseDB) + bench(b, db, keys, values) + _ = db.Close() + }) } } } diff --git a/genesis/aliases.go b/genesis/aliases.go index b12e50d6f885..c6f8b3df2b74 100644 --- a/genesis/aliases.go +++ b/genesis/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/genesis/bootstrappers.go b/genesis/bootstrappers.go index 4aec33f9c3d9..b3e3a2a24f0c 100644 --- a/genesis/bootstrappers.go +++ b/genesis/bootstrappers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -36,10 +36,15 @@ type Bootstrapper struct { IP ips.IPDesc `json:"ip"` } +// GetBootstrappers returns all default bootstrappers for the provided network. +func GetBootstrappers(networkID uint32) []Bootstrapper { + networkName := constants.NetworkIDToNetworkName[networkID] + return bootstrappersPerNetwork[networkName] +} + // SampleBootstrappers returns the some beacons this node should connect to func SampleBootstrappers(networkID uint32, count int) []Bootstrapper { - networkName := constants.NetworkIDToNetworkName[networkID] - bootstrappers := bootstrappersPerNetwork[networkName] + bootstrappers := GetBootstrappers(networkID) count = math.Min(count, len(bootstrappers)) s := sampler.NewUniform() diff --git a/genesis/bootstrappers_test.go b/genesis/bootstrappers_test.go index 5b480d9040fc..b4877bae0a3e 100644 --- a/genesis/bootstrappers_test.go +++ b/genesis/bootstrappers_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
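GetBootstrappers, added in the bootstrappers hunk above, exposes the full default set that SampleBootstrappers samples from. A hedged usage sketch; constants.MainnetID is an assumed network constant and is not part of this diff.

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/genesis"
	"github.com/ava-labs/avalanchego/utils/constants"
)

func main() {
	all := genesis.GetBootstrappers(constants.MainnetID)
	some := genesis.SampleBootstrappers(constants.MainnetID, 5)
	fmt.Printf("%d default bootstrappers, sampled %d\n", len(all), len(some))
}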
package genesis diff --git a/genesis/camino_config.go b/genesis/camino_config.go index 0a07fe68ba17..3786ce7b79e5 100644 --- a/genesis/camino_config.go +++ b/genesis/camino_config.go @@ -9,6 +9,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/hashing" @@ -121,9 +122,11 @@ func (a CaminoAllocation) Unparse(networkID uint32) (UnparsedCaminoAllocation, e return ua, err } -func (a CaminoAllocation) Less(other CaminoAllocation) bool { - return a.XAmount < other.XAmount || - (a.XAmount == other.XAmount && a.AVAXAddr.Less(other.AVAXAddr)) +func (a CaminoAllocation) Compare(other CaminoAllocation) int { + if amountCmp := utils.Compare(a.XAmount, other.XAmount); amountCmp != 0 { + return amountCmp + } + return a.AVAXAddr.Compare(other.AVAXAddr) } type PlatformAllocation struct { diff --git a/genesis/camino_genesis.go b/genesis/camino_genesis.go index ff29648bbb1b..12da7d88e29f 100644 --- a/genesis/camino_genesis.go +++ b/genesis/camino_genesis.go @@ -434,8 +434,8 @@ func buildPGenesis(config *Config, hrp string, xGenesisBytes []byte, xGenesisDat stakingOffset += time.Duration(config.InitialStakeDurationOffset) * time.Second platformvmArgs.Validators = append(platformvmArgs.Validators, - api.PermissionlessValidator{ - Staker: api.Staker{ + api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(startStakingTime.Unix()), EndTime: json.Uint64(endStakingTime.Unix()), NodeID: platformAllocation.NodeID, diff --git a/genesis/config.go b/genesis/config.go index 647deb320676..3f0f52de996f 100644 --- a/genesis/config.go +++ b/genesis/config.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis @@ -27,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) var ( @@ -62,15 +63,18 @@ func (a Allocation) Unparse(networkID uint32) (UnparsedAllocation, error) { return ua, err } -func (a Allocation) Less(other Allocation) bool { - return a.InitialAmount < other.InitialAmount || - (a.InitialAmount == other.InitialAmount && a.AVAXAddr.Less(other.AVAXAddr)) +func (a Allocation) Compare(other Allocation) int { + if amountCmp := utils.Compare(a.InitialAmount, other.InitialAmount); amountCmp != 0 { + return amountCmp + } + return a.AVAXAddr.Compare(other.AVAXAddr) } type Staker struct { - NodeID ids.NodeID `json:"nodeID"` - RewardAddress ids.ShortID `json:"rewardAddress"` - DelegationFee uint32 `json:"delegationFee"` + NodeID ids.NodeID `json:"nodeID"` + RewardAddress ids.ShortID `json:"rewardAddress"` + DelegationFee uint32 `json:"delegationFee"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` } func (s Staker) Unparse(networkID uint32) (UnparsedStaker, error) { @@ -83,6 +87,7 @@ func (s Staker) Unparse(networkID uint32) (UnparsedStaker, error) { NodeID: s.NodeID, RewardAddress: avaxAddr, DelegationFee: s.DelegationFee, + Signer: s.Signer, }, err } diff --git a/genesis/config_test.go b/genesis/config_test.go index 455045dad70b..8a9bc96a9c94 100644 --- a/genesis/config_test.go +++ b/genesis/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -11,56 +11,43 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestAllocationLess(t *testing.T) { +func TestAllocationCompare(t *testing.T) { type test struct { name string alloc1 Allocation alloc2 Allocation - expected bool + expected int } tests := []test{ { name: "equal", alloc1: Allocation{}, alloc2: Allocation{}, - expected: false, + expected: 0, }, { - name: "first initial amount smaller", + name: "initial amount smaller", alloc1: Allocation{}, alloc2: Allocation{ InitialAmount: 1, }, - expected: true, + expected: -1, }, { - name: "first initial amount larger", - alloc1: Allocation{ - InitialAmount: 1, - }, - alloc2: Allocation{}, - expected: false, - }, - { - name: "first bytes smaller", + name: "bytes smaller", alloc1: Allocation{}, alloc2: Allocation{ AVAXAddr: ids.ShortID{1}, }, - expected: true, - }, - { - name: "first bytes larger", - alloc1: Allocation{ - AVAXAddr: ids.ShortID{1}, - }, - alloc2: Allocation{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.alloc1.Less(tt.alloc2)) + require := require.New(t) + + require.Equal(tt.expected, tt.alloc1.Compare(tt.alloc2)) + require.Equal(-tt.expected, tt.alloc2.Compare(tt.alloc1)) }) } } diff --git a/genesis/genesis.go b/genesis/genesis.go index a13f966a1526..650631c5709d 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
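The Less to Compare migration shown above (for both CaminoAllocation and Allocation) gives these types a standard three-way comparator that orders by amount first and address second. A minimal sketch of the resulting behaviour; the values are illustrative.

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/genesis"
	"github.com/ava-labs/avalanchego/ids"
)

func main() {
	a := genesis.Allocation{InitialAmount: 1}
	b := genesis.Allocation{InitialAmount: 1, AVAXAddr: ids.ShortID{1}}
	c := genesis.Allocation{InitialAmount: 2}

	// Equal amounts fall through to the address comparison.
	fmt.Println(a.Compare(b), b.Compare(a)) // -1 1
	// A smaller amount wins regardless of address.
	fmt.Println(b.Compare(c)) // -1
	// Comparing a value with itself yields 0.
	fmt.Println(a.Compare(a)) // 0
}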
package genesis @@ -444,8 +444,8 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { delegationFee := json.Uint32(staker.DelegationFee) platformvmArgs.Validators = append(platformvmArgs.Validators, - api.PermissionlessValidator{ - Staker: api.Staker{ + api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(endStakingTime.Unix()), NodeID: staker.NodeID, @@ -456,6 +456,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { }, Staked: utxos, ExactDelegationFee: &delegationFee, + Signer: staker.Signer, }, ) } @@ -577,9 +578,12 @@ func VMGenesis(genesisBytes []byte, vmID ids.ID) (*pchaintxs.Tx, error) { } func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { - parser, err := xchaintxs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := xchaintxs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) if err != nil { return ids.Empty, err } @@ -596,7 +600,7 @@ func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { genesisTx := genesis.Txs[0] tx := xchaintxs.Tx{Unsigned: &genesisTx.CreateAssetTx} - if err := parser.InitializeGenesisTx(&tx); err != nil { + if err := tx.Initialize(genesisCodec); err != nil { return ids.Empty, err } return tx.ID(), nil diff --git a/genesis/genesis_local.go b/genesis/genesis_local.go index eb284d7aa078..948c50d84201 100644 --- a/genesis/genesis_local.go +++ b/genesis/genesis_local.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 45755ee6f8c4..b964f33a109e 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/genesis/params.go b/genesis/params.go index 6a30a3283456..f6f2d8a36e80 100644 --- a/genesis/params.go +++ b/genesis/params.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/genesis/unparsed_config.go b/genesis/unparsed_config.go index a6330701979e..66d0f7e42062 100644 --- a/genesis/unparsed_config.go +++ b/genesis/unparsed_config.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) var errInvalidETHAddress = errors.New("invalid eth address") @@ -64,15 +65,17 @@ func (ua UnparsedAllocation) Parse() (Allocation, error) { } type UnparsedStaker struct { - NodeID ids.NodeID `json:"nodeID"` - RewardAddress string `json:"rewardAddress"` - DelegationFee uint32 `json:"delegationFee"` + NodeID ids.NodeID `json:"nodeID"` + RewardAddress string `json:"rewardAddress"` + DelegationFee uint32 `json:"delegationFee"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` } func (us UnparsedStaker) Parse() (Staker, error) { s := Staker{ NodeID: us.NodeID, DelegationFee: us.DelegationFee, + Signer: us.Signer, } _, _, avaxAddrBytes, err := address.Parse(us.RewardAddress) diff --git a/go.mod b/go.mod index d6d106ecda73..fa94da112281 100644 --- a/go.mod +++ b/go.mod @@ -11,8 +11,8 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.12.8-rc.1 - github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 + github.com/ava-labs/coreth v0.12.11-rc.2 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 @@ -24,7 +24,6 @@ require ( github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/holiman/bloomfilter/v2 v2.0.3 github.com/huin/goupnp v1.0.3 github.com/jackpal/gateway v1.0.6 github.com/jackpal/go-nat-pmp v1.0.2 @@ -32,22 +31,22 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d - github.com/onsi/ginkgo/v2 v2.4.0 - github.com/onsi/gomega v1.24.0 + github.com/onsi/ginkgo/v2 v2.13.1 + github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.11+incompatible - github.com/spaolacci/murmur3 v1.1.0 github.com/spf13/cast v1.5.0 github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/thepudds/fzgen v0.4.2 + github.com/tyler-smith/go-bip32 v1.0.0 github.com/xuri/excelize/v2 v2.8.0 go.opentelemetry.io/otel v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 @@ -56,13 +55,13 @@ require ( go.opentelemetry.io/otel/sdk v1.11.0 go.opentelemetry.io/otel/trace v1.11.0 go.uber.org/goleak v1.2.1 - go.uber.org/mock v0.2.0 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.14.0 - golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df - golang.org/x/net v0.17.0 - golang.org/x/sync v0.3.0 - golang.org/x/term v0.13.0 + go.uber.org/mock v0.4.0 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.17.0 + golang.org/x/exp v0.0.0-20231127185646-65229373498e + golang.org/x/net v0.19.0 + golang.org/x/sync v0.5.0 + golang.org/x/term v0.15.0 golang.org/x/time v0.0.0-20220922220347-f3bd1da661af gonum.org/v1/gonum v0.11.0 google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230711160842-782d3b101e98 @@ -73,8 +72,9 @@ require ( require ( github.com/BurntSushi/toml v1.2.1 // indirect + github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect + github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect github.com/VictoriaMetrics/fastcache v1.10.0 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect @@ -92,22 +92,23 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/klauspost/compress v1.15.15 // indirect @@ -144,14 +145,14 @@ require ( github.com/xuri/efp v0.0.0-20230802181842-ad255f2331ca // indirect github.com/xuri/nfp v0.0.0-20230819163627-dc951e3ffe1a // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zondax/hid v0.9.1 // indirect - github.com/zondax/ledger-go v0.14.1 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.0 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -161,4 +162,4 @@ require ( replace github.com/ava-labs/avalanche-ledger-go => github.com/chain4travel/camino-ledger-go v0.0.13-c4t -replace github.com/ava-labs/coreth => github.com/chain4travel/caminoethvm v1.1.15-rc1.0.20240721114647-ffc063541f3f +replace github.com/ava-labs/coreth => github.com/chain4travel/caminoethvm v1.1.19-rc0 diff --git a/go.sum b/go.sum index 48913e70355d..540f2ca9fcfd 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,10 @@ 
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= @@ -62,11 +66,9 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -104,8 +106,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chain4travel/caminoethvm v1.1.15-rc1.0.20240721114647-ffc063541f3f h1:UXaMNZQQBLT8Fu3l6p8Fktr3NhYBHfjs5ZRi4UauP4I= -github.com/chain4travel/caminoethvm v1.1.15-rc1.0.20240721114647-ffc063541f3f/go.mod h1:aXs2X5y4BVp+fGUk4sR1rk1qHmrtTl6NxTJPhUVw3P0= +github.com/chain4travel/caminoethvm v1.1.19-rc0 h1:HfthNcZLyL9HS2f2Sv529lCvdmlzY6hThRnCtzcXRN0= +github.com/chain4travel/caminoethvm v1.1.19-rc0/go.mod h1:F2be/crCphktEOCKfR4P7r0rV0fppOsFMj0mR3kvVqQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex 
v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -113,6 +115,8 @@ github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86c github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -213,8 +217,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -226,6 +230,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -291,8 +297,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -346,8 +353,6 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= @@ -475,16 +480,16 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= +github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -554,8 +559,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= @@ -580,18 +583,19 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= @@ -606,6 +610,8 @@ github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITn github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tyler-smith/go-bip32 v1.0.0 h1:sDR9juArbUgX+bO/iblgZnMPeWY1KZMUC2AFUJdv5KE= +github.com/tyler-smith/go-bip32 v1.0.0/go.mod h1:onot+eHknzV4BVPwrzqY5OoVpyCvnwD7lMawL5aQupE= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -645,10 +651,10 @@ github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= -github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= -github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -674,19 +680,17 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= -go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -702,8 +706,8 @@ 
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -714,8 +718,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo= @@ -745,6 +749,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -794,8 +799,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod 
h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -819,8 +824,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -893,15 +898,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -915,8 +920,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= 
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -980,6 +985,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1138,6 +1145,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/ids/aliases.go b/ids/aliases.go index c7958e1c425e..484c6f8aa4b3 100644 --- a/ids/aliases.go +++ b/ids/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/ids/aliases_test.go b/ids/aliases_test.go index b25177242ff2..6c77d7443703 100644 --- a/ids/aliases_test.go +++ b/ids/aliases_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/ids/bits.go b/ids/bits.go index a884578f420b..bb3586704c4f 100644 --- a/ids/bits.go +++ b/ids/bits.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/ids/bits_test.go b/ids/bits_test.go index 4b5783dded46..feb381902000 100644 --- a/ids/bits_test.go +++ b/ids/bits_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids diff --git a/ids/galiasreader/alias_reader_client.go b/ids/galiasreader/alias_reader_client.go index aa77f9ec870f..319d7508cbd4 100644 --- a/ids/galiasreader/alias_reader_client.go +++ b/ids/galiasreader/alias_reader_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package galiasreader diff --git a/ids/galiasreader/alias_reader_server.go b/ids/galiasreader/alias_reader_server.go index 48f31bf74e3a..eeb9083ca1e4 100644 --- a/ids/galiasreader/alias_reader_server.go +++ b/ids/galiasreader/alias_reader_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package galiasreader diff --git a/ids/galiasreader/alias_reader_test.go b/ids/galiasreader/alias_reader_test.go index f268d10fc268..899c13a24ed2 100644 --- a/ids/galiasreader/alias_reader_test.go +++ b/ids/galiasreader/alias_reader_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package galiasreader diff --git a/ids/id.go b/ids/id.go index 68148018b078..6cda4b6ec854 100644 --- a/ids/id.go +++ b/ids/id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -51,7 +51,7 @@ func (id ID) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return []byte("\"" + str + "\""), nil + return []byte(`"` + str + `"`), nil } func (id *ID) UnmarshalJSON(b []byte) error { @@ -145,6 +145,6 @@ func (id ID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ID) Less(other ID) bool { - return bytes.Compare(id[:], other[:]) < 0 +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) } diff --git a/ids/id_test.go b/ids/id_test.go index 3424b17633e6..930a323e614c 100644 --- a/ids/id_test.go +++ b/ids/id_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids import ( "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -101,11 +102,16 @@ func TestIDMarshalJSON(t *testing.T) { out []byte err error }{ - {"ID{}", ID{}, []byte("\"11111111111111111111111111111111LpoYY\""), nil}, { - "ID(\"ava labs\")", + "ID{}", + ID{}, + []byte(`"11111111111111111111111111111111LpoYY"`), + nil, + }, + { + `ID("ava labs")`, ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + []byte(`"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7"`), nil, }, } @@ -127,10 +133,15 @@ func TestIDUnmarshalJSON(t *testing.T) { out ID err error }{ - {"ID{}", []byte("null"), ID{}, nil}, { - "ID(\"ava labs\")", - []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + "ID{}", + []byte("null"), + ID{}, + nil, + }, + { + `ID("ava labs")`, + []byte(`"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7"`), ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, nil, }, @@ -200,26 +211,34 @@ func TestIDMapMarshalling(t *testing.T) { require.Equal(originalMap, unmarshalledMap) } -func TestIDLess(t *testing.T) { - require := require.New(t) +func TestIDCompare(t *testing.T) { + tests := []struct { + a ID + b ID + expected int + }{ + { + a: ID{1}, + b: ID{0}, + expected: 1, + }, + { + a: ID{1}, + b: ID{1}, + expected: 0, + }, + { + a: ID{1, 0}, + b: ID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := ID{} - id2 := ID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{0} - require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1, 0} - id2 = ID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/ids/node_id.go b/ids/node_id.go index 7e4ad693764e..f4aaf867d6c0 100644 --- a/ids/node_id.go +++ b/ids/node_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -35,7 +35,7 @@ func (id NodeID) Bytes() []byte { } func (id NodeID) MarshalJSON() ([]byte, error) { - return []byte("\"" + id.String() + "\""), nil + return []byte(`"` + id.String() + `"`), nil } func (id NodeID) MarshalText() ([]byte, error) { @@ -64,8 +64,8 @@ func (id *NodeID) UnmarshalText(text []byte) error { return id.UnmarshalJSON(text) } -func (id NodeID) Less(other NodeID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id NodeID) Compare(other NodeID) int { + return bytes.Compare(id[:], other[:]) } // ToNodeID attempt to convert a byte slice into a node id diff --git a/ids/node_id_test.go b/ids/node_id_test.go index b92fb6e19053..2c94450f5b66 100644 --- a/ids/node_id_test.go +++ b/ids/node_id_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids import ( "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -67,11 +68,16 @@ func TestNodeIDMarshalJSON(t *testing.T) { out []byte err error }{ - {"NodeID{}", NodeID{}, []byte("\"NodeID-111111111111111111116DBWJs\""), nil}, { - "ID(\"ava labs\")", + "NodeID{}", + NodeID{}, + []byte(`"NodeID-111111111111111111116DBWJs"`), + nil, + }, + { + `ID("ava labs")`, NodeID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + []byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), nil, }, } @@ -93,40 +99,45 @@ func TestNodeIDUnmarshalJSON(t *testing.T) { out NodeID expectedErr error }{ - {"NodeID{}", []byte("null"), NodeID{}, nil}, { - "NodeID(\"ava labs\")", - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + "NodeID{}", + []byte("null"), + NodeID{}, + nil, + }, + { + `NodeID("ava labs")`, + []byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), NodeID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, nil, }, { "missing start quote", - []byte("NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + []byte(`NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), NodeID{}, errMissingQuotes, }, { "missing end quote", - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"), + []byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz`), NodeID{}, errMissingQuotes, }, { "NodeID-", - []byte("\"NodeID-\""), + []byte(`"NodeID-"`), NodeID{}, errShortNodeID, }, { "NodeID-1", - []byte("\"NodeID-1\""), + []byte(`"NodeID-1"`), NodeID{}, cb58.ErrMissingChecksum, }, { "NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz1", - []byte("\"NodeID-1\""), + []byte(`"NodeID-1"`), NodeID{}, cb58.ErrMissingChecksum, }, @@ -174,26 +185,34 @@ func TestNodeIDMapMarshalling(t *testing.T) { require.Equal(originalMap, unmarshalledMap) } -func TestNodeIDLess(t *testing.T) { - require := require.New(t) +func TestNodeIDCompare(t *testing.T) { + tests := []struct { + a NodeID + b NodeID + expected int + }{ + { + a: NodeID{1}, + b: NodeID{0}, + expected: 1, + }, + { + a: NodeID{1}, + b: NodeID{1}, + expected: 0, + }, + { + a: NodeID{1, 0}, + b: NodeID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := NodeID{} - id2 := NodeID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{} - require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/ids/request_id.go b/ids/request_id.go index 779f819d9d4e..e1d9459866f7 100644 --- a/ids/request_id.go +++ b/ids/request_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/ids/short.go b/ids/short.go index 25b96f1755b0..7c01dca4785a 100644 --- a/ids/short.go +++ b/ids/short.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids @@ -54,7 +54,7 @@ func (id ShortID) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return []byte("\"" + str + "\""), nil + return []byte(`"` + str + `"`), nil } func (id *ShortID) UnmarshalJSON(b []byte) error { @@ -110,8 +110,8 @@ func (id ShortID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ShortID) Less(other ShortID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id ShortID) Compare(other ShortID) int { + return bytes.Compare(id[:], other[:]) } // ShortIDsToStrings converts an array of shortIDs to an array of their string diff --git a/ids/test_aliases.go b/ids/test_aliases.go index 7e2b4fb9790e..ce9991f5f737 100644 --- a/ids/test_aliases.go +++ b/ids/test_aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/ids/test_generator.go b/ids/test_generator.go index 2f8edf3567d8..2df95ec0499f 100644 --- a/ids/test_generator.go +++ b/ids/test_generator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -23,3 +23,12 @@ func GenerateTestShortID() ShortID { func GenerateTestNodeID() NodeID { return NodeID(GenerateTestShortID()) } + +// BuildTestNodeID is an utility to build NodeID from bytes in UTs +// It must not be used in production code. In production code we should +// use ToNodeID, which performs proper length checking. +func BuildTestNodeID(src []byte) NodeID { + res := NodeID{} + copy(res[:], src) + return res +} diff --git a/indexer/client.go b/indexer/client.go index 785018e3072c..821059a1d6c0 100644 --- a/indexer/client.go +++ b/indexer/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/indexer/client_test.go b/indexer/client_test.go index 004c8eeb283d..95124a2129ab 100644 --- a/indexer/client_test.go +++ b/indexer/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/indexer/codec.go b/indexer/codec.go new file mode 100644 index 000000000000..afde47502ac3 --- /dev/null +++ b/indexer/codec.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package indexer + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" +) + +const CodecVersion = 0 + +var Codec codec.Manager + +func init() { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt) + + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { + panic(err) + } +} diff --git a/indexer/container.go b/indexer/container.go index c640fdd96d19..2bbb68e5db6f 100644 --- a/indexer/container.go +++ b/indexer/container.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
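Note on the Less -> Compare change above: ids.ID, ids.NodeID, and ids.ShortID now expose a three-way Compare instead of a boolean Less. What follows is an illustrative, non-authoritative sketch (not part of this diff) of how caller-side sorting code could migrate, assuming Go 1.21's slices package; the sample values are made up.

package main

import (
	"fmt"
	"slices"

	"github.com/ava-labs/avalanchego/ids"
)

func main() {
	// Hypothetical caller: previously something like
	//   sort.Slice(s, func(i, j int) bool { return s[i].Less(s[j]) })
	// Compare returns -1, 0, or 1, so it plugs straight into slices.SortFunc.
	s := []ids.ID{{2}, {1, 2}, {1}}
	slices.SortFunc(s, func(a, b ids.ID) int { return a.Compare(b) })
	fmt.Println(slices.IsSortedFunc(s, func(a, b ids.ID) int { return a.Compare(b) })) // true
}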
package indexer diff --git a/indexer/examples/p-chain/main.go b/indexer/examples/p-chain/main.go index fb9ec2d6f2f6..80f0e1aeed8e 100644 --- a/indexer/examples/p-chain/main.go +++ b/indexer/examples/p-chain/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -10,6 +10,7 @@ import ( "time" "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/wallet/subnet/primary" platformvmblock "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -34,7 +35,7 @@ func main() { } platformvmBlockBytes := container.Bytes - proposerVMBlock, err := proposervmblock.Parse(container.Bytes) + proposerVMBlock, err := proposervmblock.Parse(container.Bytes, version.DefaultUpgradeTime) if err == nil { platformvmBlockBytes = proposerVMBlock.Block() } diff --git a/indexer/examples/x-chain-blocks/main.go b/indexer/examples/x-chain-blocks/main.go index a995f9612bbd..e25693b62998 100644 --- a/indexer/examples/x-chain-blocks/main.go +++ b/indexer/examples/x-chain-blocks/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -10,6 +10,7 @@ import ( "time" "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/wallet/chain/x" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -32,7 +33,7 @@ func main() { continue } - proposerVMBlock, err := block.Parse(container.Bytes) + proposerVMBlock, err := block.Parse(container.Bytes, version.DefaultUpgradeTime) if err != nil { log.Fatalf("failed to parse proposervm block: %s\n", err) } diff --git a/indexer/index.go b/indexer/index.go index 5361754bf73d..86d75b37d4cb 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -6,12 +6,10 @@ package indexer import ( "errors" "fmt" - "io" "sync" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" @@ -36,26 +34,15 @@ var ( errNumToFetchInvalid = fmt.Errorf("numToFetch must be in [1,%d]", MaxFetchedByRange) errNoContainerAtIndex = errors.New("no container at index") - _ Index = (*index)(nil) + _ snow.Acceptor = (*index)(nil) ) -// Index indexes containers in their order of acceptance -// Index is thread-safe. -// Index assumes that Accept is called before the container is committed to the -// database of the VM that the container exists in. -type Index interface { - snow.Acceptor - GetContainerByIndex(index uint64) (Container, error) - GetContainerRange(startIndex uint64, numToFetch uint64) ([]Container, error) - GetLastAccepted() (Container, error) - GetIndex(id ids.ID) (uint64, error) - GetContainerByID(id ids.ID) (Container, error) - io.Closer -} - -// indexer indexes all accepted transactions by the order in which they were accepted +// index indexes containers in their order of acceptance +// +// Invariant: index is thread-safe. 
+// Invariant: index assumes that Accept is called, before the container is +// committed to the database of the VM, in the order they were accepted. type index struct { - codec codec.Manager clock mockable.Clock lock sync.RWMutex // The index of the next accepted transaction @@ -71,21 +58,20 @@ type index struct { log logging.Logger } -// Returns a new, thread-safe Index. -// Closes [baseDB] on close. +// Create a new thread-safe index. +// +// Invariant: Closes [baseDB] on close. func newIndex( baseDB database.Database, log logging.Logger, - codec codec.Manager, clock mockable.Clock, -) (Index, error) { +) (*index, error) { vDB := versiondb.New(baseDB) indexToContainer := prefixdb.New(indexToContainerPrefix, vDB) containerToIndex := prefixdb.New(containerToIDPrefix, vDB) i := &index{ clock: clock, - codec: codec, baseDB: baseDB, vDB: vDB, indexToContainer: indexToContainer, @@ -150,7 +136,7 @@ func (i *index) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container ) // Persist index --> Container nextAcceptedIndexBytes := database.PackUInt64(i.nextAcceptedIndex) - bytes, err := i.codec.Marshal(codecVersion, Container{ + bytes, err := Codec.Marshal(CodecVersion, Container{ ID: containerID, Bytes: containerBytes, Timestamp: i.clock.Time().UnixNano(), @@ -209,7 +195,7 @@ func (i *index) getContainerByIndexBytes(indexBytes []byte) (Container, error) { return Container{}, fmt.Errorf("couldn't read from database: %w", err) } var container Container - if _, err := i.codec.Unmarshal(containerBytes, &container); err != nil { + if _, err := Codec.Unmarshal(containerBytes, &container); err != nil { return Container{}, fmt.Errorf("couldn't unmarshal container: %w", err) } return container, nil diff --git a/indexer/index_test.go b/indexer/index_test.go index f31117c77b71..127aa64bda69 100644 --- a/indexer/index_test.go +++ b/indexer/index_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
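Aside on the codec change above (illustrative only, not part of this diff): with the per-index codec field gone, persisting and reading a Container goes through the package-level indexer.Codec introduced in indexer/codec.go. A minimal round-trip sketch, assuming the Container field names shown in index.Accept:

package main

import (
	"fmt"
	"time"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/indexer"
)

func main() {
	// Assumed field set, taken from the Container literal in index.Accept.
	in := indexer.Container{
		ID:        ids.GenerateTestID(),
		Bytes:     []byte("accepted container bytes"),
		Timestamp: time.Now().UnixNano(),
	}

	// Serialize with the shared codec, as index.Accept does before writing to the DB.
	b, err := indexer.Codec.Marshal(indexer.CodecVersion, in)
	if err != nil {
		panic(err)
	}

	// Deserialize, as getContainerByIndexBytes does when serving lookups.
	var out indexer.Container
	if _, err := indexer.Codec.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID == in.ID && out.Timestamp == in.Timestamp)
}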
package indexer @@ -8,12 +8,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -24,15 +22,13 @@ func TestIndex(t *testing.T) { // Setup pageSize := uint64(64) require := require.New(t) - codec := codec.NewDefaultManager() - require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) baseDB := memdb.New() db := versiondb.New(baseDB) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - indexIntf, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx := indexIntf.(*index) // Populate "containers" with random IDs/bytes containers := map[ids.ID][]byte{} @@ -83,9 +79,8 @@ func TestIndex(t *testing.T) { require.NoError(db.Commit()) require.NoError(idx.Close()) db = versiondb.New(baseDB) - indexIntf, err = newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + idx, err = newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx = indexIntf.(*index) // Get all of the containers containersList, err := idx.GetContainerRange(0, pageSize) @@ -113,13 +108,11 @@ func TestIndex(t *testing.T) { func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { // Setup require := require.New(t) - codec := codec.NewDefaultManager() - require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) db := memdb.New() - ctx := snow.DefaultConsensusContextTest() - indexIntf, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx := indexIntf.(*index) // Insert [MaxFetchedByRange] + 1 containers for i := uint64(0); i < MaxFetchedByRange+1; i++ { @@ -153,11 +146,10 @@ func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { func TestDontIndexSameContainerTwice(t *testing.T) { // Setup require := require.New(t) - codec := codec.NewDefaultManager() - require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) db := memdb.New() - ctx := snow.DefaultConsensusContextTest() - idx, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) // Accept the same container twice diff --git a/indexer/indexer.go b/indexer/indexer.go index e6080c104c2a..6b16f8245ebc 100644 --- a/indexer/indexer.go +++ b/indexer/indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -6,7 +6,6 @@ package indexer import ( "fmt" "io" - "math" "sync" "github.com/gorilla/rpc/v2" @@ -15,8 +14,6 @@ import ( "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -32,26 +29,18 @@ import ( ) const ( - indexNamePrefix = "index-" - codecVersion = uint16(0) - // Max size, in bytes, of something serialized by this indexer - // Assumes no containers are larger than math.MaxUint32 - // wrappers.IntLen accounts for the size of the container bytes - // wrappers.LongLen accounts for the timestamp of the container - // ids.IDLen accounts for the container ID - // wrappers.ShortLen accounts for the codec version - codecMaxSize = int(constants.DefaultMaxMessageSize) + wrappers.IntLen + wrappers.LongLen + ids.IDLen + wrappers.ShortLen + indexNamePrefix = "index-" + txPrefix = 0x01 + vtxPrefix = 0x02 + blockPrefix = 0x03 + isIncompletePrefix = 0x04 + previouslyIndexedPrefix = 0x05 ) var ( - txPrefix = byte(0x01) - vtxPrefix = byte(0x02) - blockPrefix = byte(0x03) - isIncompletePrefix = byte(0x04) - previouslyIndexedPrefix = byte(0x05) - hasRunKey = []byte{0x07} - _ Indexer = (*indexer)(nil) + + hasRunKey = []byte{0x07} ) // Config for an indexer @@ -80,7 +69,6 @@ type Indexer interface { // NewIndexer returns a new Indexer and registers a new endpoint on the given API server. func NewIndexer(config Config) (Indexer, error) { indexer := &indexer{ - codec: codec.NewManager(codecMaxSize), log: config.Log, db: config.DB, allowIncompleteIndex: config.AllowIncompleteIndex, @@ -88,19 +76,13 @@ func NewIndexer(config Config) (Indexer, error) { blockAcceptorGroup: config.BlockAcceptorGroup, txAcceptorGroup: config.TxAcceptorGroup, vertexAcceptorGroup: config.VertexAcceptorGroup, - txIndices: map[ids.ID]Index{}, - vtxIndices: map[ids.ID]Index{}, - blockIndices: map[ids.ID]Index{}, + txIndices: map[ids.ID]*index{}, + vtxIndices: map[ids.ID]*index{}, + blockIndices: map[ids.ID]*index{}, pathAdder: config.APIServer, shutdownF: config.ShutdownF, } - if err := indexer.codec.RegisterCodec( - codecVersion, - linearcodec.NewCustomMaxLength(math.MaxUint32), - ); err != nil { - return nil, fmt.Errorf("couldn't register codec: %w", err) - } hasRun, err := indexer.hasRun() if err != nil { return nil, err @@ -110,7 +92,6 @@ func NewIndexer(config Config) (Indexer, error) { } type indexer struct { - codec codec.Manager clock mockable.Clock lock sync.RWMutex log logging.Logger @@ -134,11 +115,11 @@ type indexer struct { indexingEnabled bool // Chain ID --> index of blocks of that chain (if applicable) - blockIndices map[ids.ID]Index + blockIndices map[ids.ID]*index // Chain ID --> index of vertices of that chain (if applicable) - vtxIndices map[ids.ID]Index + vtxIndices map[ids.ID]*index // Chain ID --> index of txs of that chain (if applicable) - txIndices map[ids.ID]Index + txIndices map[ids.ID]*index // Notifies of newly accepted blocks blockAcceptorGroup snow.AcceptorGroup @@ -331,12 +312,12 @@ func (i *indexer) registerChainHelper( prefixEnd byte, name, endpoint string, acceptorGroup snow.AcceptorGroup, -) (Index, error) { +) (*index, error) { prefix := make([]byte, ids.IDLen+wrappers.ByteLen) copy(prefix, chainID[:]) prefix[ids.IDLen] = prefixEnd indexDB := prefixdb.New(prefix, i.db) - index, err := newIndex(indexDB, i.log, i.codec, 
i.clock) + index, err := newIndex(indexDB, i.log, i.clock) if err != nil { _ = indexDB.Close() return nil, err @@ -353,7 +334,7 @@ func (i *indexer) registerChainHelper( codec := json.NewCodec() apiServer.RegisterCodec(codec, "application/json") apiServer.RegisterCodec(codec, "application/json;charset=UTF-8") - if err := apiServer.RegisterService(&service{Index: index}, "index"); err != nil { + if err := apiServer.RegisterService(&service{index: index}, "index"); err != nil { _ = index.Close() return nil, err } diff --git a/indexer/indexer_test.go b/indexer/indexer_test.go index a8c2f474f743..348aa87d13a9 100644 --- a/indexer/indexer_test.go +++ b/indexer/indexer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -20,7 +20,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -67,7 +68,6 @@ func TestNewIndexer(t *testing.T) { require.NoError(err) require.IsType(&indexer{}, idxrIntf) idxr := idxrIntf.(*indexer) - require.NotNil(idxr.codec) require.NotNil(idxr.log) require.NotNil(idxr.db) require.False(idxr.closed) @@ -153,8 +153,8 @@ func TestIndexer(t *testing.T) { idxr.clock.Set(now) // Assert state is right - chain1Ctx := snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() + snow1Ctx := snowtest.Context(t, snowtest.CChainID) + chain1Ctx := snowtest.ConsensusContext(snow1Ctx) isIncomplete, err := idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.False(isIncomplete) @@ -163,7 +163,7 @@ func TestIndexer(t *testing.T) { require.False(previouslyIndexed) // Register this chain, creating a new index - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) @@ -258,8 +258,8 @@ func TestIndexer(t *testing.T) { require.Contains(server.endpoints, "/block") // Register a DAG chain - chain2Ctx := snow.DefaultConsensusContextTest() - chain2Ctx.ChainID = ids.GenerateTestID() + snow2Ctx := snowtest.Context(t, snowtest.XChainID) + chain2Ctx := snowtest.ConsensusContext(snow2Ctx) isIncomplete, err = idxr.isIncomplete(chain2Ctx.ChainID) require.NoError(err) require.False(isIncomplete) @@ -418,15 +418,15 @@ func TestIncompleteIndex(t *testing.T) { require.False(idxr.indexingEnabled) // Register a chain - chain1Ctx := snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() + snow1Ctx := snowtest.Context(t, snowtest.CChainID) + chain1Ctx := snowtest.ConsensusContext(snow1Ctx) isIncomplete, err := idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.False(isIncomplete) previouslyIndexed, err := idxr.previouslyIndexed(chain1Ctx.ChainID) require.NoError(err) require.False(previouslyIndexed) - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) @@ -500,13 +500,14 @@ func TestIgnoreNonDefaultChains(t *testing.T) 
{ require.IsType(&indexer{}, idxrIntf) idxr := idxrIntf.(*indexer) - // Assert state is right - chain1Ctx := snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() - chain1Ctx.SubnetID = ids.GenerateTestID() + // Create chain1Ctx for a random subnet + chain. + chain1Ctx := snowtest.ConsensusContext(&snow.Context{ + ChainID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + }) // RegisterChain should return without adding an index for this chain - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) require.Empty(idxr.blockIndices) } diff --git a/indexer/service.go b/indexer/service.go index 98bc91e90787..83f9912f2fea 100644 --- a/indexer/service.go +++ b/indexer/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -15,7 +15,7 @@ import ( ) type service struct { - Index + index *index } type FormattedContainer struct { @@ -46,11 +46,11 @@ type GetLastAcceptedArgs struct { } func (s *service) GetLastAccepted(_ *http.Request, args *GetLastAcceptedArgs, reply *FormattedContainer) error { - container, err := s.Index.GetLastAccepted() + container, err := s.index.GetLastAccepted() if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -64,11 +64,11 @@ type GetContainerByIndexArgs struct { } func (s *service) GetContainerByIndex(_ *http.Request, args *GetContainerByIndexArgs, reply *FormattedContainer) error { - container, err := s.Index.GetContainerByIndex(uint64(args.Index)) + container, err := s.index.GetContainerByIndex(uint64(args.Index)) if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -92,14 +92,14 @@ type GetContainerRangeResponse struct { // If [n] > [MaxFetchedByRange], returns an error. // If we run out of transactions, returns the ones fetched before running out. 
func (s *service) GetContainerRange(_ *http.Request, args *GetContainerRangeArgs, reply *GetContainerRangeResponse) error { - containers, err := s.Index.GetContainerRange(uint64(args.StartIndex), uint64(args.NumToFetch)) + containers, err := s.index.GetContainerRange(uint64(args.StartIndex), uint64(args.NumToFetch)) if err != nil { return err } reply.Containers = make([]FormattedContainer, len(containers)) for i, container := range containers { - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -120,7 +120,7 @@ type GetIndexResponse struct { } func (s *service) GetIndex(_ *http.Request, args *GetIndexArgs, reply *GetIndexResponse) error { - index, err := s.Index.GetIndex(args.ID) + index, err := s.index.GetIndex(args.ID) reply.Index = json.Uint64(index) return err } @@ -134,7 +134,7 @@ type IsAcceptedResponse struct { } func (s *service) IsAccepted(_ *http.Request, args *IsAcceptedArgs, reply *IsAcceptedResponse) error { - _, err := s.Index.GetIndex(args.ID) + _, err := s.index.GetIndex(args.ID) if err == nil { reply.IsAccepted = true return nil @@ -152,11 +152,11 @@ type GetContainerByIDArgs struct { } func (s *service) GetContainerByID(_ *http.Request, args *GetContainerByIDArgs, reply *FormattedContainer) error { - container, err := s.Index.GetContainerByID(args.ID) + container, err := s.index.GetContainerByID(args.ID) if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } diff --git a/ipcs/chainipc.go b/ipcs/chainipc.go index 56d4393360ff..bb78cc175327 100644 --- a/ipcs/chainipc.go +++ b/ipcs/chainipc.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs diff --git a/ipcs/eventsocket.go b/ipcs/eventsocket.go index 109b42bb34af..0dbbe1c92e5a 100644 --- a/ipcs/eventsocket.go +++ b/ipcs/eventsocket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs diff --git a/ipcs/socket/socket.go b/ipcs/socket/socket.go index d3ca391dd019..77f2d6fdb8e2 100644 --- a/ipcs/socket/socket.go +++ b/ipcs/socket/socket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket diff --git a/ipcs/socket/socket_test.go b/ipcs/socket/socket_test.go index 4204d032285a..a2c1ec638754 100644 --- a/ipcs/socket/socket_test.go +++ b/ipcs/socket/socket_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package socket @@ -8,6 +8,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestSocketSendAndReceive(t *testing.T) { @@ -21,7 +23,7 @@ func TestSocketSendAndReceive(t *testing.T) { ) // Create socket and client; wait for client to connect - socket := NewSocket(socketName, nil) + socket := NewSocket(socketName, logging.NoLog{}) socket.accept, connCh = newTestAcceptFn(t) require.NoError(socket.Listen()) diff --git a/ipcs/socket/socket_unix.go b/ipcs/socket/socket_unix.go index 98f4ad492330..14d5aabde4e3 100644 --- a/ipcs/socket/socket_unix.go +++ b/ipcs/socket/socket_unix.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build !windows && !plan9 && !js diff --git a/ipcs/socket/socket_windows.go b/ipcs/socket/socket_windows.go index eb54ccf07066..99590cb674b4 100644 --- a/ipcs/socket/socket_windows.go +++ b/ipcs/socket/socket_windows.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build windows diff --git a/main/main.go b/main/main.go index 5d85530177dd..549ef65f48cf 100644 --- a/main/main.go +++ b/main/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -41,11 +41,16 @@ func main() { os.Exit(1) } - nodeApp := app.New(nodeConfig) // Create node wrapper if term.IsTerminal(int(os.Stdout.Fd())) { fmt.Println(app.Header) } + nodeApp, err := app.New(nodeConfig) + if err != nil { + fmt.Printf("couldn't start node: %s\n", err) + os.Exit(1) + } + exitCode := app.Run(nodeApp) os.Exit(exitCode) } diff --git a/message/creator.go b/message/creator.go index f1a6def2b21e..8040bccb1861 100644 --- a/message/creator.go +++ b/message/creator.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message import ( - "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ Creator = (*creator)(nil) @@ -32,7 +32,7 @@ func NewCreator( compressionType compression.Type, maxMessageTimeout time.Duration, ) (Creator, error) { - namespace := fmt.Sprintf("%s_codec", parentNamespace) + namespace := metric.AppendNamespace(parentNamespace, "codec") builder, err := newMsgBuilder( log, namespace, diff --git a/message/fields.go b/message/fields.go index 87bffe518abd..08e744fab911 100644 --- a/message/fields.go +++ b/message/fields.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/message/inbound_msg_builder.go b/message/inbound_msg_builder.go index a287ade2d8cb..b32dbc5d480d 100644 --- a/message/inbound_msg_builder.go +++ b/message/inbound_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package message @@ -284,6 +284,26 @@ func InboundAppRequest( } } +func InboundAppError( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + errorCode int32, + errorMessage string, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: AppErrorOp, + message: &p2p.AppError{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + expiration: mockable.MaxTime, + } +} + func InboundAppResponse( chainID ids.ID, requestID uint32, @@ -304,7 +324,7 @@ func InboundAppResponse( func encodeIDs(ids []ids.ID, result [][]byte) { for i, id := range ids { - copy := id - result[i] = copy[:] + id := id + result[i] = id[:] } } diff --git a/message/inbound_msg_builder_test.go b/message/inbound_msg_builder_test.go index c9642c57250c..b14b3ddedab7 100644 --- a/message/inbound_msg_builder_test.go +++ b/message/inbound_msg_builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -396,3 +397,46 @@ func TestInboundMsgBuilder(t *testing.T) { }, ) } + +func TestAppError(t *testing.T) { + require := require.New(t) + + mb, err := newMsgBuilder( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + time.Second, + ) + require.NoError(err) + + nodeID := ids.GenerateTestNodeID() + chainID := ids.GenerateTestID() + requestID := uint32(1) + errorCode := int32(2) + errorMessage := "hello world" + + want := &p2p.Message{ + Message: &p2p.Message_AppError{ + AppError: &p2p.AppError{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + }, + } + + outMsg, err := mb.createOutbound(want, compression.TypeNone, false) + require.NoError(err) + + got, err := mb.parseInbound(outMsg.Bytes(), nodeID, func() {}) + require.NoError(err) + + require.Equal(nodeID, got.NodeID()) + require.Equal(AppErrorOp, got.Op()) + + msg, ok := got.Message().(*p2p.AppError) + require.True(ok) + require.Equal(errorCode, msg.ErrorCode) + require.Equal(errorMessage, msg.ErrorMessage) +} diff --git a/message/internal_msg_builder.go b/message/internal_msg_builder.go index 46e7b7e78405..38a95cb78a8c 100644 --- a/message/internal_msg_builder.go +++ b/message/internal_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//nolint:stylecheck // proto generates interfaces that fail linting @@ -52,10 +52,6 @@ var ( _ requestIDGetter = (*QueryFailed)(nil) _ engineTypeGetter = (*QueryFailed)(nil) - _ fmt.Stringer = (*AppRequestFailed)(nil) - _ chainIDGetter = (*AppRequestFailed)(nil) - _ requestIDGetter = (*AppRequestFailed)(nil) - _ fmt.Stringer = (*CrossChainAppRequest)(nil) _ sourceChainIDGetter = (*CrossChainAppRequest)(nil) _ chainIDGetter = (*CrossChainAppRequest)(nil) @@ -365,42 +361,6 @@ func InternalQueryFailed( } } -type AppRequestFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` -} - -func (m *AppRequestFailed) String() string { - return fmt.Sprintf( - "ChainID: %s RequestID: %d", - m.ChainID, m.RequestID, - ) -} - -func (m *AppRequestFailed) GetChainId() []byte { - return m.ChainID[:] -} - -func (m *AppRequestFailed) GetRequestId() uint32 { - return m.RequestID -} - -func InternalAppRequestFailed( - nodeID ids.NodeID, - chainID ids.ID, - requestID uint32, -) InboundMessage { - return &inboundMessage{ - nodeID: nodeID, - op: AppRequestFailedOp, - message: &AppRequestFailed{ - ChainID: chainID, - RequestID: requestID, - }, - expiration: mockable.MaxTime, - } -} - type CrossChainAppRequest struct { SourceChainID ids.ID `json:"source_chain_id,omitempty"` DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` @@ -452,6 +412,8 @@ type CrossChainAppRequestFailed struct { SourceChainID ids.ID `json:"source_chain_id,omitempty"` DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` RequestID uint32 `json:"request_id,omitempty"` + ErrorCode int32 `json:"error_code,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` } func (m *CrossChainAppRequestFailed) String() string { @@ -473,19 +435,23 @@ func (m *CrossChainAppRequestFailed) GetRequestId() uint32 { return m.RequestID } -func InternalCrossChainAppRequestFailed( +func InternalCrossChainAppError( nodeID ids.NodeID, sourceChainID ids.ID, destinationChainID ids.ID, requestID uint32, + errorCode int32, + errorMessage string, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, - op: CrossChainAppRequestFailedOp, + op: CrossChainAppErrorOp, message: &CrossChainAppRequestFailed{ SourceChainID: sourceChainID, DestinationChainID: destinationChainID, RequestID: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, }, expiration: mockable.MaxTime, } diff --git a/message/messages.go b/message/messages.go index 8d9671e63123..b8b5db69e958 100644 --- a/message/messages.go +++ b/message/messages.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -134,8 +134,8 @@ func (m *outboundMessage) BytesSavedCompression() int { type msgBuilder struct { log logging.Logger + // TODO: Remove gzip once v1.11.x is out. 
gzipCompressor compression.Compressor - gzipCompressTimeMetrics map[Op]metric.Averager gzipDecompressTimeMetrics map[Op]metric.Averager zstdCompressor compression.Compressor @@ -164,7 +164,6 @@ func newMsgBuilder( log: log, gzipCompressor: gzipCompressor, - gzipCompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), gzipDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), zstdCompressor: zstdCompressor, @@ -176,13 +175,6 @@ func newMsgBuilder( errs := wrappers.Errs{} for _, op := range ExternalOps { - mb.gzipCompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("gzip_%s_compress_time", op), - fmt.Sprintf("time (in ns) to compress %s messages with gzip", op), - metrics, - &errs, - ) mb.gzipDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( namespace, fmt.Sprintf("gzip_%s_decompress_time", op), @@ -236,17 +228,6 @@ func (mb *msgBuilder) marshal( switch compressionType { case compression.TypeNone: return uncompressedMsgBytes, 0, op, nil - case compression.TypeGzip: - compressedBytes, err := mb.gzipCompressor.Compress(uncompressedMsgBytes) - if err != nil { - return nil, 0, 0, err - } - compressedMsg = p2p.Message{ - Message: &p2p.Message_CompressedGzip{ - CompressedGzip: compressedBytes, - }, - } - opToCompressTimeMetrics = mb.gzipCompressTimeMetrics case compression.TypeZstd: compressedBytes, err := mb.zstdCompressor.Compress(uncompressedMsgBytes) if err != nil { diff --git a/message/messages_benchmark_test.go b/message/messages_benchmark_test.go index 8d0939a67348..48ae10acf5dd 100644 --- a/message/messages_benchmark_test.go +++ b/message/messages_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -26,35 +26,35 @@ var ( dummyOnFinishedHandling = func() {} ) -// Benchmarks marshal-ing "Version" message. +// Benchmarks marshal-ing "Handshake" message. 
// // e.g., // // $ go install -v golang.org/x/tools/cmd/benchcmp@latest // $ go install -v golang.org/x/perf/cmd/benchstat@latest // -// $ go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.after.txt +// $ go test -run=NONE -bench=BenchmarkMarshalHandshake > /tmp/cpu.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalHandshake > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // -// $ go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.after.txt +// $ go test -run=NONE -bench=BenchmarkMarshalHandshake -benchmem > /tmp/mem.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalHandshake -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt -func BenchmarkMarshalVersion(b *testing.B) { +func BenchmarkMarshalHandshake(b *testing.B) { require := require.New(b) id := ids.GenerateTestID() msg := p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, MyVersion: "v1.2.3", - MyVersionTime: uint64(time.Now().Unix()), + IpSigningTime: uint64(time.Now().Unix()), Sig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, }, @@ -87,30 +87,30 @@ func BenchmarkMarshalVersion(b *testing.B) { // $ go install -v golang.org/x/tools/cmd/benchcmp@latest // $ go install -v golang.org/x/perf/cmd/benchstat@latest // -// $ go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.after.txt +// $ go test -run=NONE -bench=BenchmarkUnmarshalHandshake > /tmp/cpu.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalHandshake > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // -// $ go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.after.txt +// $ go test -run=NONE -bench=BenchmarkUnmarshalHandshake -benchmem > /tmp/mem.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalHandshake -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt -func BenchmarkUnmarshalVersion(b *testing.B) { +func BenchmarkUnmarshalHandshake(b *testing.B) { require := require.New(b) b.StopTimer() id := ids.GenerateTestID() msg := p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, MyVersion: "v1.2.3", - MyVersionTime: uint64(time.Now().Unix()), + IpSigningTime: uint64(time.Now().Unix()), Sig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, }, diff --git a/message/messages_test.go 
b/message/messages_test.go index a7cf74c95c48..27c0fb5fb646 100644 --- a/message/messages_test.go +++ b/message/messages_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -122,17 +122,17 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "version message with no compression", - op: VersionOp, + desc: "Handshake message with no compression", + op: HandshakeOp, msg: &p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(nowUnix), IpAddr: []byte(net.IPv6zero), IpPort: 9651, MyVersion: "v1.2.3", - MyVersionTime: uint64(nowUnix), + IpSigningTime: uint64(nowUnix), Sig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{testID[:]}, }, @@ -143,55 +143,67 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "peer_list message with no compression", - op: PeerListOp, + desc: "get_peer_list message with no compression", + op: GetPeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ - ClaimedIpPorts: []*p2p.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv4zero), - IpPort: 10, - Timestamp: 1, - Signature: []byte{0}, - }, + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + KnownPeers: &p2p.BloomFilter{ + Filter: make([]byte, 2048), + Salt: make([]byte, 32), }, }, }, }, compressionType: compression.TypeNone, - bypassThrottling: true, + bypassThrottling: false, bytesSaved: false, }, { - desc: "peer_list message with gzip compression", + desc: "get_peer_list message with zstd compression", + op: GetPeerListOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + KnownPeers: &p2p.BloomFilter{ + Filter: make([]byte, 2048), + Salt: make([]byte, 32), + }, + }, + }, + }, + compressionType: compression.TypeZstd, + bypassThrottling: false, + bytesSaved: true, + }, + { + desc: "peer_list message with no compression", op: PeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: []*p2p.ClaimedIpPort{ { X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv6zero), - IpPort: 9651, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], + IpAddr: []byte(net.IPv4zero), + IpPort: 10, + Timestamp: 1, + Signature: []byte{0}, }, }, }, }, }, - compressionType: compression.TypeGzip, + compressionType: compression.TypeNone, bypassThrottling: true, - bytesSaved: true, + bytesSaved: false, }, { desc: "peer_list message with zstd compression", op: PeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: []*p2p.ClaimedIpPort{ { X509Certificate: testTLSCert.Certificate[0], @@ -208,25 +220,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: true, }, - { - desc: "peer_list_ack message with no compression", - op: PeerListAckOp, - msg: &p2p.Message{ - Message: &p2p.Message_PeerListAck{ - PeerListAck: &p2p.PeerListAck{ - PeerAcks: []*p2p.PeerAck{ - { - TxId: testID[:], - Timestamp: 1, - }, - }, - }, - }, - }, - compressionType: compression.TypeNone, - bypassThrottling: false, - bytesSaved: false, - }, { desc: 
"get_state_summary_frontier message with no compression", op: GetStateSummaryFrontierOp, @@ -259,22 +252,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "state_summary_frontier message with gzip compression", - op: StateSummaryFrontierOp, - msg: &p2p.Message{ - Message: &p2p.Message_StateSummaryFrontier_{ - StateSummaryFrontier_: &p2p.StateSummaryFrontier{ - ChainId: testID[:], - RequestId: 1, - Summary: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "state_summary_frontier message with zstd compression", op: StateSummaryFrontierOp, @@ -308,23 +285,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "get_accepted_state_summary message with gzip compression", - op: GetAcceptedStateSummaryOp, - msg: &p2p.Message{ - Message: &p2p.Message_GetAcceptedStateSummary{ - GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Heights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: false, - }, { desc: "get_accepted_state_summary message with zstd compression", op: GetAcceptedStateSummaryOp, @@ -358,22 +318,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "accepted_state_summary message with gzip compression", - op: AcceptedStateSummaryOp, - msg: &p2p.Message{ - Message: &p2p.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2p.AcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - SummaryIds: [][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "accepted_state_summary message with zstd compression", op: AcceptedStateSummaryOp, @@ -491,22 +435,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "ancestors message with gzip compression", - op: AncestorsOp, - msg: &p2p.Message{ - Message: &p2p.Message_Ancestors_{ - Ancestors_: &p2p.Ancestors{ - ChainId: testID[:], - RequestId: 12345, - Containers: compressibleContainers, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "ancestors message with zstd compression", op: AncestorsOp, @@ -558,23 +486,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "put message with gzip compression", - op: PutOp, - msg: &p2p.Message{ - Message: &p2p.Message_Put{ - Put: &p2p.Put{ - ChainId: testID[:], - RequestId: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "put message with zstd compression", op: PutOp, @@ -610,24 +521,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "push_query message with gzip compression", - op: PushQueryOp, - msg: &p2p.Message{ - Message: &p2p.Message_PushQuery{ - PushQuery: &p2p.PushQuery{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: 
true, - bytesSaved: true, - }, { desc: "push_query message with zstd compression", op: PushQueryOp, @@ -697,23 +590,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_request message with gzip compression", - op: AppRequestOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppRequest{ - AppRequest: &p2p.AppRequest{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_request message with zstd compression", op: AppRequestOp, @@ -747,22 +623,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_response message with gzip compression", - op: AppResponseOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppResponse{ - AppResponse: &p2p.AppResponse{ - ChainId: testID[:], - RequestId: 1, - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_response message with zstd compression", op: AppResponseOp, @@ -794,21 +654,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_gossip message with gzip compression", - op: AppGossipOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppGossip{ - AppGossip: &p2p.AppGossip{ - ChainId: testID[:], - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_gossip message with zstd compression", op: AppGossipOp, @@ -836,8 +681,9 @@ func TestMessage(t *testing.T) { require.Equal(tv.bypassThrottling, encodedMsg.BypassThrottling()) require.Equal(tv.op, encodedMsg.Op()) - bytesSaved := encodedMsg.BytesSavedCompression() - require.Equal(tv.bytesSaved, bytesSaved > 0) + if bytesSaved := encodedMsg.BytesSavedCompression(); tv.bytesSaved { + require.Greater(bytesSaved, 0) + } parsedMsg, err := mb.parseInbound(encodedMsg.Bytes(), ids.EmptyNodeID, func() {}) require.NoError(err) diff --git a/message/mock_message.go b/message/mock_message.go index 938a90edbb8e..ea6b9a67afcf 100644 --- a/message/mock_message.go +++ b/message/mock_message.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMessage) +// +// Generated by this command: +// +// mockgen -package=message -destination=message/mock_message.go github.com/ava-labs/avalanchego/message OutboundMessage +// // Package message is a generated GoMock package. package message diff --git a/message/mock_outbound_message_builder.go b/message/mock_outbound_message_builder.go index 6c128123cc3c..0d053f71090b 100644 --- a/message/mock_outbound_message_builder.go +++ b/message/mock_outbound_message_builder.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMsgBuilder) +// +// Generated by this command: +// +// mockgen -package=message -destination=message/mock_outbound_message_builder.go github.com/ava-labs/avalanchego/message OutboundMsgBuilder +// // Package message is a generated GoMock package. 
package message @@ -50,7 +52,7 @@ func (m *MockOutboundMsgBuilder) Accepted(arg0 ids.ID, arg1 uint32, arg2 []ids.I } // Accepted indicates an expected call of Accepted. -func (mr *MockOutboundMsgBuilderMockRecorder) Accepted(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Accepted(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Accepted), arg0, arg1, arg2) } @@ -65,7 +67,7 @@ func (m *MockOutboundMsgBuilder) AcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 } // AcceptedFrontier indicates an expected call of AcceptedFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedFrontier), arg0, arg1, arg2) } @@ -80,7 +82,7 @@ func (m *MockOutboundMsgBuilder) AcceptedStateSummary(arg0 ids.ID, arg1 uint32, } // AcceptedStateSummary indicates an expected call of AcceptedStateSummary. -func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedStateSummary(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedStateSummary(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedStateSummary), arg0, arg1, arg2) } @@ -95,7 +97,7 @@ func (m *MockOutboundMsgBuilder) Ancestors(arg0 ids.ID, arg1 uint32, arg2 [][]by } // Ancestors indicates an expected call of Ancestors. -func (mr *MockOutboundMsgBuilderMockRecorder) Ancestors(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Ancestors(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ancestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ancestors), arg0, arg1, arg2) } @@ -110,7 +112,7 @@ func (m *MockOutboundMsgBuilder) AppGossip(arg0 ids.ID, arg1 []byte) (OutboundMe } // AppGossip indicates an expected call of AppGossip. -func (mr *MockOutboundMsgBuilderMockRecorder) AppGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppGossip(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppGossip), arg0, arg1) } @@ -125,7 +127,7 @@ func (m *MockOutboundMsgBuilder) AppRequest(arg0 ids.ID, arg1 uint32, arg2 time. } // AppRequest indicates an expected call of AppRequest. -func (mr *MockOutboundMsgBuilderMockRecorder) AppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppRequest(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppRequest), arg0, arg1, arg2, arg3) } @@ -140,7 +142,7 @@ func (m *MockOutboundMsgBuilder) AppResponse(arg0 ids.ID, arg1 uint32, arg2 []by } // AppResponse indicates an expected call of AppResponse. 
-func (mr *MockOutboundMsgBuilderMockRecorder) AppResponse(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppResponse(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppResponse), arg0, arg1, arg2) } @@ -155,7 +157,7 @@ func (m *MockOutboundMsgBuilder) Chits(arg0 ids.ID, arg1 uint32, arg2, arg3, arg } // Chits indicates an expected call of Chits. -func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chits", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Chits), arg0, arg1, arg2, arg3, arg4) } @@ -170,7 +172,7 @@ func (m *MockOutboundMsgBuilder) Get(arg0 ids.ID, arg1 uint32, arg2 time.Duratio } // Get indicates an expected call of Get. -func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Get), arg0, arg1, arg2, arg3, arg4) } @@ -185,7 +187,7 @@ func (m *MockOutboundMsgBuilder) GetAccepted(arg0 ids.ID, arg1 uint32, arg2 time } // GetAccepted indicates an expected call of GetAccepted. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAccepted), arg0, arg1, arg2, arg3, arg4) } @@ -200,7 +202,7 @@ func (m *MockOutboundMsgBuilder) GetAcceptedFrontier(arg0 ids.ID, arg1 uint32, a } // GetAcceptedFrontier indicates an expected call of GetAcceptedFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedFrontier), arg0, arg1, arg2, arg3) } @@ -215,7 +217,7 @@ func (m *MockOutboundMsgBuilder) GetAcceptedStateSummary(arg0 ids.ID, arg1 uint3 } // GetAcceptedStateSummary indicates an expected call of GetAcceptedStateSummary. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedStateSummary(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedStateSummary), arg0, arg1, arg2, arg3) } @@ -230,11 +232,26 @@ func (m *MockOutboundMsgBuilder) GetAncestors(arg0 ids.ID, arg1 uint32, arg2 tim } // GetAncestors indicates an expected call of GetAncestors. 
-func (mr *MockOutboundMsgBuilderMockRecorder) GetAncestors(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAncestors(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAncestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAncestors), arg0, arg1, arg2, arg3, arg4) } +// GetPeerList mocks base method. +func (m *MockOutboundMsgBuilder) GetPeerList(arg0, arg1 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerList", arg0, arg1) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerList indicates an expected call of GetPeerList. +func (mr *MockOutboundMsgBuilderMockRecorder) GetPeerList(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetPeerList), arg0, arg1) +} + // GetStateSummaryFrontier mocks base method. func (m *MockOutboundMsgBuilder) GetStateSummaryFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration) (OutboundMessage, error) { m.ctrl.T.Helper() @@ -245,39 +262,39 @@ func (m *MockOutboundMsgBuilder) GetStateSummaryFrontier(arg0 ids.ID, arg1 uint3 } // GetStateSummaryFrontier indicates an expected call of GetStateSummaryFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetStateSummaryFrontier), arg0, arg1, arg2) } -// PeerList mocks base method. -func (m *MockOutboundMsgBuilder) PeerList(arg0 []ips.ClaimedIPPort, arg1 bool) (OutboundMessage, error) { +// Handshake mocks base method. +func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3, arg4 string, arg5, arg6, arg7 uint32, arg8 uint64, arg9 []byte, arg10 []ids.ID, arg11, arg12 []uint32, arg13, arg14 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeerList", arg0, arg1) + ret := m.ctrl.Call(m, "Handshake", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } -// PeerList indicates an expected call of PeerList. -func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 interface{}) *gomock.Call { +// Handshake indicates an expected call of Handshake. +func (mr *MockOutboundMsgBuilderMockRecorder) Handshake(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerList), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Handshake", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Handshake), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14) } -// PeerListAck mocks base method. -func (m *MockOutboundMsgBuilder) PeerListAck(arg0 []*p2p.PeerAck) (OutboundMessage, error) { +// PeerList mocks base method. 
+func (m *MockOutboundMsgBuilder) PeerList(arg0 []*ips.ClaimedIPPort, arg1 bool) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeerListAck", arg0) + ret := m.ctrl.Call(m, "PeerList", arg0, arg1) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } -// PeerListAck indicates an expected call of PeerListAck. -func (mr *MockOutboundMsgBuilderMockRecorder) PeerListAck(arg0 interface{}) *gomock.Call { +// PeerList indicates an expected call of PeerList. +func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerListAck", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerListAck), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerList), arg0, arg1) } // Ping mocks base method. @@ -290,7 +307,7 @@ func (m *MockOutboundMsgBuilder) Ping(arg0 uint32, arg1 []*p2p.SubnetUptime) (Ou } // Ping indicates an expected call of Ping. -func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping), arg0, arg1) } @@ -305,7 +322,7 @@ func (m *MockOutboundMsgBuilder) Pong(arg0 uint32, arg1 []*p2p.SubnetUptime) (Ou } // Pong indicates an expected call of Pong. -func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pong", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Pong), arg0, arg1) } @@ -320,7 +337,7 @@ func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.D } // PullQuery indicates an expected call of PullQuery. -func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4, arg5) } @@ -335,7 +352,7 @@ func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.D } // PushQuery indicates an expected call of PushQuery. -func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4, arg5) } @@ -350,7 +367,7 @@ func (m *MockOutboundMsgBuilder) Put(arg0 ids.ID, arg1 uint32, arg2 []byte, arg3 } // Put indicates an expected call of Put. 
-func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Put), arg0, arg1, arg2, arg3) } @@ -365,22 +382,7 @@ func (m *MockOutboundMsgBuilder) StateSummaryFrontier(arg0 ids.ID, arg1 uint32, } // StateSummaryFrontier indicates an expected call of StateSummaryFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) StateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) StateSummaryFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).StateSummaryFrontier), arg0, arg1, arg2) } - -// Version mocks base method. -func (m *MockOutboundMsgBuilder) Version(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3 string, arg4 uint64, arg5 []byte, arg6 []ids.ID) (OutboundMessage, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(OutboundMessage) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Version indicates an expected call of Version. -func (mr *MockOutboundMsgBuilderMockRecorder) Version(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Version), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} diff --git a/message/ops.go b/message/ops.go index e069553abd42..0c58eb60690b 100644 --- a/message/ops.go +++ b/message/ops.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -21,9 +21,9 @@ const ( // Handshake: PingOp Op = iota PongOp - VersionOp + HandshakeOp + GetPeerListOp PeerListOp - PeerListAckOp // State sync: GetStateSummaryFrontierOp GetStateSummaryFrontierFailedOp @@ -51,12 +51,12 @@ const ( ChitsOp // Application: AppRequestOp - AppRequestFailedOp + AppErrorOp AppResponseOp AppGossipOp // Cross chain: CrossChainAppRequestOp - CrossChainAppRequestFailedOp + CrossChainAppErrorOp CrossChainAppResponseOp // Internal: ConnectedOp @@ -71,9 +71,9 @@ var ( HandshakeOps = []Op{ PingOp, PongOp, - VersionOp, + HandshakeOp, + GetPeerListOp, PeerListOp, - PeerListAckOp, } // List of all consensus request message types @@ -115,9 +115,9 @@ var ( GetAncestorsFailedOp, GetFailedOp, QueryFailedOp, - AppRequestFailedOp, + AppErrorOp, CrossChainAppRequestOp, - CrossChainAppRequestFailedOp, + CrossChainAppErrorOp, CrossChainAppResponseOp, ConnectedOp, ConnectedSubnetOp, @@ -165,12 +165,12 @@ var ( AsynchronousOps = []Op{ // Application AppRequestOp, - AppRequestFailedOp, + AppErrorOp, AppGossipOp, AppResponseOp, // Cross chain CrossChainAppRequestOp, - CrossChainAppRequestFailedOp, + CrossChainAppErrorOp, CrossChainAppResponseOp, } @@ -182,8 +182,8 @@ var ( GetAncestorsFailedOp: AncestorsOp, GetFailedOp: PutOp, QueryFailedOp: ChitsOp, - AppRequestFailedOp: AppResponseOp, - CrossChainAppRequestFailedOp: CrossChainAppResponseOp, + AppErrorOp: AppResponseOp, + CrossChainAppErrorOp: CrossChainAppResponseOp, } UnrequestedOps = set.Of( GetAcceptedFrontierOp, @@ -209,12 +209,12 @@ func (op Op) String() string { return "ping" case PongOp: return "pong" - case VersionOp: - return "version" + case HandshakeOp: + return "handshake" + case GetPeerListOp: + return "get_peerlist" case PeerListOp: return "peerlist" - case PeerListAckOp: - return "peerlist_ack" // State sync case GetStateSummaryFrontierOp: return "get_state_summary_frontier" @@ -265,8 +265,8 @@ func (op Op) String() string { // Application case AppRequestOp: return "app_request" - case AppRequestFailedOp: - return "app_request_failed" + case AppErrorOp: + return "app_error" case AppResponseOp: return "app_response" case AppGossipOp: @@ -274,8 +274,8 @@ func (op Op) String() string { // Cross chain case CrossChainAppRequestOp: return "cross_chain_app_request" - case CrossChainAppRequestFailedOp: - return "cross_chain_app_request_failed" + case CrossChainAppErrorOp: + return "cross_chain_app_error" case CrossChainAppResponseOp: return "cross_chain_app_response" // Internal @@ -303,12 +303,12 @@ func Unwrap(m *p2p.Message) (fmt.Stringer, error) { return msg.Ping, nil case *p2p.Message_Pong: return msg.Pong, nil - case *p2p.Message_Version: - return msg.Version, nil - case *p2p.Message_PeerList: - return msg.PeerList, nil - case *p2p.Message_PeerListAck: - return msg.PeerListAck, nil + case *p2p.Message_Handshake: + return msg.Handshake, nil + case *p2p.Message_GetPeerList: + return msg.GetPeerList, nil + case *p2p.Message_PeerList_: + return msg.PeerList_, nil // State sync: case *p2p.Message_GetStateSummaryFrontier: return msg.GetStateSummaryFrontier, nil @@ -347,6 +347,8 @@ func Unwrap(m *p2p.Message) (fmt.Stringer, error) { return msg.AppRequest, nil case *p2p.Message_AppResponse: return msg.AppResponse, nil + case *p2p.Message_AppError: + return msg.AppError, nil case *p2p.Message_AppGossip: return msg.AppGossip, nil default: @@ -360,12 +362,12 @@ func ToOp(m *p2p.Message) (Op, error) { return PingOp, nil case *p2p.Message_Pong: return PongOp, nil - case *p2p.Message_Version: - return VersionOp, nil 
- case *p2p.Message_PeerList: + case *p2p.Message_Handshake: + return HandshakeOp, nil + case *p2p.Message_GetPeerList: + return GetPeerListOp, nil + case *p2p.Message_PeerList_: return PeerListOp, nil - case *p2p.Message_PeerListAck: - return PeerListAckOp, nil case *p2p.Message_GetStateSummaryFrontier: return GetStateSummaryFrontierOp, nil case *p2p.Message_StateSummaryFrontier_: @@ -400,6 +402,8 @@ func ToOp(m *p2p.Message) (Op, error) { return AppRequestOp, nil case *p2p.Message_AppResponse: return AppResponseOp, nil + case *p2p.Message_AppError: + return AppErrorOp, nil case *p2p.Message_AppGossip: return AppGossipOp, nil default: diff --git a/message/outbound_msg_builder.go b/message/outbound_msg_builder.go index f38a1d98ffed..150c9f4f3a65 100644 --- a/message/outbound_msg_builder.go +++ b/message/outbound_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -18,23 +18,32 @@ var _ OutboundMsgBuilder = (*outMsgBuilder)(nil) // with a reference count of 1. Once the reference count hits 0, the message // bytes should no longer be accessed. type OutboundMsgBuilder interface { - Version( + Handshake( networkID uint32, myTime uint64, ip ips.IPPort, myVersion string, - myVersionTime uint64, + client string, + major uint32, + minor uint32, + patch uint32, + ipSigningTime uint64, sig []byte, trackedSubnets []ids.ID, + supportedACPs []uint32, + objectedACPs []uint32, + knownPeersFilter []byte, + knownPeersSalt []byte, ) (OutboundMessage, error) - PeerList( - peers []ips.ClaimedIPPort, - bypassThrottling bool, + GetPeerList( + knownPeersFilter []byte, + knownPeersSalt []byte, ) (OutboundMessage, error) - PeerListAck( - peerAcks []*p2p.PeerAck, + PeerList( + peers []*ips.ClaimedIPPort, + bypassThrottling bool, ) (OutboundMessage, error) Ping( @@ -224,29 +233,49 @@ func (b *outMsgBuilder) Pong( ) } -func (b *outMsgBuilder) Version( +func (b *outMsgBuilder) Handshake( networkID uint32, myTime uint64, ip ips.IPPort, myVersion string, - myVersionTime uint64, + client string, + major uint32, + minor uint32, + patch uint32, + ipSigningTime uint64, sig []byte, trackedSubnets []ids.ID, + supportedACPs []uint32, + objectedACPs []uint32, + knownPeersFilter []byte, + knownPeersSalt []byte, ) (OutboundMessage, error) { subnetIDBytes := make([][]byte, len(trackedSubnets)) encodeIDs(trackedSubnets, subnetIDBytes) return b.builder.createOutbound( &p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: networkID, MyTime: myTime, IpAddr: ip.IP.To16(), IpPort: uint32(ip.Port), MyVersion: myVersion, - MyVersionTime: myVersionTime, + IpSigningTime: ipSigningTime, Sig: sig, TrackedSubnets: subnetIDBytes, + Client: &p2p.Client{ + Name: client, + Major: major, + Minor: minor, + Patch: patch, + }, + SupportedAcps: supportedACPs, + ObjectedAcps: objectedACPs, + KnownPeers: &p2p.BloomFilter{ + Filter: knownPeersFilter, + Salt: knownPeersSalt, + }, }, }, }, @@ -255,7 +284,27 @@ func (b *outMsgBuilder) Version( ) } -func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { +func (b *outMsgBuilder) GetPeerList( + knownPeersFilter []byte, + knownPeersSalt []byte, +) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + 
KnownPeers: &p2p.BloomFilter{ + Filter: knownPeersFilter, + Salt: knownPeersSalt, + }, + }, + }, + }, + b.compressionType, + false, + ) +} + +func (b *outMsgBuilder) PeerList(peers []*ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { claimIPPorts := make([]*p2p.ClaimedIpPort, len(peers)) for i, p := range peers { claimIPPorts[i] = &p2p.ClaimedIpPort{ @@ -264,13 +313,13 @@ func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling boo IpPort: uint32(p.IPPort.Port), Timestamp: p.Timestamp, Signature: p.Signature, - TxId: p.TxID[:], + TxId: ids.Empty[:], } } return b.builder.createOutbound( &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: claimIPPorts, }, }, @@ -280,20 +329,6 @@ func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling boo ) } -func (b *outMsgBuilder) PeerListAck(peerAcks []*p2p.PeerAck) (OutboundMessage, error) { - return b.builder.createOutbound( - &p2p.Message{ - Message: &p2p.Message_PeerListAck{ - PeerListAck: &p2p.PeerListAck{ - PeerAcks: peerAcks, - }, - }, - }, - compression.TypeNone, - false, - ) -} - func (b *outMsgBuilder) GetStateSummaryFrontier( chainID ids.ID, requestID uint32, diff --git a/message/outbound_msg_builder_test.go b/message/outbound_msg_builder_test.go index 50f273bf4b13..39d22442b0a4 100644 --- a/message/outbound_msg_builder_test.go +++ b/message/outbound_msg_builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -29,7 +29,6 @@ func Test_newOutboundBuilder(t *testing.T) { for _, compressionType := range []compression.Type{ compression.TypeNone, - compression.TypeGzip, compression.TypeZstd, } { t.Run(compressionType.String(), func(t *testing.T) { diff --git a/nat/nat.go b/nat/nat.go index 33749ca0e572..a6e37078e7a6 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat @@ -53,8 +53,8 @@ type Mapper struct { } // NewPortMapper returns an initialized mapper -func NewPortMapper(log logging.Logger, r Router) Mapper { - return Mapper{ +func NewPortMapper(log logging.Logger, r Router) *Mapper { + return &Mapper{ log: log, r: r, closer: make(chan struct{}), diff --git a/nat/no_router.go b/nat/no_router.go index 5c894c8c673c..19c68dac5538 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat diff --git a/nat/pmp.go b/nat/pmp.go index ad2032ec1493..ecee9793f934 100644 --- a/nat/pmp.go +++ b/nat/pmp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat diff --git a/nat/upnp.go b/nat/upnp.go index 2571048e367e..aa26d6d82fc6 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nat diff --git a/network/README.md index 5364d9db39e5..303d1f56ab82 100644 --- a/network/README.md +++ b/network/README.md @@ -46,7 +46,7 @@ When starting an Avalanche node, a node needs to be able to initiate some proces In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **beacons** (this is user-configurable). Once connected to a set of beacons, a node is able to discover other nodes in the network. Over time, a node eventually discovers other peers in the network through `PeerList` messages it receives through: - The handshake initiated between two peers when attempting to connect to a peer (see [Connecting](#connecting)). -- Periodic `PeerList` gossip messages that every peer sends to the peers it's connected to (see [Connected](#connected)). +- Responses to periodically sent `GetPeerList` messages requesting a `PeerList` of unknown peers (see [Connected](#connected)). #### Connecting @@ -54,32 +54,31 @@ In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **b Upon connection to any peer, a handshake is performed between the node attempting to establish the outbound connection to the peer and the peer receiving the inbound connection. -When attempting to establish the connection, the first message that the node attempting to connect to the peer in the network is a `Version` message describing compatibility of the candidate node with the peer. As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. +When attempting to establish the connection, the first message that the node attempting to connect sends to the peer is a `Handshake` message describing the compatibility of the candidate node with the peer. As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. ```mermaid sequenceDiagram Note over Node,Peer: Initiate Handshake Note left of Node: I want to connect to you! -Note over Node,Peer: Version message +Note over Node,Peer: Handshake message Node->>Peer: AvalancheGo v1.0.0 Note right of Peer: My version v1.9.4 is incompatible with your version v1.0.0. Peer-xNode: Connection dropped Note over Node,Peer: Handshake Failed ``` -If the `Version` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to to allow the node to discover more peers in the network. +If the `Handshake` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to, to allow the node to discover more peers in the network. ```mermaid sequenceDiagram Note over Node,Peer: Initiate Handshake Note left of Node: I want to connect to you! -Note over Node,Peer: Version message +Note over Node,Peer: Handshake message Node->>Peer: AvalancheGo v1.9.4 Note right of Peer: LGTM!
Note over Node,Peer: PeerList message Peer->>Node: Peer-X, Peer-Y, Peer-Z Note over Node,Peer: Handshake Complete -Node->>Peer: ACK Peer-X, Peer-Y, Peer-Z ``` Once the node attempting to join the network receives this `PeerList` message, the handshake is complete and the node is now connected to the peer. The node attempts to connect to the new peers discovered in the `PeerList` message. Each connection results in another peer handshake, which results in the node incrementally discovering more and more peers in the network as more and more `PeerList` messages are exchanged. @@ -90,73 +89,55 @@ Some peers aren't discovered through the `PeerList` messages exchanged through p ```mermaid sequenceDiagram -Node ->> Peer-1: Version - v1.9.5 +Node ->> Peer-1: Handshake - v1.9.5 Peer-1 ->> Node: PeerList - Peer-2 -Node ->> Peer-1: ACK - Peer-2 Note left of Node: Node is connected to Peer-1 and now tries to connect to Peer-2. -Node ->> Peer-2: Version - v1.9.5 +Node ->> Peer-2: Handshake - v1.9.5 Peer-2 ->> Node: PeerList - Peer-1 -Node ->> Peer-2: ACK - Peer-1 Note left of Node: Peer-3 was never sampled, so we haven't connected yet! Node --> Peer-3: No connection ``` -To guarantee that a node can discover all peers, each node periodically gossips a sample of the peers it knows about to other peers. +To guarantee that a node can discover all peers, each node periodically sends a `GetPeerList` message to a random peer. ##### PeerList Gossip ###### Messages -A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains networking-level metadata about the peer that provides the necessary information to connect to it, alongside the corresponding transaction id that added that peer to the validator set. Transaction ids are unique hashes that only add a single validator, so it is guaranteed that there is a 1:1 mapping between a validator and its associated transaction id. +A `GetPeerList` message requests that the peer sends a `PeerList` message. `GetPeerList` messages contain a bloom filter of already known peers to reduce useless bandwidth on `PeerList` messages. The bloom filter reduces bandwidth by enabling the `PeerList` message to only include peers that aren't already known. -`PeerListAck` messages are sent in response to `PeerList` messages to allow a peer to confirm which peers it will actually attempt to connect to. Because nodes only gossip peers they believe another peer doesn't already know about to optimize bandwidth, `PeerListAck` messages are important to confirm that a peer will attempt to connect to someone. Without this, a node might gossip a peer to another peer and assume a connection between the two is being established, and not re-gossip the peer in future gossip cycles. If the connection was never actually wanted by the peer being gossiped to due to a transient reason, that peer would never be able to re-discover the gossiped peer and could be isolated from a subset of the network. +A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains signed networking-level metadata about a peer that provides the necessary information to connect to it. -Once a `PeerListAck` message is received from a peer, the node that sent the original `PeerList` message marks the corresponding acknowledged validators as already having been transmitted to the peer, so that it's excluded from subsequent iterations of `PeerList` gossip. 
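Purely as an illustration of the `GetPeerList`/`PeerList` exchange described above, the following self-contained Go sketch shows a requester advertising a salted bloom filter of the peer entries it already knows, and a responder replying only with peers that are not in that filter. All type names, sizes, and the hash construction below are invented for the example; the real code uses avalanchego's `utils/bloom` package and the `GossipID`s of signed `ClaimedIPPort`s.

```go
// Illustrative only: a toy salted bloom filter and the GetPeerList/PeerList
// filtering idea. Not the avalanchego utils/bloom implementation.
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// knownPeersFilter is a simplified bloom filter: three bit indexes derived
// from a salted SHA-256 over a fixed-size bit set. Re-salting on reset changes
// the hash functions, so persistent collisions do not survive a reset.
type knownPeersFilter struct {
	bits []byte
	salt []byte
}

func (f *knownPeersFilter) indexes(key []byte) [3]uint64 {
	h := sha256.Sum256(append(append([]byte{}, f.salt...), key...))
	n := uint64(len(f.bits)) * 8
	var out [3]uint64
	for i := range out {
		out[i] = binary.BigEndian.Uint64(h[i*8:]) % n
	}
	return out
}

func (f *knownPeersFilter) Add(key []byte) {
	for _, idx := range f.indexes(key) {
		f.bits[idx/8] |= 1 << (idx % 8)
	}
}

func (f *knownPeersFilter) Contains(key []byte) bool {
	for _, idx := range f.indexes(key) {
		if f.bits[idx/8]&(1<<(idx%8)) == 0 {
			return false
		}
	}
	return true
}

// respondToGetPeerList mimics the responder: only peers whose gossip keys are
// not already in the requester's filter are included in the PeerList reply.
func respondToGetPeerList(known *knownPeersFilter, gossipable map[string][]byte) []string {
	var reply []string
	for nodeID, gossipKey := range gossipable {
		if !known.Contains(gossipKey) {
			reply = append(reply, nodeID)
		}
	}
	return reply
}

func main() {
	known := &knownPeersFilter{bits: make([]byte, 256), salt: []byte("per-reset-random-salt")}
	known.Add([]byte("peer-1@t=100")) // the requester already knows peer-1's latest IP

	gossipable := map[string][]byte{
		"peer-1": []byte("peer-1@t=100"),
		"peer-2": []byte("peer-2@t=90"),
	}
	fmt.Println(respondToGetPeerList(known, gossipable)) // only peer-2 is sent back
}
```

The filtering logic is the same in the real protocol: anything already present in the requester's filter is simply skipped by the responder, which is what keeps repeated `PeerList` responses from re-sending known IPs.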
+Once peer metadata is received, the node will add that data to its bloom filter to prevent learning about it again. ###### Gossip Handshake messages provide a node with some knowledge of peers in the network, but offers no guarantee that learning about a subset of peers from each peer the node connects with will result in the node learning about every peer in the network. -In order to provide a probabilistic guarantee that all peers in the network will eventually learn of one another, each node periodically gossips a sample of the peers that they're aware of to a sample of the peers that they're connected to. Over time, this probabilistically guarantees that every peer will eventually learn of every other peer. +To provide an eventual guarantee that all peers learn of one another, each node periodically requests peers from a random peer. -To optimize bandwidth usage, each node tracks which peers are guaranteed to know of which peers. A node learns this information by tracking both inbound and outbound `PeerList` gossip. +To optimize bandwidth, each node tracks the most recent IPs of validators. The validator's nodeID and timestamp are inserted into a bloom filter which is used to select only necessary IPs to gossip. -- Inbound - - If a node ever receives `PeerList` from a peer, that peer _must_ have known about the peers in that `PeerList` message in order to have gossiped them. -- Outbound - - If a node sends a `PeerList` to a peer and the peer replies with an `PeerListAck` message, then all peers in the `PeerListAck` must be known by the peer. +As the number of entries increases in the bloom filter, the probability of a false positive increases. False positives can cause recent IPs not to be gossiped when they otherwise should be, slowing down the rate of `PeerList` gossip. To prevent the bloom filter from having too many false positives, a new bloom filter is periodically generated and the number of entries a validator is allowed to have in the bloom filter is capped. Generating the new bloom filter both removes stale entries and modifies the hash functions to avoid persistent hash collisions. -To efficiently track which peers know of which peers, the peers that each peer is aware of is represented in a [bit set](https://en.wikipedia.org/wiki/Bit_array). A peer is represented by either a `0` if it isn't known by the peer yet, or a `1` if it is known by the peer. - -An node follows the following steps for every cycle of `PeerList` gossip: - -1. Get a sample of peers in the network that the node is connected to -2. For each peer: - 1. Figure out which peers the node hasn't gossiped to them yet. - 2. Take a random sample of these unknown peers. - 3. Send a message describing these peers to the peer. +A node follows the following steps for `PeerList` gossip: ```mermaid sequenceDiagram -Note left of Node: Initialize gossip bit set for Peer-123 -Note left of Node: Peer-123: [0, 0, 0] -Node->>Peer-123: PeerList - Peer-1 -Peer-123->>Node: PeerListAck - Peer-1 -Note left of Node: Peer-123: [1, 0, 0] -Node->>Peer-123: PeerList - Peer-3 -Peer-123->>Node: PeerListAck - Peer-3 -Note left of Node: Peer-123: [1, 0, 1] -Node->>Peer-123: PeerList - Peer-2 -Peer-123->>Node: PeerListAck - Peer-2 -Note left of Node: Peer-123: [1, 1, 1] -Note left of Node: No more gossip left to send to Peer-123! +Note left of Node: Initialize bloom filter +Note left of Node: Bloom: [0, 0, 0] +Node->>Peer-123: GetPeerList [0, 0, 0] +Note right of Peer-123: Any peers can be sent.
+Peer-123->>Node: PeerList - Peer-1 +Note left of Node: Bloom: [1, 0, 0] +Node->>Peer-123: GetPeerList [1, 0, 0] +Note right of Peer-123: Either Peer-2 or Peer-3 can be sent. +Peer-123->>Node: PeerList - Peer-3 +Note left of Node: Bloom: [1, 0, 1] +Node->>Peer-123: GetPeerList [1, 0, 1] +Note right of Peer-123: Only Peer-2 can be sent. +Peer-123->>Node: PeerList - Peer-2 +Note left of Node: Bloom: [1, 1, 1] +Node->>Peer-123: GetPeerList [1, 1, 1] +Note right of Peer-123: There are no more peers left to send! ``` - -Because network state is generally expected to be stable (i.e nodes are not continuously flickering online/offline), as more and more gossip messages are exchanged nodes eventually realize that the peers that they are connected to have learned about every other peer. - -A node eventually stops gossiping peers when there's no more new peers to gossip about. `PeerList` gossip only resumes once: - -1. a new peer joins -2. a peer disconnects and reconnects -3. a new validator joins the network -4. a validator's IP is updated diff --git a/network/camino_test.go b/network/camino_test.go new file mode 100644 index 000000000000..314862c436e6 --- /dev/null +++ b/network/camino_test.go @@ -0,0 +1,69 @@ +// Copyright (C) 2023, Chain4Travel AG. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Convenient way to run generateTestKeyAndCertFile. Comment out SkipNow before running. +func TestGenerateTestCert(t *testing.T) { + t.SkipNow() + for i := 1; i <= 3; i++ { + require.NoError(t, generateTestKeyAndCertFile( + fmt.Sprintf("test_key_%d.key", i), + fmt.Sprintf("test_cert_%d.crt", i), + )) + } +} + +// Creates key and cert files. Those are used by tests in this package. +func generateTestKeyAndCertFile(keyPath, certPath string) error { + certBytes, keyBytes, err := staking.NewCertAndKeyBytesWithSecpKey(nil) + if err != nil { + return err + } + + // Ensure directory where key/cert will live exists + if err := os.MkdirAll(filepath.Dir(certPath), perms.ReadWriteExecute); err != nil { + return fmt.Errorf("couldn't create path for cert: %w", err) + } + if err := os.MkdirAll(filepath.Dir(keyPath), perms.ReadWriteExecute); err != nil { + return fmt.Errorf("couldn't create path for key: %w", err) + } + + // Write cert to disk + certFile, err := os.Create(certPath) + if err != nil { + return fmt.Errorf("couldn't create cert file: %w", err) + } + if _, err := certFile.Write(certBytes); err != nil { + return fmt.Errorf("couldn't write cert file: %w", err) + } + if err := certFile.Close(); err != nil { + return fmt.Errorf("couldn't close cert file: %w", err) + } + + // Write key to disk + keyOut, err := os.Create(keyPath) + if err != nil { + return fmt.Errorf("couldn't create key file: %w", err) + } + if _, err := keyOut.Write(keyBytes); err != nil { + return fmt.Errorf("couldn't write private key: %w", err) + } + if err := keyOut.Close(); err != nil { + return fmt.Errorf("couldn't close key file: %w", err) + } + + return nil +} diff --git a/network/certs_test.go b/network/certs_test.go index 90455460f2f4..543fd5eccb8f 100644 --- a/network/certs_test.go +++ b/network/certs_test.go @@ -8,29 +8,96 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc.
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network import ( "crypto/tls" + "net" "sync" "testing" + _ "embed" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" ) var ( + //go:embed test_cert_1.crt + testCertBytes1 []byte + //go:embed test_key_1.key + testKeyBytes1 []byte + //go:embed test_cert_2.crt + testCertBytes2 []byte + //go:embed test_key_2.key + testKeyBytes2 []byte + //go:embed test_cert_3.crt + testCertBytes3 []byte + //go:embed test_key_3.key + testKeyBytes3 []byte + + ip *ips.ClaimedIPPort + otherIP *ips.ClaimedIPPort + certLock sync.Mutex tlsCerts []*tls.Certificate tlsConfigs []*tls.Config ) +func init() { + cert1, err := staking.LoadTLSCertFromBytes(testKeyBytes1, testCertBytes1) + if err != nil { + panic(err) + } + cert2, err := staking.LoadTLSCertFromBytes(testKeyBytes2, testCertBytes2) + if err != nil { + panic(err) + } + cert3, err := staking.LoadTLSCertFromBytes(testKeyBytes3, testCertBytes3) + if err != nil { + panic(err) + } + tlsCerts = []*tls.Certificate{ + cert1, cert2, cert3, + } + + stakingCert1, err := staking.CertificateFromX509(cert1.Leaf) + if err != nil { + panic(err) + } + + ip = ips.NewClaimedIPPort( + stakingCert1, + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) + + stakingCert2, err := staking.CertificateFromX509(cert2.Leaf) + if err != nil { + panic(err) + } + otherIP = ips.NewClaimedIPPort( + stakingCert2, + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) +} + func getTLS(t *testing.T, index int) (ids.NodeID, *tls.Certificate, *tls.Config) { certLock.Lock() defer certLock.Unlock() @@ -38,14 +105,16 @@ func getTLS(t *testing.T, index int) (ids.NodeID, *tls.Certificate, *tls.Config) for len(tlsCerts) <= index { cert, err := staking.NewTLSCert() require.NoError(t, err) - tlsConfig := peer.TLSConfig(*cert, nil) - tlsCerts = append(tlsCerts, cert) + } + for len(tlsConfigs) <= index { + cert := tlsCerts[len(tlsConfigs)] + tlsConfig := peer.TLSConfig(*cert, nil) tlsConfigs = append(tlsConfigs, tlsConfig) } tlsCert := tlsCerts[index] - nodeID, err := peer.CertToID(tlsCert.Leaf) + nodeID, err := staking.TLSCertToID(tlsCert.Leaf) require.NoError(t, err) return nodeID, tlsCert, tlsConfigs[index] diff --git a/network/config.go b/network/config.go index 1ca1addc0117..5cb014741f56 100644 --- a/network/config.go +++ b/network/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/dialer" - "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" @@ -73,6 +72,14 @@ type PeerListGossipConfig struct { // PeerListGossipFreq is the frequency that this node will attempt to gossip // signed IPs to its peers. PeerListGossipFreq time.Duration `json:"peerListGossipFreq"` + + // PeerListPullGossipFreq is the frequency that this node will attempt to + // request signed IPs from its peers. 
+ PeerListPullGossipFreq time.Duration `json:"peerListPullGossipFreq"` + + // PeerListBloomResetFreq is how frequently this node will recalculate the + // IP tracker's bloom filter. + PeerListBloomResetFreq time.Duration `json:"peerListBloomResetFreq"` } type TimeoutConfig struct { @@ -126,6 +133,9 @@ type Config struct { PingFrequency time.Duration `json:"pingFrequency"` AllowPrivateIPs bool `json:"allowPrivateIPs"` + SupportedACPs set.Set[uint32] `json:"supportedACPs"` + ObjectedACPs set.Set[uint32] `json:"objectedACPs"` + // The compression type to use when compressing outbound messages. // Assumes all peers support this compression type. CompressionType compression.Type `json:"compressionType"` @@ -179,7 +189,4 @@ type Config struct { // Specifies how much disk usage each peer can cause before // we rate-limit them. DiskTargeter tracker.Targeter `json:"-"` - - // Tracks which validators have been sent to which peers - GossipTracker peer.GossipTracker `json:"-"` } diff --git a/network/conn_test.go b/network/conn_test.go index 4394cd885e3c..6a44c6153992 100644 --- a/network/conn_test.go +++ b/network/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/dialer/dialer.go b/network/dialer/dialer.go index 22e8c3ba1bfe..109b63cc2002 100644 --- a/network/dialer/dialer.go +++ b/network/dialer/dialer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dialer diff --git a/network/dialer/dialer_test.go b/network/dialer/dialer_test.go index 95011996bcdf..a824b8b03e08 100644 --- a/network/dialer/dialer_test.go +++ b/network/dialer/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dialer diff --git a/network/dialer_test.go b/network/dialer_test.go index b6f2eef15def..7a60d056d66d 100644 --- a/network/dialer_test.go +++ b/network/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -33,7 +33,7 @@ func (d *testDialer) NewListener() (ips.DynamicIPPort, *testListener) { // Uses a private IP to easily enable testing AllowPrivateIPs ip := ips.NewDynamicIPPort( net.IPv4(10, 0, 0, 0), - uint16(len(d.listeners)), + uint16(len(d.listeners)+1), ) staticIP := ip.IPPort() listener := newTestListener(staticIP) @@ -55,22 +55,22 @@ func (d *testDialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) Conn: serverConn, localAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 0, + Port: 1, }, remoteAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 1, + Port: 2, }, } client := &testConn{ Conn: clientConn, localAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 2, + Port: 3, }, remoteAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 3, + Port: 4, }, } select { diff --git a/network/example_test.go b/network/example_test.go index 8f20900a7e8d..bfac03fba44f 100644 --- a/network/example_test.go +++ b/network/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/handler_test.go b/network/handler_test.go index 64350b3b289a..08c99a0d4e70 100644 --- a/network/handler_test.go +++ b/network/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/ip_tracker.go b/network/ip_tracker.go new file mode 100644 index 000000000000..758c53494580 --- /dev/null +++ b/network/ip_tracker.go @@ -0,0 +1,406 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "crypto/rand" + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "go.uber.org/zap" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" +) + +const ( + saltSize = 32 + minCountEstimate = 128 + targetFalsePositiveProbability = .001 + maxFalsePositiveProbability = .01 + // By setting maxIPEntriesPerValidator > 1, we allow validators to update + // their IP at least once per bloom filter reset. + maxIPEntriesPerValidator = 2 +) + +var _ validators.SetCallbackListener = (*ipTracker)(nil) + +func newIPTracker( + log logging.Logger, + namespace string, + registerer prometheus.Registerer, +) (*ipTracker, error) { + bloomNamespace := metric.AppendNamespace(namespace, "ip_bloom") + bloomMetrics, err := bloom.NewMetrics(bloomNamespace, registerer) + if err != nil { + return nil, err + } + tracker := &ipTracker{ + log: log, + numValidatorIPs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "validator_ips", + Help: "Number of known validator IPs", + }), + numGossipable: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "gossipable_ips", + Help: "Number of IPs this node is willing to gossip", + }), + bloomMetrics: bloomMetrics, + connected: make(map[ids.NodeID]*ips.ClaimedIPPort), + mostRecentValidatorIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), + gossipableIndicies: make(map[ids.NodeID]int), + bloomAdditions: make(map[ids.NodeID]int), + } + err = utils.Err( + registerer.Register(tracker.numValidatorIPs), + registerer.Register(tracker.numGossipable), + ) + if err != nil { + return nil, err + } + return tracker, tracker.resetBloom() +} + +type ipTracker struct { + log logging.Logger + numValidatorIPs prometheus.Gauge + numGossipable prometheus.Gauge + bloomMetrics *bloom.Metrics + + lock sync.RWMutex + // Manually tracked nodes are always treated like validators + manuallyTracked set.Set[ids.NodeID] + // Connected tracks the currently connected peers, including validators and + // non-validators. The IP is not necessarily the same IP as in + // mostRecentIPs. 
+	connected              map[ids.NodeID]*ips.ClaimedIPPort
+	mostRecentValidatorIPs map[ids.NodeID]*ips.ClaimedIPPort
+	validators             set.Set[ids.NodeID]
+
+	// An IP is marked as gossipable if:
+	// - The node is a validator
+	// - The node is connected
+	// - The IP the node connected with is its latest IP
+	gossipableIndicies map[ids.NodeID]int
+	gossipableIPs      []*ips.ClaimedIPPort
+
+	// The bloom filter contains the most recent validator IPs to avoid
+	// unnecessary IP gossip.
+	bloom *bloom.Filter
+	// To prevent validators from causing the bloom filter to have too many
+	// false positives, we limit each validator to maxIPEntriesPerValidator in
+	// the bloom filter.
+	bloomAdditions map[ids.NodeID]int // Number of IPs added to the bloom
+	bloomSalt      []byte
+	maxBloomCount  int
+}
+
+func (i *ipTracker) ManuallyTrack(nodeID ids.NodeID) {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	// We treat manually tracked nodes as if they were validators.
+	if !i.validators.Contains(nodeID) {
+		i.onValidatorAdded(nodeID)
+	}
+	// Now that the node is marked as a validator, freeze its validation
+	// status. Future calls to OnValidatorAdded or OnValidatorRemoved will be
+	// treated as noops.
+	i.manuallyTracked.Add(nodeID)
+}
+
+func (i *ipTracker) WantsConnection(nodeID ids.NodeID) bool {
+	i.lock.RLock()
+	defer i.lock.RUnlock()
+
+	return i.validators.Contains(nodeID)
+}
+
+func (i *ipTracker) ShouldVerifyIP(ip *ips.ClaimedIPPort) bool {
+	i.lock.RLock()
+	defer i.lock.RUnlock()
+
+	if !i.validators.Contains(ip.NodeID) {
+		return false
+	}
+
+	prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID]
+	return !ok || // This would be the first IP
+		prevIP.Timestamp < ip.Timestamp // This would be a newer IP
+}
+
+// AddIP returns true if the addition of the provided IP updated the most
+// recently known IP of a validator.
+func (i *ipTracker) AddIP(ip *ips.ClaimedIPPort) bool {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	if !i.validators.Contains(ip.NodeID) {
+		return false
+	}
+
+	prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID]
+	if !ok {
+		// This is the first IP we've heard from the validator, so it is the
+		// most recent.
+		i.updateMostRecentValidatorIP(ip)
+		// Because we didn't previously have an IP, we know we aren't currently
+		// connected to them.
+		return true
+	}
+
+	if prevIP.Timestamp >= ip.Timestamp {
+		// This IP is not newer than the previously known IP.
+		return false
+	}
+
+	i.updateMostRecentValidatorIP(ip)
+	i.removeGossipableIP(ip.NodeID)
+	return true
+}
+
+func (i *ipTracker) GetIP(nodeID ids.NodeID) (*ips.ClaimedIPPort, bool) {
+	i.lock.RLock()
+	defer i.lock.RUnlock()
+
+	ip, ok := i.mostRecentValidatorIPs[nodeID]
+	return ip, ok
+}
+
+func (i *ipTracker) Connected(ip *ips.ClaimedIPPort) {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	i.connected[ip.NodeID] = ip
+	if !i.validators.Contains(ip.NodeID) {
+		return
+	}
+
+	prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID]
+	if !ok {
+		// This is the first IP we've heard from the validator, so it is the
+		// most recent.
+		i.updateMostRecentValidatorIP(ip)
+		i.addGossipableIP(ip)
+		return
+	}
+
+	if prevIP.Timestamp > ip.Timestamp {
+		// There is a more up-to-date IP than the one that was used to connect.
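+		// Keep the previously known IP; the connection's IP is neither stored
+		// nor marked as gossipable, since gossiping it would spread stale
+		// information.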
+		return
+	}
+
+	if prevIP.Timestamp < ip.Timestamp {
+		i.updateMostRecentValidatorIP(ip)
+	}
+	i.addGossipableIP(ip)
+}
+
+func (i *ipTracker) Disconnected(nodeID ids.NodeID) {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	delete(i.connected, nodeID)
+	i.removeGossipableIP(nodeID)
+}
+
+func (i *ipTracker) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, _ uint64) {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	i.onValidatorAdded(nodeID)
+}
+
+func (i *ipTracker) onValidatorAdded(nodeID ids.NodeID) {
+	if i.manuallyTracked.Contains(nodeID) {
+		return
+	}
+
+	i.validators.Add(nodeID)
+	ip, connected := i.connected[nodeID]
+	if !connected {
+		return
+	}
+
+	// Because we only track validator IPs, the IP from the connection is
+	// guaranteed to be the most up-to-date IP that we know.
+	i.updateMostRecentValidatorIP(ip)
+	i.addGossipableIP(ip)
+}
+
+func (*ipTracker) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {}
+
+func (i *ipTracker) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) {
+	i.lock.Lock()
+	defer i.lock.Unlock()
+
+	if i.manuallyTracked.Contains(nodeID) {
+		return
+	}
+
+	delete(i.mostRecentValidatorIPs, nodeID)
+	i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs)))
+
+	i.validators.Remove(nodeID)
+	i.removeGossipableIP(nodeID)
+}
+
+func (i *ipTracker) updateMostRecentValidatorIP(ip *ips.ClaimedIPPort) {
+	i.mostRecentValidatorIPs[ip.NodeID] = ip
+	i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs)))
+
+	oldCount := i.bloomAdditions[ip.NodeID]
+	if oldCount >= maxIPEntriesPerValidator {
+		return
+	}
+
+	// If the validator set is growing rapidly, we should increase the size of
+	// the bloom filter.
+	if count := i.bloom.Count(); count >= i.maxBloomCount {
+		if err := i.resetBloom(); err != nil {
+			i.log.Error("failed to reset validator tracker bloom filter",
+				zap.Int("maxCount", i.maxBloomCount),
+				zap.Int("currentCount", count),
+				zap.Error(err),
+			)
+		} else {
+			i.log.Info("reset validator tracker bloom filter",
+				zap.Int("currentCount", count),
+			)
+		}
+		return
+	}
+
+	i.bloomAdditions[ip.NodeID] = oldCount + 1
+	bloom.Add(i.bloom, ip.GossipID[:], i.bloomSalt)
+	i.bloomMetrics.Count.Inc()
+}
+
+func (i *ipTracker) addGossipableIP(ip *ips.ClaimedIPPort) {
+	i.gossipableIndicies[ip.NodeID] = len(i.gossipableIPs)
+	i.gossipableIPs = append(i.gossipableIPs, ip)
+	i.numGossipable.Inc()
+}
+
+func (i *ipTracker) removeGossipableIP(nodeID ids.NodeID) {
+	indexToRemove, wasGossipable := i.gossipableIndicies[nodeID]
+	if !wasGossipable {
+		return
+	}
+
+	newNumGossipable := len(i.gossipableIPs) - 1
+	if newNumGossipable != indexToRemove {
+		replacementIP := i.gossipableIPs[newNumGossipable]
+		i.gossipableIndicies[replacementIP.NodeID] = indexToRemove
+		i.gossipableIPs[indexToRemove] = replacementIP
+	}
+
+	delete(i.gossipableIndicies, nodeID)
+	i.gossipableIPs[newNumGossipable] = nil
+	i.gossipableIPs = i.gossipableIPs[:newNumGossipable]
+	i.numGossipable.Dec()
+}
+
+// GetGossipableIPs returns the latest IPs of connected validators. The returned
+// IPs will not contain [exceptNodeID] or any IPs contained in [exceptIPs]. If
+// the number of eligible IPs to return is low, it's possible that every IP will
+// be iterated over while handling this call.
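+// Candidates are sampled uniformly at random without replacement, so repeated
+// calls may return IPs in a different order.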
+func (i *ipTracker) GetGossipableIPs( + exceptNodeID ids.NodeID, + exceptIPs *bloom.ReadFilter, + salt []byte, + maxNumIPs int, +) []*ips.ClaimedIPPort { + var ( + uniform = sampler.NewUniform() + ips = make([]*ips.ClaimedIPPort, 0, maxNumIPs) + ) + + i.lock.RLock() + defer i.lock.RUnlock() + + uniform.Initialize(uint64(len(i.gossipableIPs))) + for len(ips) < maxNumIPs { + index, err := uniform.Next() + if err != nil { + return ips + } + + ip := i.gossipableIPs[index] + if ip.NodeID == exceptNodeID { + continue + } + + if !bloom.Contains(exceptIPs, ip.GossipID[:], salt) { + ips = append(ips, ip) + } + } + return ips +} + +// ResetBloom prunes the current bloom filter. This must be called periodically +// to ensure that validators that change their IPs are updated correctly and +// that validators that left the validator set are removed. +func (i *ipTracker) ResetBloom() error { + i.lock.Lock() + defer i.lock.Unlock() + + return i.resetBloom() +} + +// Bloom returns the binary representation of the bloom filter along with the +// random salt. +func (i *ipTracker) Bloom() ([]byte, []byte) { + i.lock.RLock() + defer i.lock.RUnlock() + + return i.bloom.Marshal(), i.bloomSalt +} + +// resetBloom creates a new bloom filter with a reasonable size for the current +// validator set size. This function additionally populates the new bloom filter +// with the current most recently known IPs of validators. +func (i *ipTracker) resetBloom() error { + newSalt := make([]byte, saltSize) + _, err := rand.Reader.Read(newSalt) + if err != nil { + return err + } + + count := math.Max(maxIPEntriesPerValidator*i.validators.Len(), minCountEstimate) + numHashes, numEntries := bloom.OptimalParameters( + count, + targetFalsePositiveProbability, + ) + newFilter, err := bloom.New(numHashes, numEntries) + if err != nil { + return err + } + + i.bloom = newFilter + maps.Clear(i.bloomAdditions) + i.bloomSalt = newSalt + i.maxBloomCount = bloom.EstimateCount(numHashes, numEntries, maxFalsePositiveProbability) + + for nodeID, ip := range i.mostRecentValidatorIPs { + bloom.Add(newFilter, ip.GossipID[:], newSalt) + i.bloomAdditions[nodeID] = 1 + } + i.bloomMetrics.Reset(newFilter, i.maxBloomCount) + return nil +} diff --git a/network/ip_tracker_test.go b/network/ip_tracker_test.go new file mode 100644 index 000000000000..052797d749dd --- /dev/null +++ b/network/ip_tracker_test.go @@ -0,0 +1,711 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
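The tracker above is the engine behind the new pull-based IP gossip: a node hands its bloom filter to peers via Bloom, answers peer-list requests with GetGossipableIPs, and periodically calls ResetBloom so that updated IPs and validator-set churn are re-admitted to gossip. The helper below is only a sketch of that request path, written as if it lived next to ip_tracker.go in package network (so it reuses the ids, ips and bloom imports); the function name is invented for illustration and is not part of this patch.

// replyToPeerListRequest sketches how a GetPeerList-style request could be
// served from the tracker: parse the requester's advertised bloom filter and
// return up to [maxIPs] validator IPs the requester does not already know,
// never echoing the requester's own IP back to it.
func replyToPeerListRequest(
	tracker *ipTracker,
	requesterID ids.NodeID,
	filterBytes []byte,
	salt []byte,
	maxIPs int,
) ([]*ips.ClaimedIPPort, error) {
	requesterFilter, err := bloom.Parse(filterBytes)
	if err != nil {
		return nil, err
	}
	return tracker.GetGossipableIPs(requesterID, requesterFilter, salt, maxIPs), nil
}

Filtering against the requester's bloom filter replaces the per-peer acknowledgement bookkeeping that the removed GossipTracker used to perform.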
+ +package network + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func newTestIPTracker(t *testing.T) *ipTracker { + tracker, err := newIPTracker(logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(t, err) + return tracker +} + +func newerTestIP(ip *ips.ClaimedIPPort) *ips.ClaimedIPPort { + return ips.NewClaimedIPPort( + ip.Cert, + ip.IPPort, + ip.Timestamp+1, + ip.Signature, + ) +} + +func requireEqual(t *testing.T, expected, actual *ipTracker) { + require := require.New(t) + require.Equal(expected.manuallyTracked, actual.manuallyTracked) + require.Equal(expected.connected, actual.connected) + require.Equal(expected.mostRecentValidatorIPs, actual.mostRecentValidatorIPs) + require.Equal(expected.validators, actual.validators) + require.Equal(expected.gossipableIndicies, actual.gossipableIndicies) + require.Equal(expected.gossipableIPs, actual.gossipableIPs) + require.Equal(expected.bloomAdditions, actual.bloomAdditions) + require.Equal(expected.maxBloomCount, actual.maxBloomCount) +} + +func requireMetricsConsistent(t *testing.T, tracker *ipTracker) { + require := require.New(t) + require.Equal(float64(len(tracker.mostRecentValidatorIPs)), testutil.ToFloat64(tracker.numValidatorIPs)) + require.Equal(float64(len(tracker.gossipableIPs)), testutil.ToFloat64(tracker.numGossipable)) + require.Equal(float64(tracker.bloom.Count()), testutil.ToFloat64(tracker.bloomMetrics.Count)) + require.Equal(float64(tracker.maxBloomCount), testutil.ToFloat64(tracker.bloomMetrics.MaxCount)) +} + +func TestIPTracker_ManuallyTrack(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "non-connected non-validator", + initialState: newTestIPTracker(t), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.validators.Add(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected non-validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + tracker.validators.Add(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "non-connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + 
tracker.onValidatorAdded(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.ManuallyTrack(test.nodeID) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_AddIP(t *testing.T) { + newerIP := newerTestIP(ip) + tests := []struct { + name string + initialState *ipTracker + ip *ips.ClaimedIPPort + expectedUpdated bool + expectedState *ipTracker + }{ + { + name: "non-validator", + initialState: newTestIPTracker(t), + ip: ip, + expectedUpdated: false, + expectedState: newTestIPTracker(t), + }, + { + name: "first known IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + ip: ip, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + return tracker + }(), + }, + { + name: "older IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + ip: ip, + expectedUpdated: false, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + }, + { + name: "same IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: ip, + expectedUpdated: false, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + }, + { + name: "disconnected newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: newerIP, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + return tracker + }(), + }, + { + name: "connected newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + ip: newerIP, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + delete(tracker.gossipableIndicies, newerIP.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + updated := test.initialState.AddIP(test.ip) + require.Equal(t, test.expectedUpdated, updated) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_Connected(t *testing.T) { + newerIP := newerTestIP(ip) + tests := []struct { + name string + initialState *ipTracker + ip *ips.ClaimedIPPort + expectedState *ipTracker + 
}{ + { + name: "non-validator", + initialState: newTestIPTracker(t), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.connected[ip.NodeID] = ip + return tracker + }(), + }, + { + name: "first known IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.connected[ip.NodeID] = ip + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + { + name: "connected with older IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + tracker.connected[ip.NodeID] = ip + return tracker + }(), + }, + { + name: "connected with newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: newerIP, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.connected[newerIP.NodeID] = newerIP + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + tracker.gossipableIndicies[newerIP.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + newerIP, + } + return tracker + }(), + }, + { + name: "connected with same IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.connected[ip.NodeID] = ip + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.Connected(test.ip) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_Disconnected(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: newTestIPTracker(t), + }, + { + name: "latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + delete(tracker.connected, ip.NodeID) + delete(tracker.gossipableIndicies, ip.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + { + name: "non-latest gossipable", + initialState: 
func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + delete(tracker.connected, ip.NodeID) + tracker.gossipableIndicies = map[ids.NodeID]int{ + otherIP.NodeID: 0, + } + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + otherIP, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.Disconnected(test.nodeID) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_OnValidatorAdded(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "manually tracked", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + return tracker + }(), + }, + { + name: "disconnected", + initialState: newTestIPTracker(t), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.validators.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.validators.Add(ip.NodeID) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.OnValidatorAdded(test.nodeID, nil, ids.Empty, 0) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_OnValidatorRemoved(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "manually tracked", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + }, + { + name: "not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + return tracker + }(), + }, + { + name: "latest gossipable", + initialState: func() *ipTracker { + tracker := 
newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + delete(tracker.gossipableIndicies, ip.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + { + name: "non-latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + tracker.gossipableIndicies = map[ids.NodeID]int{ + otherIP.NodeID: 0, + } + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + otherIP, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.OnValidatorRemoved(test.nodeID, 0) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_GetGossipableIPs(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.Connected(otherIP) + tracker.onValidatorAdded(ip.NodeID) + tracker.onValidatorAdded(otherIP.NodeID) + + gossipableIPs := tracker.GetGossipableIPs(ids.EmptyNodeID, bloom.EmptyFilter, nil, 2) + require.ElementsMatch([]*ips.ClaimedIPPort{ip, otherIP}, gossipableIPs) + + gossipableIPs = tracker.GetGossipableIPs(ip.NodeID, bloom.EmptyFilter, nil, 2) + require.Equal([]*ips.ClaimedIPPort{otherIP}, gossipableIPs) + + gossipableIPs = tracker.GetGossipableIPs(ids.EmptyNodeID, bloom.FullFilter, nil, 2) + require.Empty(gossipableIPs) + + filter, err := bloom.New(8, 1024) + require.NoError(err) + bloom.Add(filter, ip.GossipID[:], nil) + + readFilter, err := bloom.Parse(filter.Marshal()) + require.NoError(err) + + gossipableIPs = tracker.GetGossipableIPs(ip.NodeID, readFilter, nil, 2) + require.Equal([]*ips.ClaimedIPPort{otherIP}, gossipableIPs) +} + +func TestIPTracker_BloomFiltersEverything(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.Connected(otherIP) + tracker.onValidatorAdded(ip.NodeID) + tracker.onValidatorAdded(otherIP.NodeID) + + bloomBytes, salt := tracker.Bloom() + readFilter, err := bloom.Parse(bloomBytes) + require.NoError(err) + + gossipableIPs := tracker.GetGossipableIPs(ids.EmptyNodeID, readFilter, salt, 2) + require.Empty(gossipableIPs) + + require.NoError(tracker.ResetBloom()) +} + +func TestIPTracker_BloomGrowsWithValidatorSet(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + initialMaxBloomCount := tracker.maxBloomCount + for i := 0; i < 2048; i++ { + tracker.onValidatorAdded(ids.GenerateTestNodeID()) + } + requireMetricsConsistent(t, tracker) + + require.NoError(tracker.ResetBloom()) + require.Greater(tracker.maxBloomCount, initialMaxBloomCount) + requireMetricsConsistent(t, tracker) +} + +func TestIPTracker_BloomResetsDynamically(t *testing.T) { + require := require.New(t) + + tracker := 
newTestIPTracker(t) + tracker.Connected(ip) + tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorRemoved(ip.NodeID, 0) + tracker.maxBloomCount = 1 + tracker.Connected(otherIP) + tracker.onValidatorAdded(otherIP.NodeID) + requireMetricsConsistent(t, tracker) + + bloomBytes, salt := tracker.Bloom() + readFilter, err := bloom.Parse(bloomBytes) + require.NoError(err) + + require.False(bloom.Contains(readFilter, ip.GossipID[:], salt)) + require.True(bloom.Contains(readFilter, otherIP.GossipID[:], salt)) +} + +func TestIPTracker_PreventBloomFilterAddition(t *testing.T) { + require := require.New(t) + + newerIP := newerTestIP(ip) + newestIP := newerTestIP(newerIP) + + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(tracker.AddIP(ip)) + require.True(tracker.AddIP(newerIP)) + require.True(tracker.AddIP(newestIP)) + require.Equal(maxIPEntriesPerValidator, tracker.bloomAdditions[ip.NodeID]) + requireMetricsConsistent(t, tracker) +} + +func TestIPTracker_ShouldVerifyIP(t *testing.T) { + require := require.New(t) + + newerIP := newerTestIP(ip) + + tracker := newTestIPTracker(t) + require.False(tracker.ShouldVerifyIP(ip)) + tracker.onValidatorAdded(ip.NodeID) + require.True(tracker.ShouldVerifyIP(ip)) + require.True(tracker.AddIP(ip)) + require.False(tracker.ShouldVerifyIP(ip)) + require.True(tracker.ShouldVerifyIP(newerIP)) +} diff --git a/network/listener_test.go b/network/listener_test.go index 1b15b0062536..5d6073c6b383 100644 --- a/network/listener_test.go +++ b/network/listener_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/metrics.go b/network/metrics.go index 3e566a31c99f..e2a3a363b403 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/network.go b/network/network.go index a5ce7bf9ac7c..d733bce20b88 100644 --- a/network/network.go +++ b/network/network.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -30,22 +30,20 @@ import ( "go.uber.org/zap" - "golang.org/x/exp/maps" - "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" @@ -89,12 +87,6 @@ type Network interface { // or the network is closed. Dispatch() error - // WantsConnection returns true if this node is willing to attempt to - // connect to the provided nodeID. If the node is attempting to connect to - // the minimum number of peers, then it should only connect if the peer is a - // validator or beacon. - WantsConnection(ids.NodeID) bool - // Attempt to connect to this IP. The network will never stop attempting to // connect to this ID. ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) @@ -156,18 +148,13 @@ type network struct { // Cancelled on close onCloseCtx context.Context // Call [onCloseCtxCancel] to cancel [onCloseCtx] during close() - onCloseCtxCancel func() + onCloseCtxCancel context.CancelFunc sendFailRateCalculator safemath.Averager // Tracks which peers know about which peers - gossipTracker peer.GossipTracker - peersLock sync.RWMutex - // peerIPs contains the most up to date set of signed IPs for nodes we are - // currently connected or attempting to connect to. - // Note: The txID provided inside of a claimed IP is not verified and should - // not be accessed from this map. - peerIPs map[ids.NodeID]*ips.ClaimedIPPort + ipTracker *ipTracker + peersLock sync.RWMutex // trackedIPs contains the set of IPs that we are currently attempting to // connect to. An entry is added to this set when we first start attempting // to connect to the peer. An entry is deleted from this set once we have @@ -177,10 +164,6 @@ type network struct { connectedPeers peer.Set closing bool - // Tracks special peers that the network should always track - manuallyTrackedIDsLock sync.RWMutex - manuallyTrackedIDs set.Set[ids.NodeID] - // router is notified about all peer [Connected] and [Disconnected] events // as well as all non-handshake peer messages. // @@ -264,6 +247,18 @@ func NewNetwork( return nil, fmt.Errorf("initializing network metrics failed with: %w", err) } + ipTracker, err := newIPTracker(log, config.Namespace, metricsRegisterer) + if err != nil { + return nil, fmt.Errorf("initializing ip tracker failed with: %w", err) + } + config.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, ipTracker) + + // Track all default bootstrappers to ensure their current IPs are gossiped + // like validator IPs. 
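+	// Manual tracking also pins them: if a bootstrapper later leaves the
+	// validator set, its IP will still be gossiped.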
+ for _, bootstrapper := range genesis.GetBootstrappers(config.NetworkID) { + ipTracker.ManuallyTrack(bootstrapper.ID) + } + peerConfig := &peer.Config{ ReadBufferSize: config.PeerReadBufferSize, WriteBufferSize: config.PeerWriteBufferSize, @@ -281,11 +276,19 @@ func NewNetwork( PingFrequency: config.PingFrequency, PongTimeout: config.PingPongTimeout, MaxClockDifference: config.MaxClockDifference, + SupportedACPs: config.SupportedACPs.List(), + ObjectedACPs: config.ObjectedACPs.List(), ResourceTracker: config.ResourceTracker, UptimeCalculator: config.UptimeCalculator, IPSigner: peer.NewIPSigner(config.MyIPPort, config.TLSKey), } + // Invariant: We delay the activation of durango during the TLS handshake to + // avoid gossiping any TLS certs that anyone else in the network may + // consider invalid. Recall that if a peer gossips an invalid cert, the + // connection is terminated. + durangoTime := version.GetDurangoTime(config.NetworkID) + durangoTimeWithClockSkew := durangoTime.Add(config.MaxClockDifference) onCloseCtx, cancel := context.WithCancel(context.Background()) n := &network{ config: config, @@ -296,8 +299,8 @@ func NewNetwork( inboundConnUpgradeThrottler: throttling.NewInboundConnUpgradeThrottler(log, config.ThrottlerConfig.InboundConnUpgradeThrottlerConfig), listener: listener, dialer: dialer, - serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig, metrics.tlsConnRejected), - clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig, metrics.tlsConnRejected), + serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), + clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), onCloseCtx: onCloseCtx, onCloseCtxCancel: cancel, @@ -308,9 +311,8 @@ func NewNetwork( time.Now(), )), - peerIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), trackedIPs: make(map[ids.NodeID]*trackedIP), - gossipTracker: config.GossipTracker, + ipTracker: ipTracker, connectingPeers: peer.NewSet(), connectedPeers: peer.NewSet(), router: router, @@ -432,32 +434,6 @@ func (n *network) Connected(nodeID ids.NodeID) { return } - peerIP := peer.IP() - newIP := &ips.ClaimedIPPort{ - Cert: peer.Cert(), - IPPort: peerIP.IPPort, - Timestamp: peerIP.Timestamp, - Signature: peerIP.Signature, - } - prevIP, ok := n.peerIPs[nodeID] - if !ok { - // If the IP wasn't previously tracked, then we never could have - // gossiped it. This means we don't need to reset the validator's - // tracked set. - n.peerIPs[nodeID] = newIP - } else if prevIP.Timestamp < newIP.Timestamp { - // The previous IP was stale, so we should gossip the newer IP. - n.peerIPs[nodeID] = newIP - - if !prevIP.IPPort.Equal(newIP.IPPort) { - // This IP is actually different, so we should gossip it. 
- n.peerConfig.Log.Debug("resetting gossip due to ip change", - zap.Stringer("nodeID", nodeID), - ) - _ = n.gossipTracker.ResetValidator(nodeID) - } - } - if tracked, ok := n.trackedIPs[nodeID]; ok { tracked.stopTracking() delete(n.trackedIPs, nodeID) @@ -466,6 +442,15 @@ func (n *network) Connected(nodeID ids.NodeID) { n.connectedPeers.Add(peer) n.peersLock.Unlock() + peerIP := peer.IP() + newIP := ips.NewClaimedIPPort( + peer.Cert(), + peerIP.IPPort, + peerIP.Timestamp, + peerIP.Signature, + ) + n.ipTracker.Connected(newIP) + n.metrics.markConnected(peer) peerVersion := peer.Version() @@ -483,177 +468,15 @@ func (n *network) AllowConnection(nodeID ids.NodeID) bool { if !n.config.RequireValidatorToConnect { return true } - _, isValidator := n.config.Validators.GetValidator(constants.PrimaryNetworkID, n.config.MyNodeID) - return isValidator || n.WantsConnection(nodeID) + _, iAmAValidator := n.config.Validators.GetValidator(constants.PrimaryNetworkID, n.config.MyNodeID) + return iAmAValidator || n.ipTracker.WantsConnection(nodeID) } -func (n *network) Track(peerID ids.NodeID, claimedIPPorts []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { - // Perform all signature verification and hashing before grabbing the peer - // lock. - // Note: Avoiding signature verification when the IP isn't needed is a - // **significant** performance optimization. - // Note: To avoid signature verification when the IP isn't needed, we - // optimistically filter out IPs. This can result in us not tracking an IP - // that we otherwise would have. This case can only happen if the node - // became a validator between the time we verified the signature and when we - // processed the IP; which should be very rare. - ipAuths, err := n.authenticateIPs(claimedIPPorts) - if err != nil { - n.peerConfig.Log.Debug("authenticating claimed IPs failed", - zap.Stringer("nodeID", peerID), - zap.Error(err), - ) - return nil, err - } - - // Information for them to update about us - ipLen := len(claimedIPPorts) - newestTimestamp := make(map[ids.ID]uint64, ipLen) - // Information for us to update about them - txIDsWithUpToDateIP := make([]ids.ID, 0, ipLen) - - // Atomically modify peer data - n.peersLock.Lock() - defer n.peersLock.Unlock() - for i, ip := range claimedIPPorts { - ipAuth := ipAuths[i] - nodeID := ipAuth.nodeID - // Invariant: [ip] is only used to modify local node state if - // [verifiedIP] is true. - // Note: modifying peer-level state is allowed regardless of - // [verifiedIP]. - verifiedIP := ipAuth.verified - - // Re-fetch latest info for a [nodeID] in case it changed since we last - // held [peersLock]. - prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) - tracked, isTracked := n.trackedIPs[nodeID] - - // Evaluate if the gossiped IP is useful to us or to the peer that - // shared it with us. - switch { - case previouslyTracked && prevIP.Timestamp > ip.Timestamp: - // Our previous IP was more up to date. We should tell the peer - // not to gossip their IP to us. We should still gossip our IP to - // them. - newestTimestamp[ip.TxID] = prevIP.Timestamp - - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - case previouslyTracked && prevIP.Timestamp == ip.Timestamp: - // Our previous IP was equally fresh. We should tell the peer - // not to gossip this IP to us. We should not gossip our IP to them. 
- newestTimestamp[ip.TxID] = prevIP.Timestamp - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - case verifiedIP && shouldUpdateOurIP: - // This IP is more up to date. We should tell the peer not to gossip - // this IP to us. We should not gossip our IP to them. - newestTimestamp[ip.TxID] = ip.Timestamp - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - // In the future, we should gossip this IP rather than the old IP. - n.peerIPs[nodeID] = ip - - // If the new IP is equal to the old IP, there is no reason to - // refresh the references to it. This can happen when a node - // restarts but does not change their IP. - if prevIP.IPPort.Equal(ip.IPPort) { - continue - } - - // We should gossip this new IP to all our peers. - n.peerConfig.Log.Debug("resetting gossip due to ip change", - zap.Stringer("nodeID", nodeID), - ) - _ = n.gossipTracker.ResetValidator(nodeID) - - // We should update any existing outbound connection attempts. - if isTracked { - // Stop tracking the old IP and start tracking the new one. - tracked := tracked.trackNewIP(ip.IPPort) - n.trackedIPs[nodeID] = tracked - n.dial(nodeID, tracked) - } - case verifiedIP && shouldDial: - // Invariant: [isTracked] is false here. - - // This is the first we've heard of this IP and we want to connect - // to it. We should tell the peer not to gossip this IP to us again. - newestTimestamp[ip.TxID] = ip.Timestamp - // We should not gossip this IP back to them. - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - // We don't need to reset gossip about this validator because - // we've never gossiped it before. - n.peerIPs[nodeID] = ip - - tracked := newTrackedIP(ip.IPPort) - n.trackedIPs[nodeID] = tracked - n.dial(nodeID, tracked) - default: - // This IP isn't desired - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - } - } - - txIDsToAck := maps.Keys(newestTimestamp) - txIDsToAck, ok := n.gossipTracker.AddKnown(peerID, txIDsWithUpToDateIP, txIDsToAck) - if !ok { - n.peerConfig.Log.Error("failed to update known peers", - zap.Stringer("nodeID", peerID), - ) - return nil, nil - } - - peerAcks := make([]*p2p.PeerAck, len(txIDsToAck)) - for i, txID := range txIDsToAck { - txID := txID - peerAcks[i] = &p2p.PeerAck{ - TxId: txID[:], - // By responding with the highest timestamp, not just the timestamp - // the peer provided us, we may be able to avoid some unnecessary - // gossip in the case that the peer is about to update this - // validator's IP. - Timestamp: newestTimestamp[txID], - } - } - return peerAcks, nil -} - -func (n *network) MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error { - txIDs := make([]ids.ID, 0, len(ips)) - - n.peersLock.RLock() - defer n.peersLock.RUnlock() - - for _, ip := range ips { - txID, err := ids.ToID(ip.TxId) - if err != nil { +func (n *network) Track(claimedIPPorts []*ips.ClaimedIPPort) error { + for _, ip := range claimedIPPorts { + if err := n.track(ip); err != nil { return err } - - // If [txID]'s corresponding nodeID isn't known, then they must no - // longer be a validator. Therefore we wouldn't gossip their IP anyways. - nodeID, ok := n.gossipTracker.GetNodeID(txID) - if !ok { - continue - } - - // If the peer returns a lower timestamp than I currently have, then I - // have updated the IP since I sent the PeerList message this is in - // response to. That means that I should re-gossip this node's IP to the - // peer. 
- myIP, previouslyTracked := n.peerIPs[nodeID] - if previouslyTracked && myIP.Timestamp <= ip.Timestamp { - txIDs = append(txIDs, txID) - } - } - - if _, ok := n.gossipTracker.AddKnown(peerID, txIDs, nil); !ok { - n.peerConfig.Log.Error("failed to update known peers", - zap.Stringer("nodeID", peerID), - ) } return nil } @@ -664,13 +487,6 @@ func (n *network) MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error { // call. Note that this is from the perspective of a single peer object, because // a peer with the same ID can reconnect to this network instance. func (n *network) Disconnected(nodeID ids.NodeID) { - if !n.gossipTracker.StopTrackingPeer(nodeID) { - n.peerConfig.Log.Error( - "stopped non-existent peer tracker", - zap.Stringer("nodeID", nodeID), - ) - } - n.peersLock.RLock() _, connecting := n.connectingPeers.GetByID(nodeID) peer, connected := n.connectedPeers.GetByID(nodeID) @@ -684,57 +500,17 @@ func (n *network) Disconnected(nodeID ids.NodeID) { } } -func (n *network) Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) { - // Only select validators that we haven't already sent to this peer - unknownValidators, ok := n.gossipTracker.GetUnknown(peerID) - if !ok { - n.peerConfig.Log.Debug( - "unable to find peer to gossip to", - zap.Stringer("nodeID", peerID), - ) - return nil, nil - } - - // We select a random sample of validators to gossip to avoid starving out a - // validator from being gossiped for an extended period of time. - s := sampler.NewUniform() - s.Initialize(uint64(len(unknownValidators))) - - // Calculate the unknown information we need to send to this peer. - validatorIPs := make([]ips.ClaimedIPPort, 0, int(n.config.PeerListNumValidatorIPs)) - for i := 0; i < len(unknownValidators) && len(validatorIPs) < int(n.config.PeerListNumValidatorIPs); i++ { - drawn, err := s.Next() - if err != nil { - return nil, err - } - - validator := unknownValidators[drawn] - n.peersLock.RLock() - _, isConnected := n.connectedPeers.GetByID(validator.NodeID) - peerIP := n.peerIPs[validator.NodeID] - n.peersLock.RUnlock() - if !isConnected { - n.peerConfig.Log.Verbo( - "unable to find validator in connected peers", - zap.Stringer("nodeID", validator.NodeID), - ) - continue - } - - // Note: peerIP isn't used directly here because the TxID may be - // incorrect. 
- validatorIPs = append(validatorIPs, - ips.ClaimedIPPort{ - Cert: peerIP.Cert, - IPPort: peerIP.IPPort, - Timestamp: peerIP.Timestamp, - Signature: peerIP.Signature, - TxID: validator.TxID, - }, - ) - } +func (n *network) KnownPeers() ([]byte, []byte) { + return n.ipTracker.Bloom() +} - return validatorIPs, nil +func (n *network) Peers(except ids.NodeID, knownPeers *bloom.ReadFilter, salt []byte) []*ips.ClaimedIPPort { + return n.ipTracker.GetGossipableIPs( + except, + knownPeers, + salt, + int(n.config.PeerListNumValidatorIPs), + ) } // Dispatch starts accepting connections from other nodes attempting to connect @@ -814,21 +590,8 @@ func (n *network) Dispatch() error { return errs.Err } -func (n *network) WantsConnection(nodeID ids.NodeID) bool { - if _, ok := n.config.Validators.GetValidator(constants.PrimaryNetworkID, nodeID); ok { - return true - } - - n.manuallyTrackedIDsLock.RLock() - defer n.manuallyTrackedIDsLock.RUnlock() - - return n.manuallyTrackedIDs.Contains(nodeID) -} - func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { - n.manuallyTrackedIDsLock.Lock() - n.manuallyTrackedIDs.Add(nodeID) - n.manuallyTrackedIDsLock.Unlock() + n.ipTracker.ManuallyTrack(nodeID) n.peersLock.Lock() defer n.peersLock.Unlock() @@ -849,6 +612,59 @@ func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { } } +func (n *network) track(ip *ips.ClaimedIPPort) error { + // To avoid signature verification when the IP isn't needed, we + // optimistically filter out IPs. This can result in us not tracking an IP + // that we otherwise would have. This case can only happen if the node + // became a validator between the time we verified the signature and when we + // processed the IP; which should be very rare. + // + // Note: Avoiding signature verification when the IP isn't needed is a + // **significant** performance optimization. + if !n.ipTracker.ShouldVerifyIP(ip) { + n.metrics.numUselessPeerListBytes.Add(float64(ip.Size())) + return nil + } + + // Perform all signature verification and hashing before grabbing the peer + // lock. + signedIP := peer.SignedIP{ + UnsignedIP: peer.UnsignedIP{ + IPPort: ip.IPPort, + Timestamp: ip.Timestamp, + }, + Signature: ip.Signature, + } + maxTimestamp := n.peerConfig.Clock.Time().Add(n.peerConfig.MaxClockDifference) + if err := signedIP.Verify(ip.Cert, maxTimestamp); err != nil { + return err + } + + n.peersLock.Lock() + defer n.peersLock.Unlock() + + if !n.ipTracker.AddIP(ip) { + return nil + } + + if _, connected := n.connectedPeers.GetByID(ip.NodeID); connected { + // If I'm currently connected to [nodeID] then I'll attempt to dial them + // when we disconnect. + return nil + } + + tracked, isTracked := n.trackedIPs[ip.NodeID] + if isTracked { + // Stop tracking the old IP and start tracking the new one. + tracked = tracked.trackNewIP(ip.IPPort) + } else { + tracked = newTrackedIP(ip.IPPort) + } + n.trackedIPs[ip.NodeID] = tracked + n.dial(ip.NodeID, tracked) + return nil +} + // getPeers returns a slice of connected peers from a set of [nodeIDs]. 
// // - [nodeIDs] the IDs of the peers that should be returned if they are @@ -973,13 +789,12 @@ func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { // The peer that is disconnecting from us didn't finish the handshake tracked, ok := n.trackedIPs[nodeID] if ok { - if n.WantsConnection(nodeID) { + if n.ipTracker.WantsConnection(nodeID) { tracked := tracked.trackNewIP(tracked.ip) n.trackedIPs[nodeID] = tracked n.dial(nodeID, tracked) } else { tracked.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } } @@ -988,6 +803,7 @@ func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { } func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { + n.ipTracker.Disconnected(nodeID) n.router.Disconnected(nodeID) n.peersLock.Lock() @@ -996,73 +812,15 @@ func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { n.connectedPeers.Remove(nodeID) // The peer that is disconnecting from us finished the handshake - if n.WantsConnection(nodeID) { - prevIP := n.peerIPs[nodeID] - tracked := newTrackedIP(prevIP.IPPort) + if ip, wantsConnection := n.ipTracker.GetIP(nodeID); wantsConnection { + tracked := newTrackedIP(ip.IPPort) n.trackedIPs[nodeID] = tracked n.dial(nodeID, tracked) - } else { - delete(n.peerIPs, nodeID) } n.metrics.markDisconnected(peer) } -// ipAuth is a helper struct used to convey information about an -// [*ips.ClaimedIPPort]. -type ipAuth struct { - nodeID ids.NodeID - verified bool -} - -func (n *network) authenticateIPs(ips []*ips.ClaimedIPPort) ([]*ipAuth, error) { - ipAuths := make([]*ipAuth, len(ips)) - for i, ip := range ips { - nodeID, err := peer.StakingCertToID(ip.Cert) - if err != nil { - n.peerConfig.Log.Debug("failed to create nodeID from certificate: %s", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - return nil, err - } - n.peersLock.RLock() - _, _, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) - n.peersLock.RUnlock() - if !shouldUpdateOurIP && !shouldDial { - ipAuths[i] = &ipAuth{ - nodeID: nodeID, - } - continue - } - - // Verify signature if needed - signedIP := peer.SignedIP{ - UnsignedIP: peer.UnsignedIP{ - IPPort: ip.IPPort, - Timestamp: ip.Timestamp, - }, - Signature: ip.Signature, - } - if err := signedIP.Verify(ip.Cert); err != nil { - return nil, err - } - ipAuths[i] = &ipAuth{ - nodeID: nodeID, - verified: true, - } - } - return ipAuths, nil -} - -// peerIPStatus assumes the caller holds [peersLock] -func (n *network) peerIPStatus(nodeID ids.NodeID, ip *ips.ClaimedIPPort) (*ips.ClaimedIPPort, bool, bool, bool) { - prevIP, previouslyTracked := n.peerIPs[nodeID] - shouldUpdateOurIP := previouslyTracked && prevIP.Timestamp < ip.Timestamp - shouldDial := !previouslyTracked && n.WantsConnection(nodeID) - return prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial -} - // dial will spin up a new goroutine and attempt to establish a connection with // [nodeID] at [ip]. // @@ -1083,6 +841,10 @@ func (n *network) peerIPStatus(nodeID ids.NodeID, ip *ips.ClaimedIPPort) (*ips.C // there is a randomized exponential backoff to avoid spamming connection // attempts. func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { + n.peerConfig.Log.Verbo("attempting to dial node", + zap.Stringer("nodeID", nodeID), + zap.Stringer("ip", ip.ip), + ) go func() { n.metrics.numTracked.Inc() defer n.metrics.numTracked.Dec() @@ -1105,13 +867,12 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { // trackedIPs and this goroutine. 
This prevents a memory leak when // the tracked nodeID leaves the validator set and is never able to // be connected to. - if !n.WantsConnection(nodeID) { + if !n.ipTracker.WantsConnection(nodeID) { // Typically [n.trackedIPs[nodeID]] will already equal [ip], but // the reference to [ip] is refreshed to avoid any potential // race conditions before removing the entry. if ip, exists := n.trackedIPs[nodeID]; exists { ip.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } n.peersLock.Unlock() @@ -1156,7 +917,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { n.peerConfig.Log.Verbo("skipping connection dial", zap.String("reason", "outbound connections to private IPs are prohibited"), zap.Stringer("nodeID", nodeID), - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), ) continue @@ -1166,7 +927,8 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { if err != nil { n.peerConfig.Log.Verbo( "failed to reach peer, attempting again", - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), ) continue @@ -1174,14 +936,16 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { n.peerConfig.Log.Verbo("starting to upgrade connection", zap.String("direction", "outbound"), - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), ) err = n.upgrade(conn, n.clientUpgrader) if err != nil { n.peerConfig.Log.Verbo( "failed to upgrade, attempting again", - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), ) continue @@ -1285,13 +1049,6 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { zap.Stringer("nodeID", nodeID), ) - if !n.gossipTracker.StartTrackingPeer(nodeID) { - n.peerConfig.Log.Error( - "started duplicate peer tracker", - zap.Stringer("nodeID", nodeID), - ) - } - // peer.Start requires there is only ever one peer instance running with the // same [peerConfig.InboundMsgThrottler]. This is guaranteed by the above // de-duplications for [connectingPeers] and [connectedPeers]. 
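Taken together, the KnownPeers, Peers and Track methods above replace the acknowledgement-based GossipTracker flow with a pull model: a node advertises a bloom filter of the IPs it already knows, the responder filters its gossipable set against that filter, and the returned IPs are verified and dialed. The sketch below is illustrative only; exchangePeerLists is an invented name, and in the real protocol the filter travels in a GetPeerList message and the reply in a PeerList message rather than through direct method calls.

// exchangePeerLists sketches one round of pull gossip between two in-process
// network instances, [local] and [remote] (both *network, package network).
func exchangePeerLists(local, remote *network, localNodeID ids.NodeID) error {
	// local advertises a bloom filter of the IPs it already knows about.
	filterBytes, salt := local.KnownPeers()
	knownPeers, err := bloom.Parse(filterBytes)
	if err != nil {
		return err
	}

	// remote answers with gossipable validator IPs that are not in the
	// filter, never echoing local's own IP back to it.
	newIPs := remote.Peers(localNodeID, knownPeers, salt)

	// local verifies the signatures of the IPs it actually wants and dials
	// any validator whose IP is newer than what it previously had.
	return local.Track(newIPs)
}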
@@ -1340,7 +1097,6 @@ func (n *network) StartClose() { for nodeID, tracked := range n.trackedIPs { tracked.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } @@ -1412,10 +1168,13 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { } func (n *network) runTimers() { - gossipPeerlists := time.NewTicker(n.config.PeerListGossipFreq) + pushGossipPeerlists := time.NewTicker(n.config.PeerListGossipFreq) + pullGossipPeerlists := time.NewTicker(n.config.PeerListPullGossipFreq) + resetPeerListBloom := time.NewTicker(n.config.PeerListBloomResetFreq) updateUptimes := time.NewTicker(n.config.UptimeMetricFreq) defer func() { - gossipPeerlists.Stop() + pushGossipPeerlists.Stop() + resetPeerListBloom.Stop() updateUptimes.Stop() }() @@ -1423,8 +1182,18 @@ func (n *network) runTimers() { select { case <-n.onCloseCtx.Done(): return - case <-gossipPeerlists.C: - n.gossipPeerLists() + case <-pushGossipPeerlists.C: + n.pushGossipPeerLists() + case <-pullGossipPeerlists.C: + n.pullGossipPeerLists() + case <-resetPeerListBloom.C: + if err := n.ipTracker.ResetBloom(); err != nil { + n.peerConfig.Log.Error("failed to reset ip tracker bloom filter", + zap.Error(err), + ) + } else { + n.peerConfig.Log.Debug("reset ip tracker bloom filter") + } case <-updateUptimes.C: primaryUptime, err := n.NodeUptime(constants.PrimaryNetworkID) if err != nil { @@ -1451,8 +1220,8 @@ func (n *network) runTimers() { } } -// gossipPeerLists gossips validators to peers in the network -func (n *network) gossipPeerLists() { +// pushGossipPeerLists gossips validators to peers in the network +func (n *network) pushGossipPeerLists() { peers := n.samplePeers( constants.PrimaryNetworkID, int(n.config.PeerListValidatorGossipSize), @@ -1466,6 +1235,21 @@ func (n *network) gossipPeerLists() { } } +// pullGossipPeerLists requests validators from peers in the network +func (n *network) pullGossipPeerLists() { + peers := n.samplePeers( + constants.PrimaryNetworkID, + 1, // numValidatorsToSample + 0, // numNonValidatorsToSample + 0, // numPeersToSample + subnets.NoOpAllower, + ) + + for _, p := range peers { + p.StartSendGetPeerList() + } +} + func (n *network) getLastReceived() (time.Time, bool) { lastReceived := atomic.LoadInt64(&n.peerConfig.LastReceived) if lastReceived == 0 { diff --git a/network/network_test.go b/network/network_test.go index 65d4809101fb..79864075c32b 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
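For reference, the run loop above now drives three independent cadences. The snippet below only illustrates how they relate; the values are examples, not the defaults used by the node.

// Illustrative cadences only (package network context, "time" import assumed).
const (
	examplePushGossipFreq = time.Minute      // PeerListGossipFreq: unsolicited PeerList pushes
	examplePullGossipFreq = 30 * time.Second // PeerListPullGossipFreq: GetPeerList pull from one sampled validator
	exampleBloomResetFreq = time.Hour        // PeerListBloomResetFreq: how often ipTracker.ResetBloom prunes the filter
)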
package network @@ -54,6 +54,8 @@ var ( PeerListNonValidatorGossipSize: 100, PeerListPeersGossipSize: 100, PeerListGossipFreq: time.Second, + PeerListPullGossipFreq: time.Second, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, } defaultTimeoutConfig = TimeoutConfig{ PingPongTimeout: 30 * time.Second, @@ -215,27 +217,16 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler msgCreator := newMessageCreator(t) registry := prometheus.NewRegistry() - g, err := peer.NewGossipTracker(registry, "foobar") - require.NoError(err) - - log := logging.NoLog{} - gossipTrackerCallback := peer.GossipTrackerCallback{ - Log: log, - GossipTracker: g, - } - beacons := validators.NewManager() require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) vdrs := validators.NewManager() - vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, &gossipTrackerCallback) for _, nodeID := range nodeIDs { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.GenerateTestID(), 1)) } config := config - config.GossipTracker = g config.Beacons = beacons config.Validators = vdrs @@ -244,7 +235,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler config, msgCreator, registry, - log, + logging.NoLog{}, listeners[i], dialer, &testHandler{ @@ -405,15 +396,20 @@ func TestTrackVerifiesSignatures(t *testing.T) { nodeID, tlsCert, _ := getTLS(t, 1) require.NoError(network.config.Validators.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) - _, err := network.Track(ids.EmptyNodeID, []*ips.ClaimedIPPort{{ - Cert: staking.CertificateFromX509(tlsCert.Leaf), - IPPort: ips.IPPort{ - IP: net.IPv4(123, 132, 123, 123), - Port: 10000, - }, - Timestamp: 1000, - Signature: nil, - }}) + stakingCert, err := staking.CertificateFromX509(tlsCert.Leaf) + require.NoError(err) + + err = network.Track([]*ips.ClaimedIPPort{ + ips.NewClaimedIPPort( + stakingCert, + ips.IPPort{ + IP: net.IPv4(123, 132, 123, 123), + Port: 10000, + }, + 1000, // timestamp + nil, // signature + ), + }) // The signature is wrong so this peer tracking info isn't useful. 
require.ErrorIs(err, rsa.ErrVerification) @@ -437,27 +433,16 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { msgCreator := newMessageCreator(t) registry := prometheus.NewRegistry() - g, err := peer.NewGossipTracker(registry, "foobar") - require.NoError(err) - - log := logging.NoLog{} - gossipTrackerCallback := peer.GossipTrackerCallback{ - Log: log, - GossipTracker: g, - } - beacons := validators.NewManager() require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) vdrs := validators.NewManager() - vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, &gossipTrackerCallback) for _, nodeID := range nodeIDs { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.GenerateTestID(), 1)) } config := config - config.GossipTracker = g config.Beacons = beacons config.Validators = vdrs config.AllowPrivateIPs = false @@ -466,7 +451,7 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { config, msgCreator, registry, - log, + logging.NoLog{}, listeners[i], dialer, &testHandler{ @@ -532,23 +517,11 @@ func TestDialDeletesNonValidators(t *testing.T) { msgCreator := newMessageCreator(t) registry := prometheus.NewRegistry() - g, err := peer.NewGossipTracker(registry, "foobar") - require.NoError(err) - - log := logging.NoLog{} - gossipTrackerCallback := peer.GossipTrackerCallback{ - Log: log, - GossipTracker: g, - } - beacons := validators.NewManager() require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) - vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, &gossipTrackerCallback) - config := config - config.GossipTracker = g config.Beacons = beacons config.Validators = vdrs config.AllowPrivateIPs = false @@ -557,7 +530,7 @@ func TestDialDeletesNonValidators(t *testing.T) { config, msgCreator, registry, - log, + logging.NoLog{}, listeners[i], dialer, &testHandler{ @@ -581,16 +554,16 @@ func TestDialDeletesNonValidators(t *testing.T) { wg.Add(len(networks)) for i, net := range networks { if i != 0 { - peerAcks, err := net.Track(config.MyNodeID, []*ips.ClaimedIPPort{{ - Cert: staking.CertificateFromX509(config.TLSConfig.Certificates[0].Leaf), - IPPort: ip.IPPort, - Timestamp: ip.Timestamp, - Signature: ip.Signature, - }}) + stakingCert, err := staking.CertificateFromX509(config.TLSConfig.Certificates[0].Leaf) require.NoError(err) - // peerAcks is empty because we aren't actually connected to - // MyNodeID yet - require.Empty(peerAcks) + require.NoError(net.Track([]*ips.ClaimedIPPort{ + ips.NewClaimedIPPort( + stakingCert, + ip.IPPort, + ip.Timestamp, + ip.Signature, + ), + })) } go func(net Network) { @@ -694,25 +667,14 @@ func TestAllowConnectionAsAValidator(t *testing.T) { msgCreator := newMessageCreator(t) registry := prometheus.NewRegistry() - g, err := peer.NewGossipTracker(registry, "foobar") - require.NoError(err) - - log := logging.NoLog{} - gossipTrackerCallback := peer.GossipTrackerCallback{ - Log: log, - GossipTracker: g, - } - beacons := validators.NewManager() require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) vdrs := validators.NewManager() - vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, &gossipTrackerCallback) require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) config := config - config.GossipTracker = g config.Beacons = beacons config.Validators = vdrs config.RequireValidatorToConnect = true @@ -721,7 +683,7 @@ func 
TestAllowConnectionAsAValidator(t *testing.T) { config, msgCreator, registry, - log, + logging.NoLog{}, listeners[i], dialer, &testHandler{ diff --git a/network/p2p/client.go b/network/p2p/client.go index d950a4b0a227..b506baf9c630 100644 --- a/network/p2p/client.go +++ b/network/p2p/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p @@ -14,9 +14,8 @@ import ( ) var ( - ErrAppRequestFailed = errors.New("app request failed") - ErrRequestPending = errors.New("request pending") - ErrNoPeers = errors.New("no peers") + ErrRequestPending = errors.New("request pending") + ErrNoPeers = errors.New("no peers") ) // AppResponseCallback is called upon receiving an AppResponse for an AppRequest @@ -41,11 +40,11 @@ type CrossChainAppResponseCallback func( type Client struct { handlerID uint64 + handlerIDStr string handlerPrefix []byte - router *Router + router *router sender common.AppSender - // nodeSampler is used to select nodes to route AppRequestAny to - nodeSampler NodeSampler + options *clientOptions } // AppRequestAny issues an AppRequest to an arbitrary node decided by Client. @@ -56,7 +55,7 @@ func (c *Client) AppRequestAny( appRequestBytes []byte, onResponse AppResponseCallback, ) error { - sampled := c.nodeSampler.Sample(ctx, 1) + sampled := c.options.nodeSampler.Sample(ctx, 1) if len(sampled) != 1 { return ErrNoPeers } @@ -76,7 +75,7 @@ func (c *Client) AppRequest( c.router.lock.Lock() defer c.router.lock.Unlock() - appRequestBytes = c.prefixMessage(appRequestBytes) + appRequestBytes = PrefixMessage(c.handlerPrefix, appRequestBytes) for nodeID := range nodeIDs { requestID := c.router.requestID if _, ok := c.router.pendingAppRequests[requestID]; ok { @@ -97,8 +96,8 @@ func (c *Client) AppRequest( } c.router.pendingAppRequests[requestID] = pendingAppRequest{ - AppResponseCallback: onResponse, - metrics: c.router.handlers[c.handlerID].metrics, + handlerID: c.handlerIDStr, + callback: onResponse, } c.router.requestID += 2 } @@ -113,7 +112,7 @@ func (c *Client) AppGossip( ) error { return c.sender.SendAppGossip( ctx, - c.prefixMessage(appGossipBytes), + PrefixMessage(c.handlerPrefix, appGossipBytes), ) } @@ -126,7 +125,7 @@ func (c *Client) AppGossipSpecific( return c.sender.SendAppGossipSpecific( ctx, nodeIDs, - c.prefixMessage(appGossipBytes), + PrefixMessage(c.handlerPrefix, appGossipBytes), ) } @@ -154,29 +153,28 @@ func (c *Client) CrossChainAppRequest( ctx, chainID, requestID, - c.prefixMessage(appRequestBytes), + PrefixMessage(c.handlerPrefix, appRequestBytes), ); err != nil { return err } c.router.pendingCrossChainAppRequests[requestID] = pendingCrossChainAppRequest{ - CrossChainAppResponseCallback: onResponse, - metrics: c.router.handlers[c.handlerID].metrics, + handlerID: c.handlerIDStr, + callback: onResponse, } c.router.requestID += 2 return nil } -// prefixMessage prefixes the original message with the handler identifier -// corresponding to this client. +// PrefixMessage prefixes the original message with the protocol identifier. // // Only gossip and request messages need to be prefixed. // Response messages don't need to be prefixed because request ids are tracked // which map to the expected response handler. 
-func (c *Client) prefixMessage(src []byte) []byte { - messageBytes := make([]byte, len(c.handlerPrefix)+len(src)) - copy(messageBytes, c.handlerPrefix) - copy(messageBytes[len(c.handlerPrefix):], src) +func PrefixMessage(prefix, msg []byte) []byte { + messageBytes := make([]byte, len(prefix)+len(msg)) + copy(messageBytes, prefix) + copy(messageBytes[len(prefix):], msg) return messageBytes } diff --git a/network/p2p/gossip/bloom.go b/network/p2p/gossip/bloom.go index 5b1c6bc8c390..14d7ece6db85 100644 --- a/network/p2p/gossip/bloom.go +++ b/network/p2p/gossip/bloom.go @@ -1,130 +1,132 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gossip import ( "crypto/rand" - "encoding/binary" - "hash" - bloomfilter "github.com/holiman/bloomfilter/v2" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/math" ) -var _ hash.Hash64 = (*hasher)(nil) - -// NewBloomFilter returns a new instance of a bloom filter with at most -// [maxExpectedElements] elements anticipated at any moment, and a false -// positive probability of [falsePositiveProbability]. +// NewBloomFilter returns a new instance of a bloom filter with at least [minTargetElements] elements +// anticipated at any moment, and a false positive probability of [targetFalsePositiveProbability]. If the +// false positive probability exceeds [resetFalsePositiveProbability], the bloom filter will be reset. +// +// Invariant: The returned bloom filter is not safe to reset concurrently with +// other operations. However, it is otherwise safe to access concurrently. func NewBloomFilter( - maxExpectedElements uint64, - falsePositiveProbability float64, + registerer prometheus.Registerer, + namespace string, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, ) (*BloomFilter, error) { - bloom, err := bloomfilter.NewOptimal( - maxExpectedElements, - falsePositiveProbability, - ) + metrics, err := bloom.NewMetrics(namespace, registerer) if err != nil { return nil, err } + filter := &BloomFilter{ + minTargetElements: minTargetElements, + targetFalsePositiveProbability: targetFalsePositiveProbability, + resetFalsePositiveProbability: resetFalsePositiveProbability, - salt, err := randomSalt() - return &BloomFilter{ - Bloom: bloom, - Salt: salt, - }, err + metrics: metrics, + } + err = resetBloomFilter( + filter, + minTargetElements, + targetFalsePositiveProbability, + resetFalsePositiveProbability, + ) + return filter, err } type BloomFilter struct { - Bloom *bloomfilter.Filter - // Salt is provided to eventually unblock collisions in Bloom. It's possible + minTargetElements int + targetFalsePositiveProbability float64 + resetFalsePositiveProbability float64 + + metrics *bloom.Metrics + + maxCount int + bloom *bloom.Filter + // salt is provided to eventually unblock collisions in Bloom. It's possible // that conflicting Gossipable items collide in the bloom filter, so a salt // is generated to eventually resolve collisions. 
- Salt ids.ID + salt ids.ID } func (b *BloomFilter) Add(gossipable Gossipable) { - h := gossipable.GetID() - salted := &hasher{ - hash: h[:], - salt: b.Salt, - } - b.Bloom.Add(salted) + h := gossipable.GossipID() + bloom.Add(b.bloom, h[:], b.salt[:]) + b.metrics.Count.Inc() } func (b *BloomFilter) Has(gossipable Gossipable) bool { - h := gossipable.GetID() - salted := &hasher{ - hash: h[:], - salt: b.Salt, - } - return b.Bloom.Contains(salted) + h := gossipable.GossipID() + return bloom.Contains(b.bloom, h[:], b.salt[:]) } -// ResetBloomFilterIfNeeded resets a bloom filter if it breaches a target false -// positive probability. Returns true if the bloom filter was reset. +func (b *BloomFilter) Marshal() ([]byte, []byte) { + bloomBytes := b.bloom.Marshal() + // salt must be copied here to ensure the bytes aren't overwritten if salt + // is later modified. + salt := b.salt + return bloomBytes, salt[:] +} + +// ResetBloomFilterIfNeeded resets a bloom filter if it breaches [targetFalsePositiveProbability]. +// +// If [targetElements] exceeds [minTargetElements], the size of the bloom filter will grow to maintain +// the same [targetFalsePositiveProbability]. +// +// Returns true if the bloom filter was reset. func ResetBloomFilterIfNeeded( bloomFilter *BloomFilter, - falsePositiveProbability float64, + targetElements int, ) (bool, error) { - if bloomFilter.Bloom.FalsePosititveProbability() < falsePositiveProbability { + if bloomFilter.bloom.Count() <= bloomFilter.maxCount { return false, nil } - newBloom, err := bloomfilter.New(bloomFilter.Bloom.M(), bloomFilter.Bloom.K()) - if err != nil { - return false, err - } - salt, err := randomSalt() - if err != nil { - return false, err - } - - bloomFilter.Bloom = newBloom - bloomFilter.Salt = salt - return true, nil -} - -func randomSalt() (ids.ID, error) { - salt := ids.ID{} - _, err := rand.Read(salt[:]) - return salt, err -} - -type hasher struct { - hash []byte - salt ids.ID -} - -func (h *hasher) Write(p []byte) (n int, err error) { - h.hash = append(h.hash, p...) - return len(p), nil -} - -func (h *hasher) Sum(b []byte) []byte { - h.hash = append(h.hash, b...) 
- return h.hash -} - -func (h *hasher) Reset() { - h.hash = ids.Empty[:] -} - -func (*hasher) BlockSize() int { - return ids.IDLen + targetElements = math.Max(bloomFilter.minTargetElements, targetElements) + err := resetBloomFilter( + bloomFilter, + targetElements, + bloomFilter.targetFalsePositiveProbability, + bloomFilter.resetFalsePositiveProbability, + ) + return err == nil, err } -func (h *hasher) Sum64() uint64 { - salted := ids.ID{} - for i := 0; i < len(h.hash) && i < ids.IDLen; i++ { - salted[i] = h.hash[i] ^ h.salt[i] +func resetBloomFilter( + bloomFilter *BloomFilter, + targetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) error { + numHashes, numEntries := bloom.OptimalParameters( + targetElements, + targetFalsePositiveProbability, + ) + newBloom, err := bloom.New(numHashes, numEntries) + if err != nil { + return err + } + var newSalt ids.ID + if _, err := rand.Read(newSalt[:]); err != nil { + return err } - return binary.BigEndian.Uint64(salted[:]) -} + bloomFilter.maxCount = bloom.EstimateCount(numHashes, numEntries, resetFalsePositiveProbability) + bloomFilter.bloom = newBloom + bloomFilter.salt = newSalt -func (h *hasher) Size() int { - return len(h.hash) + bloomFilter.metrics.Reset(newBloom, bloomFilter.maxCount) + return nil } diff --git a/network/p2p/gossip/bloom_test.go b/network/p2p/gossip/bloom_test.go index 860d2d5e936e..00f75165b467 100644 --- a/network/p2p/gossip/bloom_test.go +++ b/network/p2p/gossip/bloom_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gossip @@ -6,39 +6,73 @@ package gossip import ( "testing" - bloomfilter "github.com/holiman/bloomfilter/v2" - "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/ava-labs/avalanchego/ids" ) func TestBloomFilterRefresh(t *testing.T) { tests := []struct { - name string - falsePositiveProbability float64 - add []*testTx - expected []*testTx + name string + minTargetElements int + targetFalsePositiveProbability float64 + resetFalsePositiveProbability float64 + resetCount uint64 + add []*testTx + expected []*testTx }{ { - name: "no refresh", - falsePositiveProbability: 1, + name: "no refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 1, + resetCount: 0, // maxCount = 9223372036854775807 add: []*testTx{ {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, }, expected: []*testTx{ {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, }, }, { - name: "refresh", - falsePositiveProbability: 0.1, + name: "refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 + resetCount: 1, add: []*testTx{ {id: ids.ID{0}}, {id: ids.ID{1}}, + {id: ids.ID{2}}, }, expected: []*testTx{ + {id: ids.ID{2}}, + }, + }, + { + name: "multiple refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 + resetCount: 2, + add: []*testTx{ + {id: ids.ID{0}}, {id: ids.ID{1}}, + {id: ids.ID{2}}, + {id: ids.ID{3}}, + {id: ids.ID{4}}, + }, + expected: []*testTx{ + {id: ids.ID{4}}, }, }, } @@ -46,20 +80,28 @@ func TestBloomFilterRefresh(t *testing.T) { for _, tt := range tests { t.Run(tt.name, 
func(t *testing.T) { require := require.New(t) - b, err := bloomfilter.New(10, 1) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", tt.minTargetElements, tt.targetFalsePositiveProbability, tt.resetFalsePositiveProbability) require.NoError(err) - bloom := BloomFilter{ - Bloom: b, - } + var resetCount uint64 for _, item := range tt.add { - _, err = ResetBloomFilterIfNeeded(&bloom, tt.falsePositiveProbability) + bloomBytes, saltBytes := bloom.Marshal() + initialBloomBytes := slices.Clone(bloomBytes) + initialSaltBytes := slices.Clone(saltBytes) + + reset, err := ResetBloomFilterIfNeeded(bloom, len(tt.add)) require.NoError(err) + if reset { + resetCount++ + } bloom.Add(item) - } - require.Equal(uint64(len(tt.expected)), bloom.Bloom.N()) + require.Equal(initialBloomBytes, bloomBytes) + require.Equal(initialSaltBytes, saltBytes) + } + require.Equal(tt.resetCount, resetCount) + require.Equal(float64(tt.resetCount+1), testutil.ToFloat64(bloom.metrics.ResetCount)) for _, expected := range tt.expected { require.True(bloom.Has(expected)) } diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index 94d49260da40..ab90e593b5a3 100644 --- a/network/p2p/gossip/gossip.go +++ b/network/p2p/gossip/gossip.go @@ -1,28 +1,49 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gossip import ( "context" + "errors" + "fmt" + "sync" "time" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "google.golang.org/protobuf/proto" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/logging" ) +const ( + typeLabel = "type" + pushType = "push" + pullType = "pull" +) + var ( _ Gossiper = (*ValidatorGossiper)(nil) - _ Gossiper = (*PullGossiper[testTx, *testTx])(nil) + _ Gossiper = (*PullGossiper[*testTx])(nil) + _ Gossiper = (*NoOpGossiper)(nil) + _ Gossiper = (*TestGossiper)(nil) + + _ Accumulator[*testTx] = (*PushGossiper[*testTx])(nil) + _ Accumulator[*testTx] = (*NoOpAccumulator[*testTx])(nil) + _ Accumulator[*testTx] = (*TestAccumulator[*testTx])(nil) + + metricLabels = []string{typeLabel} + pushLabels = prometheus.Labels{ + typeLabel: pushType, + } + pullLabels = prometheus.Labels{ + typeLabel: pullType, + } ) // Gossiper gossips Gossipables to other nodes @@ -31,11 +52,11 @@ type Gossiper interface { Gossip(ctx context.Context) error } -// GossipableAny exists to help create non-nil pointers to a concrete Gossipable -// ref: https://stackoverflow.com/questions/69573113/how-can-i-instantiate-a-non-nil-pointer-of-type-argument-with-generic-go -type GossipableAny[T any] interface { - *T - Gossipable +// Accumulator allows a caller to accumulate gossipables to be gossiped +type Accumulator[T Gossipable] interface { + Gossiper + // Add queues gossipables to be gossiped + Add(gossipables ...T) } // ValidatorGossiper only calls [Gossip] if the given node is a validator @@ -46,76 +67,95 @@ type ValidatorGossiper struct { Validators p2p.ValidatorSet } -func (v ValidatorGossiper) Gossip(ctx context.Context) error { - if !v.Validators.Has(ctx, v.NodeID) { - return nil - } - - return v.Gossiper.Gossip(ctx) -} - -type Config struct { - Namespace string - PollSize int +// Metrics that are tracked across a gossip protocol. 
A given protocol should +// only use a single instance of Metrics. +type Metrics struct { + sentCount *prometheus.CounterVec + sentBytes *prometheus.CounterVec + receivedCount *prometheus.CounterVec + receivedBytes *prometheus.CounterVec } -func NewPullGossiper[T any, U GossipableAny[T]]( - config Config, - log logging.Logger, - set Set[U], - client *p2p.Client, +// NewMetrics returns a common set of metrics +func NewMetrics( metrics prometheus.Registerer, -) (*PullGossiper[T, U], error) { - p := &PullGossiper[T, U]{ - config: config, - log: log, - set: set, - client: client, - receivedN: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: config.Namespace, - Name: "gossip_received_n", + namespace string, +) (Metrics, error) { + m := Metrics{ + sentCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_sent_count", + Help: "amount of gossip sent (n)", + }, metricLabels), + sentBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_sent_bytes", + Help: "amount of gossip sent (bytes)", + }, metricLabels), + receivedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_received_count", Help: "amount of gossip received (n)", - }), - receivedBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: config.Namespace, + }, metricLabels), + receivedBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, Name: "gossip_received_bytes", Help: "amount of gossip received (bytes)", - }), + }, metricLabels), } - err := utils.Err( - metrics.Register(p.receivedN), - metrics.Register(p.receivedBytes), + metrics.Register(m.sentCount), + metrics.Register(m.sentBytes), + metrics.Register(m.receivedCount), + metrics.Register(m.receivedBytes), ) - return p, err + return m, err } -type PullGossiper[T any, U GossipableAny[T]] struct { - config Config - log logging.Logger - set Set[U] - client *p2p.Client - receivedN prometheus.Counter - receivedBytes prometheus.Counter +func (v ValidatorGossiper) Gossip(ctx context.Context) error { + if !v.Validators.Has(ctx, v.NodeID) { + return nil + } + + return v.Gossiper.Gossip(ctx) } -func (p *PullGossiper[_, _]) Gossip(ctx context.Context) error { - bloom, salt, err := p.set.GetFilter() - if err != nil { - return err +func NewPullGossiper[T Gossipable]( + log logging.Logger, + marshaller Marshaller[T], + set Set[T], + client *p2p.Client, + metrics Metrics, + pollSize int, +) *PullGossiper[T] { + return &PullGossiper[T]{ + log: log, + marshaller: marshaller, + set: set, + client: client, + metrics: metrics, + pollSize: pollSize, } +} - request := &sdk.PullGossipRequest{ - Filter: bloom, - Salt: salt, - } - msgBytes, err := proto.Marshal(request) +type PullGossiper[T Gossipable] struct { + log logging.Logger + marshaller Marshaller[T] + set Set[T] + client *p2p.Client + metrics Metrics + pollSize int +} + +func (p *PullGossiper[_]) Gossip(ctx context.Context) error { + msgBytes, err := MarshalAppRequest(p.set.GetFilter()) if err != nil { return err } - for i := 0; i < p.config.PollSize; i++ { - if err := p.client.AppRequestAny(ctx, msgBytes, p.handleResponse); err != nil { + for i := 0; i < p.pollSize; i++ { + err := p.client.AppRequestAny(ctx, msgBytes, p.handleResponse) + if err != nil && !errors.Is(err, p2p.ErrNoPeers) { return err } } @@ -123,7 +163,7 @@ func (p *PullGossiper[_, _]) Gossip(ctx context.Context) error { return nil } -func (p *PullGossiper[T, U]) handleResponse( +func (p *PullGossiper[_]) 
handleResponse( _ context.Context, nodeID ids.NodeID, responseBytes []byte, @@ -138,18 +178,18 @@ func (p *PullGossiper[T, U]) handleResponse( return } - response := &sdk.PullGossipResponse{} - if err := proto.Unmarshal(responseBytes, response); err != nil { + gossip, err := ParseAppResponse(responseBytes) + if err != nil { p.log.Debug("failed to unmarshal gossip response", zap.Error(err)) return } receivedBytes := 0 - for _, bytes := range response.Gossip { + for _, bytes := range gossip { receivedBytes += len(bytes) - gossipable := U(new(T)) - if err := gossipable.Unmarshal(bytes); err != nil { + gossipable, err := p.marshaller.UnmarshalGossip(bytes) + if err != nil { p.log.Debug( "failed to unmarshal gossip", zap.Stringer("nodeID", nodeID), @@ -158,7 +198,7 @@ func (p *PullGossiper[T, U]) handleResponse( continue } - hash := gossipable.GetID() + hash := gossipable.GossipID() p.log.Debug( "received gossip", zap.Stringer("nodeID", nodeID), @@ -175,8 +215,101 @@ func (p *PullGossiper[T, U]) handleResponse( } } - p.receivedN.Add(float64(len(response.Gossip))) - p.receivedBytes.Add(float64(receivedBytes)) + receivedCountMetric, err := p.metrics.receivedCount.GetMetricWith(pullLabels) + if err != nil { + p.log.Error("failed to get received count metric", zap.Error(err)) + return + } + + receivedBytesMetric, err := p.metrics.receivedBytes.GetMetricWith(pullLabels) + if err != nil { + p.log.Error("failed to get received bytes metric", zap.Error(err)) + return + } + + receivedCountMetric.Add(float64(len(gossip))) + receivedBytesMetric.Add(float64(receivedBytes)) +} + +// NewPushGossiper returns an instance of PushGossiper +func NewPushGossiper[T Gossipable](marshaller Marshaller[T], client *p2p.Client, metrics Metrics, targetGossipSize int) *PushGossiper[T] { + return &PushGossiper[T]{ + marshaller: marshaller, + client: client, + metrics: metrics, + targetGossipSize: targetGossipSize, + pending: buffer.NewUnboundedDeque[T](0), + } +} + +// PushGossiper broadcasts gossip to peers randomly in the network +type PushGossiper[T Gossipable] struct { + marshaller Marshaller[T] + client *p2p.Client + metrics Metrics + targetGossipSize int + + lock sync.Mutex + pending buffer.Deque[T] +} + +// Gossip flushes any queued gossipables +func (p *PushGossiper[T]) Gossip(ctx context.Context) error { + p.lock.Lock() + defer p.lock.Unlock() + + if p.pending.Len() == 0 { + return nil + } + + sentBytes := 0 + gossip := make([][]byte, 0, p.pending.Len()) + for sentBytes < p.targetGossipSize { + gossipable, ok := p.pending.PeekLeft() + if !ok { + break + } + + bytes, err := p.marshaller.MarshalGossip(gossipable) + if err != nil { + // remove this item so we don't get stuck in a loop + _, _ = p.pending.PopLeft() + return err + } + + gossip = append(gossip, bytes) + sentBytes += len(bytes) + p.pending.PopLeft() + } + + msgBytes, err := MarshalAppGossip(gossip) + if err != nil { + return err + } + + sentCountMetric, err := p.metrics.sentCount.GetMetricWith(pushLabels) + if err != nil { + return fmt.Errorf("failed to get sent count metric: %w", err) + } + + sentBytesMetric, err := p.metrics.sentBytes.GetMetricWith(pushLabels) + if err != nil { + return fmt.Errorf("failed to get sent bytes metric: %w", err) + } + + sentCountMetric.Add(float64(len(gossip))) + sentBytesMetric.Add(float64(sentBytes)) + + return p.client.AppGossip(ctx, msgBytes) +} + +func (p *PushGossiper[T]) Add(gossipables ...T) { + p.lock.Lock() + defer p.lock.Unlock() + + for _, gossipable := range gossipables { + p.pending.PushRight(gossipable) + } } 
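The PushGossiper added above queues gossipables with Add and drains them in Gossip, stopping once targetGossipSize bytes have been batched and leaving the remainder queued for the next cycle. A toy, dependency-free sketch of that accumulate-then-flush pattern follows; pushQueue and its byte budget are invented for illustration, and the real type additionally marshals items and wraps them in a PushGossip message.

package main

import "fmt"

// pushQueue queues raw items and flushes them in byte-budgeted batches,
// mirroring how PushGossiper bounds each gossip cycle by targetGossipSize.
type pushQueue struct {
	pending [][]byte
	budget  int
}

// Add appends items to the pending queue.
func (q *pushQueue) Add(items ...[]byte) {
	q.pending = append(q.pending, items...)
}

// Flush pops items until the byte budget is reached and returns the batch;
// anything left over stays queued for the next flush.
func (q *pushQueue) Flush() [][]byte {
	var batch [][]byte
	sent := 0
	for len(q.pending) > 0 && sent < q.budget {
		item := q.pending[0]
		q.pending = q.pending[1:]
		batch = append(batch, item)
		sent += len(item)
	}
	return batch
}

func main() {
	q := &pushQueue{budget: 8}
	q.Add([]byte("tx-0"), []byte("tx-1"), []byte("tx-2"))
	fmt.Printf("cycle 1: %q\n", q.Flush()) // ["tx-0" "tx-1"]
	fmt.Printf("cycle 2: %q\n", q.Flush()) // ["tx-2"]
}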
// Every calls [Gossip] every [frequency] amount of time. @@ -196,3 +329,46 @@ func Every(ctx context.Context, log logging.Logger, gossiper Gossiper, frequency } } } + +type NoOpGossiper struct{} + +func (NoOpGossiper) Gossip(context.Context) error { + return nil +} + +type NoOpAccumulator[T Gossipable] struct{} + +func (NoOpAccumulator[_]) Gossip(context.Context) error { + return nil +} + +func (NoOpAccumulator[T]) Add(...T) {} + +type TestGossiper struct { + GossipF func(ctx context.Context) error +} + +func (t *TestGossiper) Gossip(ctx context.Context) error { + return t.GossipF(ctx) +} + +type TestAccumulator[T Gossipable] struct { + GossipF func(ctx context.Context) error + AddF func(...T) +} + +func (t TestAccumulator[T]) Gossip(ctx context.Context) error { + if t.GossipF == nil { + return nil + } + + return t.GossipF(ctx) +} + +func (t TestAccumulator[T]) Add(gossipables ...T) { + if t.AddF == nil { + return + } + + t.AddF(gossipables...) +} diff --git a/network/p2p/gossip/gossip_test.go b/network/p2p/gossip/gossip_test.go index eb4b23ecd9c8..a58d98f8fe58 100644 --- a/network/p2p/gossip/gossip_test.go +++ b/network/p2p/gossip/gossip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gossip @@ -11,34 +11,30 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "google.golang.org/protobuf/proto" - "go.uber.org/mock/gomock" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" ) -var ( - _ p2p.ValidatorSet = (*testValidatorSet)(nil) - _ Gossiper = (*testGossiper)(nil) -) - -func TestGossiperShutdown(t *testing.T) { - require := require.New(t) - - metrics := prometheus.NewRegistry() - gossiper, err := NewPullGossiper[testTx]( - Config{}, +func TestGossiperShutdown(*testing.T) { + gossiper := NewPullGossiper[*testTx]( logging.NoLog{}, nil, nil, - metrics, + nil, + Metrics{}, + 0, ) - require.NoError(err) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -56,7 +52,7 @@ func TestGossiperShutdown(t *testing.T) { func TestGossiperGossip(t *testing.T) { tests := []struct { name string - config HandlerConfig + targetResponseSize int requester []*testTx // what we have responder []*testTx // what the peer we're requesting gossip from has expectedPossibleValues []*testTx // possible values we can have @@ -66,48 +62,38 @@ func TestGossiperGossip(t *testing.T) { name: "no gossip - no one knows anything", }, { - name: "no gossip - requester knows more than responder", - config: HandlerConfig{ - TargetResponseSize: 1024, - }, + name: "no gossip - requester knows more than responder", + targetResponseSize: 1024, requester: []*testTx{{id: ids.ID{0}}}, expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, expectedLen: 1, }, { - name: "no gossip - requester knows everything responder knows", - config: HandlerConfig{ - TargetResponseSize: 1024, - }, + name: "no gossip - requester knows everything responder knows", + targetResponseSize: 1024, requester: []*testTx{{id: ids.ID{0}}}, responder: []*testTx{{id: ids.ID{0}}}, 
expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, expectedLen: 1, }, { - name: "gossip - requester knows nothing", - config: HandlerConfig{ - TargetResponseSize: 1024, - }, + name: "gossip - requester knows nothing", + targetResponseSize: 1024, responder: []*testTx{{id: ids.ID{0}}}, expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, expectedLen: 1, }, { - name: "gossip - requester knows less than responder", - config: HandlerConfig{ - TargetResponseSize: 1024, - }, + name: "gossip - requester knows less than responder", + targetResponseSize: 1024, requester: []*testTx{{id: ids.ID{0}}}, responder: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}}, expectedPossibleValues: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}}, expectedLen: 2, }, { - name: "gossip - target response size exceeded", - config: HandlerConfig{ - TargetResponseSize: 32, - }, + name: "gossip - target response size exceeded", + targetResponseSize: 32, responder: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}, {id: ids.ID{2}}}, expectedPossibleValues: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}, {id: ids.ID{2}}}, expectedLen: 2, @@ -117,67 +103,66 @@ func TestGossiperGossip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) + ctx := context.Background() + + responseSender := &common.FakeSender{ + SentAppResponse: make(chan []byte, 1), + } + responseNetwork, err := p2p.NewNetwork(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") + require.NoError(err) - responseSender := common.NewMockSender(ctrl) - responseRouter := p2p.NewRouter(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") - responseBloom, err := NewBloomFilter(1000, 0.01) + responseBloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) require.NoError(err) - responseSet := testSet{ - set: set.Set[*testTx]{}, + responseSet := &testSet{ + txs: make(map[ids.ID]*testTx), bloom: responseBloom, } for _, item := range tt.responder { require.NoError(responseSet.Add(item)) } - peers := &p2p.Peers{} - require.NoError(peers.Connected(context.Background(), ids.EmptyNodeID, nil)) - handler, err := NewHandler[*testTx](responseSet, tt.config, prometheus.NewRegistry()) + metrics, err := NewMetrics(prometheus.NewRegistry(), "") require.NoError(err) - _, err = responseRouter.RegisterAppProtocol(0x0, handler, peers) + marshaller := testMarshaller{} + handler := NewHandler[*testTx]( + logging.NoLog{}, + marshaller, + NoOpAccumulator[*testTx]{}, + responseSet, + metrics, + tt.targetResponseSize, + ) + require.NoError(err) + require.NoError(responseNetwork.AddHandler(0x0, handler)) + + requestSender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + + requestNetwork, err := p2p.NewNetwork(logging.NoLog{}, requestSender, prometheus.NewRegistry(), "") require.NoError(err) + require.NoError(requestNetwork.Connected(context.Background(), ids.EmptyNodeID, nil)) - requestSender := common.NewMockSender(ctrl) - requestRouter := p2p.NewRouter(logging.NoLog{}, requestSender, prometheus.NewRegistry(), "") - - gossiped := make(chan struct{}) - requestSender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - go func() { - require.NoError(responseRouter.AppRequest(ctx, ids.EmptyNodeID, requestID, time.Time{}, request)) - }() - }).AnyTimes() - - responseSender.EXPECT(). 
- SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) { - require.NoError(requestRouter.AppResponse(ctx, nodeID, requestID, appResponseBytes)) - close(gossiped) - }).AnyTimes() - - bloom, err := NewBloomFilter(1000, 0.01) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) require.NoError(err) - requestSet := testSet{ - set: set.Set[*testTx]{}, + requestSet := &testSet{ + txs: make(map[ids.ID]*testTx), bloom: bloom, } for _, item := range tt.requester { require.NoError(requestSet.Add(item)) } - requestClient, err := requestRouter.RegisterAppProtocol(0x0, nil, peers) - require.NoError(err) + requestClient := requestNetwork.NewClient(0x0) - config := Config{ - PollSize: 1, - } - gossiper, err := NewPullGossiper[testTx, *testTx]( - config, + require.NoError(err) + gossiper := NewPullGossiper[*testTx]( logging.NoLog{}, + marshaller, requestSet, requestClient, - prometheus.NewRegistry(), + metrics, + 1, ) require.NoError(err) received := set.Set[*testTx]{} @@ -185,11 +170,12 @@ func TestGossiperGossip(t *testing.T) { received.Add(tx) } - require.NoError(gossiper.Gossip(context.Background())) - <-gossiped + require.NoError(gossiper.Gossip(ctx)) + require.NoError(responseNetwork.AppRequest(ctx, ids.EmptyNodeID, 1, time.Time{}, <-requestSender.SentAppRequest)) + require.NoError(requestNetwork.AppResponse(ctx, ids.EmptyNodeID, 1, <-responseSender.SentAppResponse)) - require.Len(requestSet.set, tt.expectedLen) - require.Subset(tt.expectedPossibleValues, requestSet.set.List()) + require.Len(requestSet.txs, tt.expectedLen) + require.Subset(tt.expectedPossibleValues, maps.Values(requestSet.txs)) // we should not receive anything that we already had before we // requested the gossip @@ -203,8 +189,8 @@ func TestGossiperGossip(t *testing.T) { func TestEvery(*testing.T) { ctx, cancel := context.WithCancel(context.Background()) calls := 0 - gossiper := &testGossiper{ - gossipF: func(context.Context) error { + gossiper := &TestGossiper{ + GossipF: func(context.Context) error { if calls >= 10 { cancel() return nil @@ -230,8 +216,8 @@ func TestValidatorGossiper(t *testing.T) { calls := 0 gossiper := ValidatorGossiper{ - Gossiper: &testGossiper{ - gossipF: func(context.Context) error { + Gossiper: &TestGossiper{ + GossipF: func(context.Context) error { calls++ return nil }, @@ -250,12 +236,225 @@ func TestValidatorGossiper(t *testing.T) { require.Equal(2, calls) } -type testGossiper struct { - gossipF func(ctx context.Context) error +// Tests that the outgoing gossip is equivalent to what was accumulated +func TestPushGossiper(t *testing.T) { + tests := []struct { + name string + cycles [][]*testTx + }{ + { + name: "single cycle", + cycles: [][]*testTx{ + { + &testTx{ + id: ids.ID{0}, + }, + &testTx{ + id: ids.ID{1}, + }, + &testTx{ + id: ids.ID{2}, + }, + }, + }, + }, + { + name: "multiple cycles", + cycles: [][]*testTx{ + { + &testTx{ + id: ids.ID{0}, + }, + }, + { + &testTx{ + id: ids.ID{1}, + }, + &testTx{ + id: ids.ID{2}, + }, + }, + { + &testTx{ + id: ids.ID{3}, + }, + &testTx{ + id: ids.ID{4}, + }, + &testTx{ + id: ids.ID{5}, + }, + }, + { + &testTx{ + id: ids.ID{6}, + }, + &testTx{ + id: ids.ID{7}, + }, + &testTx{ + id: ids.ID{8}, + }, + &testTx{ + id: ids.ID{9}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := &common.FakeSender{ + SentAppGossip: 
make(chan []byte, 1), + } + network, err := p2p.NewNetwork( + logging.NoLog{}, + sender, + prometheus.NewRegistry(), + "", + ) + require.NoError(err) + client := network.NewClient(0) + metrics, err := NewMetrics(prometheus.NewRegistry(), "") + require.NoError(err) + marshaller := testMarshaller{} + gossiper := NewPushGossiper[*testTx]( + marshaller, + client, + metrics, + units.MiB, + ) + + for _, gossipables := range tt.cycles { + gossiper.Add(gossipables...) + require.NoError(gossiper.Gossip(ctx)) + + want := &sdk.PushGossip{ + Gossip: make([][]byte, 0, len(tt.cycles)), + } + + for _, gossipable := range gossipables { + bytes, err := marshaller.MarshalGossip(gossipable) + require.NoError(err) + + want.Gossip = append(want.Gossip, bytes) + } + + // remove the handler prefix + sentMsg := <-sender.SentAppGossip + got := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(sentMsg[1:], got)) + + require.Equal(want.Gossip, got.Gossip) + } + }) + } } -func (t *testGossiper) Gossip(ctx context.Context) error { - return t.gossipF(ctx) +// Tests that gossip to a peer should forward the gossip if it was not +// previously known +func TestPushGossipE2E(t *testing.T) { + require := require.New(t) + + // tx known by both the sender and the receiver which should not be + // forwarded + knownTx := &testTx{id: ids.GenerateTestID()} + + log := logging.NoLog{} + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 100, 0.01, 0.05) + require.NoError(err) + set := &testSet{ + txs: make(map[ids.ID]*testTx), + bloom: bloom, + } + require.NoError(set.Add(knownTx)) + + forwarder := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + forwarderNetwork, err := p2p.NewNetwork(log, forwarder, prometheus.NewRegistry(), "") + require.NoError(err) + handlerID := uint64(123) + client := forwarderNetwork.NewClient(handlerID) + + metrics, err := NewMetrics(prometheus.NewRegistry(), "") + require.NoError(err) + marshaller := testMarshaller{} + forwarderGossiper := NewPushGossiper[*testTx]( + marshaller, + client, + metrics, + units.MiB, + ) + + handler := NewHandler[*testTx]( + log, + marshaller, + forwarderGossiper, + set, + metrics, + 0, + ) + require.NoError(err) + require.NoError(forwarderNetwork.AddHandler(handlerID, handler)) + + issuer := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + issuerNetwork, err := p2p.NewNetwork(log, issuer, prometheus.NewRegistry(), "") + require.NoError(err) + issuerClient := issuerNetwork.NewClient(handlerID) + require.NoError(err) + issuerGossiper := NewPushGossiper[*testTx]( + marshaller, + issuerClient, + metrics, + units.MiB, + ) + + want := []*testTx{ + {id: ids.GenerateTestID()}, + {id: ids.GenerateTestID()}, + {id: ids.GenerateTestID()}, + } + + // gossip both some unseen txs and one the receiver already knows about + var gossiped []*testTx + gossiped = append(gossiped, want...) + gossiped = append(gossiped, knownTx) + + issuerGossiper.Add(gossiped...) 
+ addedToSet := make([]*testTx, 0, len(want)) + set.onAdd = func(tx *testTx) { + addedToSet = append(addedToSet, tx) + } + + ctx := context.Background() + require.NoError(issuerGossiper.Gossip(ctx)) + + // make sure that we only add new txs someone gossips to us + require.NoError(forwarderNetwork.AppGossip(ctx, ids.EmptyNodeID, <-issuer.SentAppGossip)) + require.Equal(want, addedToSet) + + // make sure that we only forward txs we have not already seen before + forwardedBytes := <-forwarder.SentAppGossip + forwardedMsg := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(forwardedBytes[1:], forwardedMsg)) + require.Len(forwardedMsg.Gossip, len(want)) + + gotForwarded := make([]*testTx, 0, len(addedToSet)) + + for _, bytes := range forwardedMsg.Gossip { + tx, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + gotForwarded = append(gotForwarded, tx) + } + + require.Equal(want, gotForwarded) } type testValidatorSet struct { diff --git a/network/p2p/gossip/gossipable.go b/network/p2p/gossip/gossipable.go index 84c37e2d6b84..238c62b4641c 100644 --- a/network/p2p/gossip/gossipable.go +++ b/network/p2p/gossip/gossipable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gossip @@ -7,18 +7,23 @@ import "github.com/ava-labs/avalanchego/ids" // Gossipable is an item that can be gossiped across the network type Gossipable interface { - GetID() ids.ID - Marshal() ([]byte, error) - Unmarshal(bytes []byte) error + GossipID() ids.ID +} + +// Marshaller handles parsing logic for a concrete Gossipable type +type Marshaller[T Gossipable] interface { + MarshalGossip(T) ([]byte, error) + UnmarshalGossip([]byte) (T, error) } // Set holds a set of known Gossipable items type Set[T Gossipable] interface { - // Add adds a Gossipable to the set + // Add adds a Gossipable to the set. Returns an error if gossipable was not + // added. Add(gossipable T) error // Iterate iterates over elements until [f] returns false Iterate(f func(gossipable T) bool) // GetFilter returns the byte representation of bloom filter and its // corresponding salt. - GetFilter() (bloom []byte, salt []byte, err error) + GetFilter() (bloom []byte, salt []byte) } diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index ecaf58434bc2..38e883926366 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -1,102 +1,70 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
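The gossipable.go hunk above narrows Gossipable to a single GossipID method, moves (un)marshalling into a separate Marshaller, and drops the error return from Set.GetFilter. As a rough illustration of what a caller-side implementation of these interfaces could look like, here is a sketch against the APIs exactly as they appear in this diff; it assumes the avalanchego module at this revision is on the module path, and memTx, memMarshaller, and memSet are invented names.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/network/p2p/gossip"
)

// Compile-time checks that the toy types satisfy the new interfaces.
var (
	_ gossip.Gossipable         = (*memTx)(nil)
	_ gossip.Marshaller[*memTx] = (*memMarshaller)(nil)
	_ gossip.Set[*memTx]        = (*memSet)(nil)
)

// memTx is a hypothetical gossipable item; only GossipID is required now.
type memTx struct {
	id ids.ID
}

func (t *memTx) GossipID() ids.ID { return t.id }

// memMarshaller owns the wire format (here, just the raw 32-byte ID).
type memMarshaller struct{}

func (memMarshaller) MarshalGossip(tx *memTx) ([]byte, error) { return tx.id[:], nil }

func (memMarshaller) UnmarshalGossip(b []byte) (*memTx, error) {
	id, err := ids.ToID(b)
	return &memTx{id: id}, err
}

// memSet tracks known items and mirrors them into a bloom filter so that
// GetFilter can tell peers what we already have.
type memSet struct {
	txs   map[ids.ID]*memTx
	bloom *gossip.BloomFilter
}

func (s *memSet) Add(tx *memTx) error {
	if _, ok := s.txs[tx.id]; ok {
		return fmt.Errorf("%s already known", tx.id)
	}
	s.txs[tx.id] = tx
	s.bloom.Add(tx)
	return nil
}

func (s *memSet) Iterate(f func(*memTx) bool) {
	for _, tx := range s.txs {
		if !f(tx) {
			return
		}
	}
}

func (s *memSet) GetFilter() ([]byte, []byte) { return s.bloom.Marshal() }

func main() {
	bloom, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "example", 1024, 0.01, 0.05)
	if err != nil {
		panic(err)
	}
	set := &memSet{txs: map[ids.ID]*memTx{}, bloom: bloom}
	if err := set.Add(&memTx{id: ids.GenerateTestID()}); err != nil {
		panic(err)
	}

	filter, salt := set.GetFilter()
	fmt.Printf("filter=%d bytes salt=%d bytes\n", len(filter), len(salt))
}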
package gossip import ( "context" - "errors" + "fmt" "time" - bloomfilter "github.com/holiman/bloomfilter/v2" - - "github.com/prometheus/client_golang/prometheus" - - "google.golang.org/protobuf/proto" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/utils" -) - -var ( - _ p2p.Handler = (*Handler[Gossipable])(nil) - - ErrInvalidID = errors.New("invalid id") + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/logging" ) -type HandlerConfig struct { - Namespace string - TargetResponseSize int -} +var _ p2p.Handler = (*Handler[*testTx])(nil) func NewHandler[T Gossipable]( + log logging.Logger, + marshaller Marshaller[T], + accumulator Accumulator[T], set Set[T], - config HandlerConfig, - metrics prometheus.Registerer, -) (*Handler[T], error) { - h := &Handler[T]{ + metrics Metrics, + targetResponseSize int, +) *Handler[T] { + return &Handler[T]{ Handler: p2p.NoOpHandler{}, + log: log, + marshaller: marshaller, + accumulator: accumulator, set: set, - targetResponseSize: config.TargetResponseSize, - sentN: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: config.Namespace, - Name: "gossip_sent_n", - Help: "amount of gossip sent (n)", - }), - sentBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: config.Namespace, - Name: "gossip_sent_bytes", - Help: "amount of gossip sent (bytes)", - }), + metrics: metrics, + targetResponseSize: targetResponseSize, } - - err := utils.Err( - metrics.Register(h.sentN), - metrics.Register(h.sentBytes), - ) - return h, err } type Handler[T Gossipable] struct { p2p.Handler + marshaller Marshaller[T] + accumulator Accumulator[T] + log logging.Logger set Set[T] + metrics Metrics targetResponseSize int - - sentN prometheus.Counter - sentBytes prometheus.Counter } func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, error) { - request := &sdk.PullGossipRequest{} - if err := proto.Unmarshal(requestBytes, request); err != nil { - return nil, err - } - - salt, err := ids.ToID(request.Salt) + filter, salt, err := ParseAppRequest(requestBytes) if err != nil { return nil, err } - filter := &BloomFilter{ - Bloom: &bloomfilter.Filter{}, - Salt: salt, - } - if err := filter.Bloom.UnmarshalBinary(request.Filter); err != nil { - return nil, err - } - responseSize := 0 gossipBytes := make([][]byte, 0) h.set.Iterate(func(gossipable T) bool { + gossipID := gossipable.GossipID() + // filter out what the requesting peer already knows about - if filter.Has(gossipable) { + if bloom.Contains(filter, gossipID[:], salt[:]) { return true } var bytes []byte - bytes, err = gossipable.Marshal() + bytes, err = h.marshaller.MarshalGossip(gossipable) if err != nil { return false } @@ -113,12 +81,72 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return nil, err } - response := &sdk.PullGossipResponse{ - Gossip: gossipBytes, + sentCountMetric, err := h.metrics.sentCount.GetMetricWith(pullLabels) + if err != nil { + return nil, fmt.Errorf("failed to get sent count metric: %w", err) + } + + sentBytesMetric, err := h.metrics.sentBytes.GetMetricWith(pullLabels) + if err != nil { + return nil, fmt.Errorf("failed to get sent bytes metric: %w", err) + } + + sentCountMetric.Add(float64(len(gossipBytes))) + sentBytesMetric.Add(float64(responseSize)) + + return MarshalAppResponse(gossipBytes) +} + +func (h Handler[_]) 
AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + gossip, err := ParseAppGossip(gossipBytes) + if err != nil { + h.log.Debug("failed to unmarshal gossip", zap.Error(err)) + return + } + + receivedBytes := 0 + for _, bytes := range gossip { + receivedBytes += len(bytes) + gossipable, err := h.marshaller.UnmarshalGossip(bytes) + if err != nil { + h.log.Debug("failed to unmarshal gossip", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + continue + } + + if err := h.set.Add(gossipable); err != nil { + h.log.Debug( + "failed to add gossip to the known set", + zap.Stringer("nodeID", nodeID), + zap.Stringer("id", gossipable.GossipID()), + zap.Error(err), + ) + continue + } + + // continue gossiping messages we have not seen to other peers + h.accumulator.Add(gossipable) + } + + if err := h.accumulator.Gossip(ctx); err != nil { + h.log.Error("failed to forward gossip", zap.Error(err)) + return } - h.sentN.Add(float64(len(response.Gossip))) - h.sentBytes.Add(float64(responseSize)) + receivedCountMetric, err := h.metrics.receivedCount.GetMetricWith(pushLabels) + if err != nil { + h.log.Error("failed to get received count metric", zap.Error(err)) + return + } + + receivedBytesMetric, err := h.metrics.receivedBytes.GetMetricWith(pushLabels) + if err != nil { + h.log.Error("failed to get received bytes metric", zap.Error(err)) + return + } - return proto.Marshal(response) + receivedCountMetric.Add(float64(len(gossip))) + receivedBytesMetric.Add(float64(receivedBytes)) } diff --git a/network/p2p/gossip/message.go b/network/p2p/gossip/message.go new file mode 100644 index 000000000000..47e6784e43d8 --- /dev/null +++ b/network/p2p/gossip/message.go @@ -0,0 +1,59 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gossip + +import ( + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/utils/bloom" +) + +func MarshalAppRequest(filter, salt []byte) ([]byte, error) { + request := &sdk.PullGossipRequest{ + Filter: filter, + Salt: salt, + } + return proto.Marshal(request) +} + +func ParseAppRequest(bytes []byte) (*bloom.ReadFilter, ids.ID, error) { + request := &sdk.PullGossipRequest{} + if err := proto.Unmarshal(bytes, request); err != nil { + return nil, ids.Empty, err + } + + salt, err := ids.ToID(request.Salt) + if err != nil { + return nil, ids.Empty, err + } + + filter, err := bloom.Parse(request.Filter) + return filter, salt, err +} + +func MarshalAppResponse(gossip [][]byte) ([]byte, error) { + return proto.Marshal(&sdk.PullGossipResponse{ + Gossip: gossip, + }) +} + +func ParseAppResponse(bytes []byte) ([][]byte, error) { + response := &sdk.PullGossipResponse{} + err := proto.Unmarshal(bytes, response) + return response.Gossip, err +} + +func MarshalAppGossip(gossip [][]byte) ([]byte, error) { + return proto.Marshal(&sdk.PushGossip{ + Gossip: gossip, + }) +} + +func ParseAppGossip(bytes []byte) ([][]byte, error) { + msg := &sdk.PushGossip{} + err := proto.Unmarshal(bytes, msg) + return msg.Gossip, err +} diff --git a/network/p2p/gossip/test_gossip.go b/network/p2p/gossip/test_gossip.go index ba114adf3774..03098399462e 100644 --- a/network/p2p/gossip/test_gossip.go +++ b/network/p2p/gossip/test_gossip.go @@ -1,44 +1,53 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package gossip import ( + "fmt" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ Gossipable = (*testTx)(nil) - _ Set[*testTx] = (*testSet)(nil) + _ Gossipable = (*testTx)(nil) + _ Set[*testTx] = (*testSet)(nil) + _ Marshaller[*testTx] = (*testMarshaller)(nil) ) type testTx struct { id ids.ID } -func (t *testTx) GetID() ids.ID { +func (t *testTx) GossipID() ids.ID { return t.id } -func (t *testTx) Marshal() ([]byte, error) { - return t.id[:], nil +type testMarshaller struct{} + +func (testMarshaller) MarshalGossip(tx *testTx) ([]byte, error) { + return tx.id[:], nil } -func (t *testTx) Unmarshal(bytes []byte) error { - t.id = ids.ID{} - copy(t.id[:], bytes) - return nil +func (testMarshaller) UnmarshalGossip(bytes []byte) (*testTx, error) { + id, err := ids.ToID(bytes) + return &testTx{ + id: id, + }, err } type testSet struct { - set set.Set[*testTx] + txs map[ids.ID]*testTx bloom *BloomFilter onAdd func(tx *testTx) } -func (t testSet) Add(gossipable *testTx) error { - t.set.Add(gossipable) +func (t *testSet) Add(gossipable *testTx) error { + if _, ok := t.txs[gossipable.id]; ok { + return fmt.Errorf("%s already present", gossipable.id) + } + + t.txs[gossipable.id] = gossipable t.bloom.Add(gossipable) if t.onAdd != nil { t.onAdd(gossipable) @@ -47,15 +56,14 @@ func (t testSet) Add(gossipable *testTx) error { return nil } -func (t testSet) Iterate(f func(gossipable *testTx) bool) { - for tx := range t.set { +func (t *testSet) Iterate(f func(gossipable *testTx) bool) { + for _, tx := range t.txs { if !f(tx) { return } } } -func (t testSet) GetFilter() ([]byte, []byte, error) { - bloom, err := t.bloom.Bloom.MarshalBinary() - return bloom, t.bloom.Salt[:], err +func (t *testSet) GetFilter() ([]byte, []byte) { + return t.bloom.Marshal() } diff --git a/network/p2p/handler.go b/network/p2p/handler.go index 790893142087..3ff4de29037e 100644 --- a/network/p2p/handler.go +++ b/network/p2p/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p @@ -20,6 +20,7 @@ var ( ErrNotValidator = errors.New("not a validator") _ Handler = (*NoOpHandler)(nil) + _ Handler = (*TestHandler)(nil) _ Handler = (*ValidatorHandler)(nil) ) @@ -30,7 +31,7 @@ type Handler interface { ctx context.Context, nodeID ids.NodeID, gossipBytes []byte, - ) error + ) // AppRequest is called when handling an AppRequest message. 
// Returns the bytes for the response corresponding to [requestBytes] AppRequest( @@ -50,11 +51,10 @@ type Handler interface { ) ([]byte, error) } +// NoOpHandler drops all messages type NoOpHandler struct{} -func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) error { - return nil -} +func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) {} func (NoOpHandler) AppRequest(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { return nil, nil @@ -64,38 +64,61 @@ func (NoOpHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []by return nil, nil } +func NewValidatorHandler( + handler Handler, + validatorSet ValidatorSet, + log logging.Logger, +) *ValidatorHandler { + return &ValidatorHandler{ + handler: handler, + validatorSet: validatorSet, + log: log, + } +} + // ValidatorHandler drops messages from non-validators type ValidatorHandler struct { - Handler - ValidatorSet ValidatorSet + handler Handler + validatorSet ValidatorSet + log logging.Logger } -func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - if !v.ValidatorSet.Has(ctx, nodeID) { - return ErrNotValidator +func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if !v.validatorSet.Has(ctx, nodeID) { + v.log.Debug( + "dropping message", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "not a validator"), + ) + return } - return v.Handler.AppGossip(ctx, nodeID, gossipBytes) + v.handler.AppGossip(ctx, nodeID, gossipBytes) } func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { - if !v.ValidatorSet.Has(ctx, nodeID) { + if !v.validatorSet.Has(ctx, nodeID) { return nil, ErrNotValidator } - return v.Handler.AppRequest(ctx, nodeID, deadline, requestBytes) + return v.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +func (v ValidatorHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return v.handler.CrossChainAppRequest(ctx, chainID, deadline, requestBytes) } // responder automatically sends the response for a given request type responder struct { + Handler handlerID uint64 - handler Handler log logging.Logger sender common.AppSender } +// AppRequest calls the underlying handler and sends back the response to nodeID func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - appResponse, err := r.handler.AppRequest(ctx, nodeID, deadline, request) + appResponse, err := r.Handler.AppRequest(ctx, nodeID, deadline, request) if err != nil { r.log.Debug("failed to handle message", zap.Stringer("messageOp", message.AppRequestOp), @@ -111,19 +134,10 @@ func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID return r.sender.SendAppResponse(ctx, nodeID, requestID, appResponse) } -func (r *responder) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) { - if err := r.handler.AppGossip(ctx, nodeID, msg); err != nil { - r.log.Debug("failed to handle message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", nodeID), - zap.Uint64("handlerID", r.handlerID), - zap.Binary("message", msg), - ) - } -} - +// CrossChainAppRequest calls the underlying handler and sends back the response +// to chainID func (r *responder) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline 
time.Time, request []byte) error { - appResponse, err := r.handler.CrossChainAppRequest(ctx, chainID, deadline, request) + appResponse, err := r.Handler.CrossChainAppRequest(ctx, chainID, deadline, request) if err != nil { r.log.Debug("failed to handle message", zap.Stringer("messageOp", message.CrossChainAppRequestOp), @@ -138,3 +152,33 @@ func (r *responder) CrossChainAppRequest(ctx context.Context, chainID ids.ID, re return r.sender.SendCrossChainAppResponse(ctx, chainID, requestID, appResponse) } + +type TestHandler struct { + AppGossipF func(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) + AppRequestF func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) + CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) +} + +func (t TestHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if t.AppGossipF == nil { + return + } + + t.AppGossipF(ctx, nodeID, gossipBytes) +} + +func (t TestHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.AppRequestF == nil { + return nil, nil + } + + return t.AppRequestF(ctx, nodeID, deadline, requestBytes) +} + +func (t TestHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.CrossChainAppRequestF == nil { + return nil, nil + } + + return t.CrossChainAppRequestF(ctx, chainID, deadline, requestBytes) +} diff --git a/network/p2p/handler_test.go b/network/p2p/handler_test.go index 539076cb7062..0633b70f00a8 100644 --- a/network/p2p/handler_test.go +++ b/network/p2p/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
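With the handler.go change above, ValidatorHandler.AppGossip now drops non-validator gossip silently (logging at debug) instead of returning ErrNotValidator, and construction goes through NewValidatorHandler. A small sketch of how a caller might exercise that, using the exported TestHandler from this diff; allowList is an invented ValidatorSet implementation, and the sketch assumes p2p.ValidatorSet only requires Has(ctx, nodeID) bool and that ids.GenerateTestNodeID is available as in other avalanchego tests.

package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/network/p2p"
	"github.com/ava-labs/avalanchego/utils/logging"
)

// allowList is a toy validator set: exactly one node ID counts as a validator.
type allowList struct {
	allowed ids.NodeID
}

func (a allowList) Has(_ context.Context, nodeID ids.NodeID) bool {
	return nodeID == a.allowed
}

func main() {
	validator := ids.GenerateTestNodeID()
	nonValidator := ids.GenerateTestNodeID()

	// Count how many gossip messages reach the wrapped handler.
	delivered := 0
	handler := p2p.NewValidatorHandler(
		&p2p.TestHandler{
			AppGossipF: func(context.Context, ids.NodeID, []byte) { delivered++ },
		},
		allowList{allowed: validator},
		logging.NoLog{},
	)

	ctx := context.Background()
	handler.AppGossip(ctx, nonValidator, []byte("dropped silently"))
	handler.AppGossip(ctx, validator, []byte("handled"))
	fmt.Println("delivered:", delivered) // 1: only the validator's gossip got through
}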
package p2p @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -32,20 +33,20 @@ func TestValidatorHandlerAppGossip(t *testing.T) { name string validatorSet ValidatorSet nodeID ids.NodeID - expected error + expected bool }{ { name: "message dropped", validatorSet: testValidatorSet{}, nodeID: nodeID, - expected: ErrNotValidator, }, { name: "message handled", validatorSet: testValidatorSet{ validators: validatorSet, }, - nodeID: nodeID, + nodeID: nodeID, + expected: true, }, } @@ -53,13 +54,19 @@ func TestValidatorHandlerAppGossip(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - handler := ValidatorHandler{ - Handler: NoOpHandler{}, - ValidatorSet: tt.validatorSet, - } - - err := handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) - require.ErrorIs(err, tt.expected) + called := false + handler := NewValidatorHandler( + &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, + tt.validatorSet, + logging.NoLog{}, + ) + + handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) + require.Equal(tt.expected, called) }) } } @@ -93,10 +100,11 @@ func TestValidatorHandlerAppRequest(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - handler := ValidatorHandler{ - Handler: NoOpHandler{}, - ValidatorSet: tt.validatorSet, - } + handler := NewValidatorHandler( + NoOpHandler{}, + tt.validatorSet, + logging.NoLog{}, + ) _, err := handler.AppRequest(context.Background(), tt.nodeID, time.Time{}, []byte("foobar")) require.ErrorIs(err, tt.expected) diff --git a/network/p2p/mocks/mock_handler.go b/network/p2p/mocks/mock_handler.go deleted file mode 100644 index b15c77b79896..000000000000 --- a/network/p2p/mocks/mock_handler.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/network/p2p (interfaces: Handler) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "go.uber.org/mock/gomock" -) - -// MockHandler is a mock of Handler interface. -type MockHandler struct { - ctrl *gomock.Controller - recorder *MockHandlerMockRecorder -} - -// MockHandlerMockRecorder is the mock recorder for MockHandler. -type MockHandlerMockRecorder struct { - mock *MockHandler -} - -// NewMockHandler creates a new mock instance. -func NewMockHandler(ctrl *gomock.Controller) *MockHandler { - mock := &MockHandler{ctrl: ctrl} - mock.recorder = &MockHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { - return m.recorder -} - -// AppGossip mocks base method. -func (m *MockHandler) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppGossip", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// AppGossip indicates an expected call of AppGossip. 
-func (mr *MockHandlerMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockHandler)(nil).AppGossip), arg0, arg1, arg2) -} - -// AppRequest mocks base method. -func (m *MockHandler) AppRequest(arg0 context.Context, arg1 ids.NodeID, arg2 time.Time, arg3 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequest", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AppRequest indicates an expected call of AppRequest. -func (mr *MockHandlerMockRecorder) AppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockHandler)(nil).AppRequest), arg0, arg1, arg2, arg3) -} - -// CrossChainAppRequest mocks base method. -func (m *MockHandler) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 time.Time, arg3 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CrossChainAppRequest", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CrossChainAppRequest indicates an expected call of CrossChainAppRequest. -func (mr *MockHandlerMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockHandler)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3) -} diff --git a/network/p2p/network.go b/network/p2p/network.go new file mode 100644 index 000000000000..a98579c44183 --- /dev/null +++ b/network/p2p/network.go @@ -0,0 +1,287 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "encoding/binary" + "strconv" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +var ( + _ validators.Connector = (*Network)(nil) + _ common.AppHandler = (*Network)(nil) + _ NodeSampler = (*peerSampler)(nil) + + handlerLabel = "handlerID" + labelNames = []string{handlerLabel} +) + +// ClientOption configures Client +type ClientOption interface { + apply(options *clientOptions) +} + +type clientOptionFunc func(options *clientOptions) + +func (o clientOptionFunc) apply(options *clientOptions) { + o(options) +} + +// WithValidatorSampling configures Client.AppRequestAny to sample validators +func WithValidatorSampling(validators *Validators) ClientOption { + return clientOptionFunc(func(options *clientOptions) { + options.nodeSampler = validators + }) +} + +// clientOptions holds client-configurable values +type clientOptions struct { + // nodeSampler is used to select nodes to route Client.AppRequestAny to + nodeSampler NodeSampler +} + +// NewNetwork returns an instance of Network +func NewNetwork( + log logging.Logger, + sender common.AppSender, + registerer prometheus.Registerer, + namespace string, +) (*Network, error) { + metrics := metrics{ + appRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_time", + Help: "app request time (ns)", + }, labelNames), + appRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_count", + Help: "app request count (n)", + }, labelNames), + appResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_response_time", + Help: "app response time (ns)", + }, labelNames), + appResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_response_count", + Help: "app response count (n)", + }, labelNames), + appRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_failed_time", + Help: "app request failed time (ns)", + }, labelNames), + appRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_failed_count", + Help: "app request failed count (ns)", + }, labelNames), + appGossipTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_gossip_time", + Help: "app gossip time (ns)", + }, labelNames), + appGossipCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_gossip_count", + Help: "app gossip count (n)", + }, labelNames), + crossChainAppRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_time", + Help: "cross chain app request time (ns)", + }, labelNames), + crossChainAppRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_count", + Help: "cross chain app request count (n)", + }, labelNames), + crossChainAppResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_response_time", + Help: "cross chain app 
response time (ns)", + }, labelNames), + crossChainAppResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_response_count", + Help: "cross chain app response count (n)", + }, labelNames), + crossChainAppRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_failed_time", + Help: "cross chain app request failed time (ns)", + }, labelNames), + crossChainAppRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_failed_count", + Help: "cross chain app request failed count (n)", + }, labelNames), + } + + err := utils.Err( + registerer.Register(metrics.appRequestTime), + registerer.Register(metrics.appRequestCount), + registerer.Register(metrics.appResponseTime), + registerer.Register(metrics.appResponseCount), + registerer.Register(metrics.appRequestFailedTime), + registerer.Register(metrics.appRequestFailedCount), + registerer.Register(metrics.appGossipTime), + registerer.Register(metrics.appGossipCount), + registerer.Register(metrics.crossChainAppRequestTime), + registerer.Register(metrics.crossChainAppRequestCount), + registerer.Register(metrics.crossChainAppResponseTime), + registerer.Register(metrics.crossChainAppResponseCount), + registerer.Register(metrics.crossChainAppRequestFailedTime), + registerer.Register(metrics.crossChainAppRequestFailedCount), + ) + if err != nil { + return nil, err + } + + return &Network{ + Peers: &Peers{}, + log: log, + sender: sender, + router: newRouter(log, sender, metrics), + }, nil +} + +// Network exposes networking state and supports building p2p application +// protocols +type Network struct { + Peers *Peers + + log logging.Logger + sender common.AppSender + + router *router +} + +func (n *Network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.AppRequest(ctx, nodeID, requestID, deadline, request) +} + +func (n *Network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + return n.router.AppResponse(ctx, nodeID, requestID, response) +} + +func (n *Network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + return n.router.AppRequestFailed(ctx, nodeID, requestID, appErr) +} + +func (n *Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { + return n.router.AppGossip(ctx, nodeID, msg) +} + +func (n *Network) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.CrossChainAppRequest(ctx, chainID, requestID, deadline, request) +} + +func (n *Network) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + return n.router.CrossChainAppResponse(ctx, chainID, requestID, response) +} + +func (n *Network) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + return n.router.CrossChainAppRequestFailed(ctx, chainID, requestID, appErr) +} + +func (n *Network) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { + n.Peers.add(nodeID) + return nil +} + +func (n *Network) Disconnected(_ context.Context, nodeID ids.NodeID) error { + n.Peers.remove(nodeID) + return nil +} + +// NewClient returns a Client that can be used to send messages for the 
+// corresponding protocol. +func (n *Network) NewClient(handlerID uint64, options ...ClientOption) *Client { + client := &Client{ + handlerID: handlerID, + handlerIDStr: strconv.FormatUint(handlerID, 10), + handlerPrefix: ProtocolPrefix(handlerID), + sender: n.sender, + router: n.router, + options: &clientOptions{ + nodeSampler: &peerSampler{ + peers: n.Peers, + }, + }, + } + + for _, option := range options { + option.apply(client.options) + } + + return client +} + +// AddHandler reserves an identifier for an application protocol +func (n *Network) AddHandler(handlerID uint64, handler Handler) error { + return n.router.addHandler(handlerID, handler) +} + +// Peers contains metadata about the current set of connected peers +type Peers struct { + lock sync.RWMutex + set set.SampleableSet[ids.NodeID] +} + +func (p *Peers) add(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Add(nodeID) +} + +func (p *Peers) remove(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Remove(nodeID) +} + +func (p *Peers) has(nodeID ids.NodeID) bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Contains(nodeID) +} + +// Sample returns a pseudo-random sample of up to limit Peers +func (p *Peers) Sample(limit int) []ids.NodeID { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Sample(limit) +} + +type peerSampler struct { + peers *Peers +} + +func (p peerSampler) Sample(_ context.Context, limit int) []ids.NodeID { + return p.peers.Sample(limit) +} + +func ProtocolPrefix(handlerID uint64) []byte { + return binary.AppendUvarint(nil, handlerID) +} diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go new file mode 100644 index 000000000000..40dc0ba54056 --- /dev/null +++ b/network/p2p/network_test.go @@ -0,0 +1,631 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
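For orientation, a hypothetical wiring sketch for the new Network type: it is constructed once, handlers are registered per protocol id, and Clients created from the same Network share its router. The name newPingClient and the protocol id 0x42 are assumptions made for this example.

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/avalanchego/network/p2p"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/utils/logging"
)

func newPingClient(sender common.AppSender, handler p2p.Handler) (*p2p.Network, *p2p.Client, error) {
	const pingHandlerID = 0x42 // illustrative protocol id

	network, err := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "p2p")
	if err != nil {
		return nil, nil, err
	}

	// Reserve the protocol id for this handler; duplicate ids are rejected.
	if err := network.AddHandler(pingHandlerID, handler); err != nil {
		return nil, nil, err
	}

	// The Network itself must also be hooked up as the VM's common.AppHandler
	// and validators.Connector so it receives App* messages and peer events.
	return network, network.NewClient(pingHandlerID), nil
}

Options such as WithValidatorSampling can be passed to NewClient when AppRequestAny should sample only connected validators.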
+ +package p2p + +import ( + "context" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +const ( + handlerID = 123 + handlerPrefix = byte(handlerID) +) + +var errFoo = &common.AppError{ + Code: 123, + Message: "foo", +} + +func TestMessageRouting(t *testing.T) { + require := require.New(t) + ctx := context.Background() + wantNodeID := ids.GenerateTestNodeID() + wantChainID := ids.GenerateTestID() + wantMsg := []byte("message") + + var appGossipCalled, appRequestCalled, crossChainAppRequestCalled bool + testHandler := &TestHandler{ + AppGossipF: func(_ context.Context, nodeID ids.NodeID, msg []byte) { + appGossipCalled = true + require.Equal(wantNodeID, nodeID) + require.Equal(wantMsg, msg) + }, + AppRequestF: func(_ context.Context, nodeID ids.NodeID, _ time.Time, msg []byte) ([]byte, error) { + appRequestCalled = true + require.Equal(wantNodeID, nodeID) + require.Equal(wantMsg, msg) + return nil, nil + }, + CrossChainAppRequestF: func(_ context.Context, chainID ids.ID, _ time.Time, msg []byte) ([]byte, error) { + crossChainAppRequestCalled = true + require.Equal(wantChainID, chainID) + require.Equal(wantMsg, msg) + return nil, nil + }, + } + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + SentAppRequest: make(chan []byte, 1), + SentCrossChainAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(1, testHandler)) + client := network.NewClient(1) + + require.NoError(client.AppGossip(ctx, wantMsg)) + require.NoError(network.AppGossip(ctx, wantNodeID, <-sender.SentAppGossip)) + require.True(appGossipCalled) + + require.NoError(client.AppRequest(ctx, set.Of(ids.EmptyNodeID), wantMsg, func(context.Context, ids.NodeID, []byte, error) {})) + require.NoError(network.AppRequest(ctx, wantNodeID, 1, time.Time{}, <-sender.SentAppRequest)) + require.True(appRequestCalled) + + require.NoError(client.CrossChainAppRequest(ctx, ids.Empty, wantMsg, func(context.Context, ids.ID, []byte, error) {})) + require.NoError(network.CrossChainAppRequest(ctx, wantChainID, 1, time.Time{}, <-sender.SentCrossChainAppRequest)) + require.True(crossChainAppRequestCalled) +} + +// Tests that the Client prefixes messages with the handler prefix +func TestClientPrefixesMessages(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + SentAppGossip: make(chan []byte, 1), + SentAppGossipSpecific: make(chan []byte, 1), + SentCrossChainAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.Connected(ctx, ids.EmptyNodeID, nil)) + client := network.NewClient(handlerID) + + want := []byte("message") + + require.NoError(client.AppRequest( + ctx, + set.Of(ids.EmptyNodeID), + want, + func(context.Context, ids.NodeID, []byte, error) {}, + )) + gotAppRequest := <-sender.SentAppRequest + require.Equal(handlerPrefix, gotAppRequest[0]) + require.Equal(want, gotAppRequest[1:]) + + 
require.NoError(client.AppRequestAny( + ctx, + want, + func(context.Context, ids.NodeID, []byte, error) {}, + )) + gotAppRequest = <-sender.SentAppRequest + require.Equal(handlerPrefix, gotAppRequest[0]) + require.Equal(want, gotAppRequest[1:]) + + require.NoError(client.CrossChainAppRequest( + ctx, + ids.Empty, + want, + func(context.Context, ids.ID, []byte, error) {}, + )) + gotCrossChainAppRequest := <-sender.SentCrossChainAppRequest + require.Equal(handlerPrefix, gotCrossChainAppRequest[0]) + require.Equal(want, gotCrossChainAppRequest[1:]) + + require.NoError(client.AppGossip(ctx, want)) + gotAppGossip := <-sender.SentAppGossip + require.Equal(handlerPrefix, gotAppGossip[0]) + require.Equal(want, gotAppGossip[1:]) + + require.NoError(client.AppGossipSpecific(ctx, set.Of(ids.EmptyNodeID), want)) + gotAppGossip = <-sender.SentAppGossipSpecific + require.Equal(handlerPrefix, gotAppGossip[0]) + require.Equal(want, gotAppGossip[1:]) +} + +// Tests that the Client callback is called on a successful response +func TestAppRequestResponse(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantResponse := []byte("response") + wantNodeID := ids.GenerateTestNodeID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotNodeID ids.NodeID, gotResponse []byte, err error) { + require.Equal(wantNodeID, gotNodeID) + require.NoError(err) + require.Equal(wantResponse, gotResponse) + + close(done) + } + + want := []byte("request") + require.NoError(client.AppRequest(ctx, set.Of(wantNodeID), want, callback)) + got := <-sender.SentAppRequest + require.Equal(handlerPrefix, got[0]) + require.Equal(want, got[1:]) + + require.NoError(network.AppResponse(ctx, wantNodeID, 1, wantResponse)) + <-done +} + +// Tests that the Client callback is given an error if the request fails +func TestAppRequestFailed(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantNodeID := ids.GenerateTestNodeID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotNodeID ids.NodeID, gotResponse []byte, err error) { + require.Equal(wantNodeID, gotNodeID) + require.ErrorIs(err, errFoo) + require.Nil(gotResponse) + + close(done) + } + + require.NoError(client.AppRequest(ctx, set.Of(wantNodeID), []byte("request"), callback)) + <-sender.SentAppRequest + + require.NoError(network.AppRequestFailed(ctx, wantNodeID, 1, errFoo)) + <-done +} + +// Tests that the Client callback is called on a successful response +func TestCrossChainAppRequestResponse(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentCrossChainAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantChainID := ids.GenerateTestID() + wantResponse := []byte("response") + done := make(chan struct{}) + + callback := func(_ context.Context, gotChainID ids.ID, gotResponse []byte, err error) { + require.Equal(wantChainID, gotChainID) + require.NoError(err) + 
require.Equal(wantResponse, gotResponse) + + close(done) + } + + require.NoError(client.CrossChainAppRequest(ctx, wantChainID, []byte("request"), callback)) + <-sender.SentCrossChainAppRequest + + require.NoError(network.CrossChainAppResponse(ctx, wantChainID, 1, wantResponse)) + <-done +} + +// Tests that the Client callback is given an error if the request fails +func TestCrossChainAppRequestFailed(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentCrossChainAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantChainID := ids.GenerateTestID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotChainID ids.ID, gotResponse []byte, err error) { + require.Equal(wantChainID, gotChainID) + require.ErrorIs(err, errFoo) + require.Nil(gotResponse) + + close(done) + } + + require.NoError(client.CrossChainAppRequest(ctx, wantChainID, []byte("request"), callback)) + <-sender.SentCrossChainAppRequest + + require.NoError(network.CrossChainAppRequestFailed(ctx, wantChainID, 1, errFoo)) + <-done +} + +// Messages for unregistered handlers should be dropped gracefully +func TestMessageForUnregisteredHandler(t *testing.T) { + tests := []struct { + name string + msg []byte + }{ + { + name: "nil", + msg: nil, + }, + { + name: "empty", + msg: []byte{}, + }, + { + name: "non-empty", + msg: []byte("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + handler := &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + require.Fail("should not be called") + }, + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + CrossChainAppRequestF: func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + } + network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, handler)) + + require.Nil(network.AppRequest(ctx, ids.EmptyNodeID, 0, time.Time{}, tt.msg)) + require.Nil(network.AppGossip(ctx, ids.EmptyNodeID, tt.msg)) + require.Nil(network.CrossChainAppRequest(ctx, ids.Empty, 0, time.Time{}, tt.msg)) + }) + } +} + +// A response or timeout for a request we never made should return an error +func TestResponseForUnrequestedRequest(t *testing.T) { + tests := []struct { + name string + msg []byte + }{ + { + name: "nil", + msg: nil, + }, + { + name: "empty", + msg: []byte{}, + }, + { + name: "non-empty", + msg: []byte("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + handler := &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + require.Fail("should not be called") + }, + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + CrossChainAppRequestF: func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + } + network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, 
handler)) + + err = network.AppResponse(ctx, ids.EmptyNodeID, 0, []byte("foobar")) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.AppRequestFailed(ctx, ids.EmptyNodeID, 0, common.ErrTimeout) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.CrossChainAppResponse(ctx, ids.Empty, 0, []byte("foobar")) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.CrossChainAppRequestFailed(ctx, ids.Empty, 0, common.ErrTimeout) + + require.ErrorIs(err, ErrUnrequestedResponse) + }) + } +} + +// It's possible for the request id to overflow and wrap around. +// If there are still pending requests with the same request id, we should +// not attempt to issue another request until the previous one has cleared. +func TestAppRequestDuplicateRequestIDs(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(0x1) + + noOpCallback := func(context.Context, ids.NodeID, []byte, error) {} + // create a request that never gets a response + network.router.requestID = 1 + require.NoError(client.AppRequest(ctx, set.Of(ids.EmptyNodeID), []byte{}, noOpCallback)) + <-sender.SentAppRequest + + // force the network to use the same requestID + network.router.requestID = 1 + err = client.AppRequest(context.Background(), set.Of(ids.EmptyNodeID), []byte{}, noOpCallback) + require.ErrorIs(err, ErrRequestPending) +} + +// Sample should always return up to [limit] peers, and less if fewer than +// [limit] peers are available. +func TestPeersSample(t *testing.T) { + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + nodeID3 := ids.GenerateTestNodeID() + + tests := []struct { + name string + connected set.Set[ids.NodeID] + disconnected set.Set[ids.NodeID] + limit int + }{ + { + name: "no peers", + limit: 1, + }, + { + name: "one peer connected", + connected: set.Of(nodeID1), + limit: 1, + }, + { + name: "multiple peers connected", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 1, + }, + { + name: "peer connects and disconnects - 1", + connected: set.Of(nodeID1), + disconnected: set.Of(nodeID1), + limit: 1, + }, + { + name: "peer connects and disconnects - 2", + connected: set.Of(nodeID1, nodeID2), + disconnected: set.Of(nodeID2), + limit: 1, + }, + { + name: "peer connects and disconnects - 2", + connected: set.Of(nodeID1, nodeID2, nodeID3), + disconnected: set.Of(nodeID1, nodeID2), + limit: 1, + }, + { + name: "less than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 4, + }, + { + name: "limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 3, + }, + { + name: "more than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + network, err := NewNetwork(logging.NoLog{}, &common.FakeSender{}, prometheus.NewRegistry(), "") + require.NoError(err) + + for connected := range tt.connected { + require.NoError(network.Connected(context.Background(), connected, nil)) + } + + for disconnected := range tt.disconnected { + require.NoError(network.Disconnected(context.Background(), disconnected)) + } + + sampleable := set.Set[ids.NodeID]{} + sampleable.Union(tt.connected) + sampleable.Difference(tt.disconnected) + + sampled := network.Peers.Sample(tt.limit) + require.Len(sampled, 
math.Min(tt.limit, len(sampleable))) + require.Subset(sampleable, sampled) + }) + } +} + +func TestAppRequestAnyNodeSelection(t *testing.T) { + tests := []struct { + name string + peers []ids.NodeID + expected error + }{ + { + name: "no peers", + expected: ErrNoPeers, + }, + { + name: "has peers", + peers: []ids.NodeID{ids.GenerateTestNodeID()}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + sent := set.Set[ids.NodeID]{} + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + sent = nodeIDs + return nil + }, + } + + n, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + for _, peer := range tt.peers { + require.NoError(n.Connected(context.Background(), peer, &version.Application{})) + } + + client := n.NewClient(1) + + err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) + require.ErrorIs(err, tt.expected) + require.Subset(tt.peers, sent.List()) + }) + } +} + +func TestNodeSamplerClientOption(t *testing.T) { + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + tests := []struct { + name string + peers []ids.NodeID + option func(t *testing.T, n *Network) ClientOption + expected []ids.NodeID + expectedErr error + }{ + { + name: "default", + peers: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + option: func(_ *testing.T, n *Network) ClientOption { + return clientOptionFunc(func(*clientOptions) {}) + }, + expected: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + }, + { + name: "validator connected", + peers: []ids.NodeID{nodeID0, nodeID1}, + option: func(t *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expected: []ids.NodeID{nodeID1}, + }, + { + name: "validator disconnected", + peers: []ids.NodeID{nodeID0}, + option: func(t *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expectedErr: ErrNoPeers, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + done := make(chan struct{}) + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + require.Subset(tt.expected, nodeIDs.List()) + close(done) + return nil + }, + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + ctx := context.Background() + for _, peer := range tt.peers { + require.NoError(network.Connected(ctx, peer, nil)) + } + + client := network.NewClient(0, tt.option(t, network)) + + if err = client.AppRequestAny(ctx, []byte("request"), nil); err 
!= nil { + close(done) + } + + require.ErrorIs(err, tt.expectedErr) + <-done + }) + } +} + +// Tests that a given protocol can have more than one client +func TestMultipleClients(t *testing.T) { + require := require.New(t) + + n, err := NewNetwork(logging.NoLog{}, &common.SenderTest{}, prometheus.NewRegistry(), "") + require.NoError(err) + _ = n.NewClient(0) + _ = n.NewClient(0) +} diff --git a/network/p2p/node_sampler.go b/network/p2p/node_sampler.go index 057a175027a4..5bb3815e3b90 100644 --- a/network/p2p/node_sampler.go +++ b/network/p2p/node_sampler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p diff --git a/x/sync/peer_tracker.go b/network/p2p/peer_tracker.go similarity index 93% rename from x/sync/peer_tracker.go rename to network/p2p/peer_tracker.go index a1f8a66ae711..c0eda693859b 100644 --- a/x/sync/peer_tracker.go +++ b/network/p2p/peer_tracker.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package sync +package p2p import ( "math" @@ -45,7 +45,7 @@ type peerInfo struct { // Tracks the bandwidth of responses coming from peers, // preferring to contact peers with known good bandwidth, connecting // to new peers with an exponentially decaying probability. -type peerTracker struct { +type PeerTracker struct { // Lock to protect concurrent access to the peer tracker lock sync.Mutex // All peers we are connected to @@ -64,12 +64,12 @@ type peerTracker struct { averageBandwidthMetric prometheus.Gauge } -func newPeerTracker( +func NewPeerTracker( log logging.Logger, metricsNamespace string, registerer prometheus.Registerer, -) (*peerTracker, error) { - t := &peerTracker{ +) (*PeerTracker, error) { + t := &PeerTracker{ peers: make(map[ids.NodeID]*peerInfo), trackedPeers: make(set.Set[ids.NodeID]), responsivePeers: make(set.Set[ids.NodeID]), @@ -112,7 +112,7 @@ func newPeerTracker( // Returns true if we're not connected to enough peers. // Otherwise returns true probabilistically based on the number of tracked peers. // Assumes p.lock is held. -func (p *peerTracker) shouldTrackNewPeer() bool { +func (p *PeerTracker) shouldTrackNewPeer() bool { numResponsivePeers := p.responsivePeers.Len() if numResponsivePeers < desiredMinResponsivePeers { return true @@ -137,11 +137,12 @@ func (p *peerTracker) shouldTrackNewPeer() bool { return rand.Float64() < newPeerProbability // #nosec G404 } +// TODO get rid of minVersion // Returns a peer that we're connected to. // If we should track more peers, returns a random peer with version >= [minVersion], if any exist. // Otherwise, with probability [randomPeerProbability] returns a random peer from [p.responsivePeers]. // With probability [1-randomPeerProbability] returns the peer in [p.bandwidthHeap] with the highest bandwidth. -func (p *peerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, bool) { +func (p *PeerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, bool) { p.lock.Lock() defer p.lock.Unlock() @@ -187,7 +188,7 @@ func (p *peerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, b } // Record that we sent a request to [nodeID]. 
-func (p *peerTracker) TrackPeer(nodeID ids.NodeID) { +func (p *PeerTracker) TrackPeer(nodeID ids.NodeID) { p.lock.Lock() defer p.lock.Unlock() @@ -197,7 +198,7 @@ func (p *peerTracker) TrackPeer(nodeID ids.NodeID) { // Record that we observed that [nodeID]'s bandwidth is [bandwidth]. // Adds the peer's bandwidth averager to the bandwidth heap. -func (p *peerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { +func (p *PeerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { p.lock.Lock() defer p.lock.Unlock() @@ -229,7 +230,7 @@ func (p *peerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { } // Connected should be called when [nodeID] connects to this node -func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { +func (p *PeerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { p.lock.Lock() defer p.lock.Unlock() @@ -264,7 +265,7 @@ func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Applicat } // Disconnected should be called when [nodeID] disconnects from this node -func (p *peerTracker) Disconnected(nodeID ids.NodeID) { +func (p *PeerTracker) Disconnected(nodeID ids.NodeID) { p.lock.Lock() defer p.lock.Unlock() @@ -277,7 +278,7 @@ func (p *peerTracker) Disconnected(nodeID ids.NodeID) { } // Returns the number of peers the node is connected to. -func (p *peerTracker) Size() int { +func (p *PeerTracker) Size() int { p.lock.Lock() defer p.lock.Unlock() diff --git a/network/p2p/peer_tracker_test.go b/network/p2p/peer_tracker_test.go new file mode 100644 index 000000000000..bf771d177841 --- /dev/null +++ b/network/p2p/peer_tracker_test.go @@ -0,0 +1,99 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/version" +) + +func TestPeerTracker(t *testing.T) { + require := require.New(t) + p, err := NewPeerTracker(logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(err) + + // Connect some peers + numExtraPeers := 10 + numPeers := desiredMinResponsivePeers + numExtraPeers + peerIDs := make([]ids.NodeID, numPeers) + peerVersion := &version.Application{ + Major: 1, + Minor: 2, + Patch: 3, + } + + for i := range peerIDs { + peerIDs[i] = ids.GenerateTestNodeID() + p.Connected(peerIDs[i], peerVersion) + } + + responsivePeers := make(map[ids.NodeID]bool) + + // Expect requests to go to new peers until we have desiredMinResponsivePeers responsive peers. 
+ for i := 0; i < desiredMinResponsivePeers+numExtraPeers/2; i++ { + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + _, exists := responsivePeers[peer] + require.Falsef(exists, "expected connecting to a new peer, but got the same peer twice: peer %s iteration %d", peer, i) + responsivePeers[peer] = true + + p.TrackPeer(peer) // mark the peer as having a message sent to it + } + + // Mark some peers as responsive and others as not responsive + i := 0 + for peer := range responsivePeers { + if i < desiredMinResponsivePeers { + p.TrackBandwidth(peer, 10) + } else { + responsivePeers[peer] = false // remember which peers were not responsive + p.TrackBandwidth(peer, 0) + } + i++ + } + + // Expect requests to go to responsive or new peers, so long as they are available + numRequests := 50 + for i := 0; i < numRequests; i++ { + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + responsive, ok := responsivePeers[peer] + if ok { + require.Truef(responsive, "expected connecting to a responsive peer, but got a peer that was not responsive: peer %s iteration %d", peer, i) + p.TrackBandwidth(peer, 10) + } else { + responsivePeers[peer] = false // remember that we connected to this peer + p.TrackPeer(peer) // mark the peer as having a message sent to it + p.TrackBandwidth(peer, 0) // mark the peer as non-responsive + } + } + + // Disconnect from peers that were previously responsive and ones we didn't connect to yet. + for _, peer := range peerIDs { + responsive, ok := responsivePeers[peer] + if ok && responsive || !ok { + p.Disconnected(peer) + } + } + + // Requests should fall back on non-responsive peers when no other choice is left + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + responsive, ok := responsivePeers[peer] + require.True(ok) + require.Falsef(responsive, "expected connecting to a non-responsive peer, but got a peer that was responsive: peer %s", peer) +} diff --git a/network/p2p/peers.go b/network/p2p/peers.go deleted file mode 100644 index 47982aeb2dc4..000000000000 --- a/network/p2p/peers.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package p2p - -import ( - "context" - "sync" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" -) - -var ( - _ validators.Connector = (*Peers)(nil) - _ NodeSampler = (*Peers)(nil) -) - -// Peers contains a set of nodes that we are connected to. -type Peers struct { - lock sync.RWMutex - peers set.SampleableSet[ids.NodeID] -} - -func (p *Peers) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { - p.lock.Lock() - defer p.lock.Unlock() - - p.peers.Add(nodeID) - - return nil -} - -func (p *Peers) Disconnected(_ context.Context, nodeID ids.NodeID) error { - p.lock.Lock() - defer p.lock.Unlock() - - p.peers.Remove(nodeID) - - return nil -} - -func (p *Peers) Sample(_ context.Context, limit int) []ids.NodeID { - p.lock.RLock() - defer p.lock.RUnlock() - - return p.peers.Sample(limit) -} diff --git a/network/p2p/peers_test.go b/network/p2p/peers_test.go deleted file mode 100644 index 9835cf065b0b..000000000000 --- a/network/p2p/peers_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
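With the tracker renamed and exported above, callers outside the package (e.g. the sync client) can construct it directly. A rough usage sketch under stated assumptions: the bandwidth unit passed to TrackBandwidth is taken here to be bytes per second, which is a convention of this example rather than something the API enforces, and pickAndTrack is an invented name.

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/network/p2p"
	"github.com/ava-labs/avalanchego/utils/logging"
	"github.com/ava-labs/avalanchego/version"
)

// pickAndTrack picks a peer, records that a request was sent to it, and later
// reports the observed bandwidth so the tracker can prefer responsive peers.
func pickAndTrack(peerVersion *version.Application) error {
	tracker, err := p2p.NewPeerTracker(logging.NoLog{}, "example", prometheus.NewRegistry())
	if err != nil {
		return err
	}

	// Peers are registered via Connected/Disconnected, typically driven by the
	// VM's validators.Connector callbacks.
	nodeID := ids.GenerateTestNodeID()
	tracker.Connected(nodeID, peerVersion)

	peer, ok := tracker.GetAnyPeer(nil) // nil: no minimum version required
	if !ok {
		return nil
	}
	tracker.TrackPeer(peer)

	// ... issue the request and measure the response ...
	bytesReceived, elapsed := 4096, 250*time.Millisecond
	tracker.TrackBandwidth(peer, float64(bytesReceived)/elapsed.Seconds())
	return nil
}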
- -package p2p - -import ( - "context" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Sample should always return up to [limit] peers, and less if fewer than -// [limit] peers are available. -func TestPeersSample(t *testing.T) { - nodeID1 := ids.GenerateTestNodeID() - nodeID2 := ids.GenerateTestNodeID() - nodeID3 := ids.GenerateTestNodeID() - - tests := []struct { - name string - connected set.Set[ids.NodeID] - disconnected set.Set[ids.NodeID] - limit int - }{ - { - name: "no peers", - limit: 1, - }, - { - name: "one peer connected", - connected: set.Of(nodeID1), - limit: 1, - }, - { - name: "multiple peers connected", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 1, - }, - { - name: "peer connects and disconnects - 1", - connected: set.Of(nodeID1), - disconnected: set.Of(nodeID1), - limit: 1, - }, - { - name: "peer connects and disconnects - 2", - connected: set.Of(nodeID1, nodeID2), - disconnected: set.Of(nodeID2), - limit: 1, - }, - { - name: "peer connects and disconnects - 2", - connected: set.Of(nodeID1, nodeID2, nodeID3), - disconnected: set.Of(nodeID1, nodeID2), - limit: 1, - }, - { - name: "less than limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 4, - }, - { - name: "limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 3, - }, - { - name: "more than limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - peers := &Peers{} - - for connected := range tt.connected { - require.NoError(peers.Connected(context.Background(), connected, nil)) - } - - for disconnected := range tt.disconnected { - require.NoError(peers.Disconnected(context.Background(), disconnected)) - } - - sampleable := set.Set[ids.NodeID]{} - sampleable.Union(tt.connected) - sampleable.Difference(tt.disconnected) - - sampled := peers.Sample(context.Background(), tt.limit) - require.Len(sampled, math.Min(tt.limit, len(sampleable))) - require.Subset(sampleable, sampled) - }) - } -} - -func TestAppRequestAnyNodeSelection(t *testing.T) { - tests := []struct { - name string - peers []ids.NodeID - expected error - }{ - { - name: "no peers", - expected: ErrNoPeers, - }, - { - name: "has peers", - peers: []ids.NodeID{ids.GenerateTestNodeID()}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - mockAppSender := common.NewMockSender(ctrl) - - expectedCalls := 0 - if tt.expected == nil { - expectedCalls = 1 - } - mockAppSender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(expectedCalls) - - r := NewRouter(logging.NoLog{}, mockAppSender, prometheus.NewRegistry(), "") - peers := &Peers{} - for _, peer := range tt.peers { - require.NoError(peers.Connected(context.Background(), peer, nil)) - } - - client, err := r.RegisterAppProtocol(1, nil, peers) - require.NoError(err) - - err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) - require.ErrorIs(err, tt.expected) - }) - } -} diff --git a/network/p2p/router.go b/network/p2p/router.go index 1da66a7d2d4e..13a38abc56c5 100644 --- 
a/network/p2p/router.go +++ b/network/p2p/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p @@ -8,6 +8,7 @@ import ( "encoding/binary" "errors" "fmt" + "strconv" "sync" "time" @@ -19,49 +20,55 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" ) var ( ErrExistingAppProtocol = errors.New("existing app protocol") ErrUnrequestedResponse = errors.New("unrequested response") - _ common.AppHandler = (*Router)(nil) + _ common.AppHandler = (*router)(nil) ) -type metrics struct { - appRequestTime metric.Averager - appRequestFailedTime metric.Averager - appResponseTime metric.Averager - appGossipTime metric.Averager - crossChainAppRequestTime metric.Averager - crossChainAppRequestFailedTime metric.Averager - crossChainAppResponseTime metric.Averager -} - type pendingAppRequest struct { - *metrics - AppResponseCallback + handlerID string + callback AppResponseCallback } type pendingCrossChainAppRequest struct { - *metrics - CrossChainAppResponseCallback + handlerID string + callback CrossChainAppResponseCallback } +// meteredHandler emits metrics for a Handler type meteredHandler struct { *responder - *metrics + metrics } -// Router routes incoming application messages to the corresponding registered +type metrics struct { + appRequestTime *prometheus.CounterVec + appRequestCount *prometheus.CounterVec + appResponseTime *prometheus.CounterVec + appResponseCount *prometheus.CounterVec + appRequestFailedTime *prometheus.CounterVec + appRequestFailedCount *prometheus.CounterVec + appGossipTime *prometheus.CounterVec + appGossipCount *prometheus.CounterVec + crossChainAppRequestTime *prometheus.CounterVec + crossChainAppRequestCount *prometheus.CounterVec + crossChainAppResponseTime *prometheus.CounterVec + crossChainAppResponseCount *prometheus.CounterVec + crossChainAppRequestFailedTime *prometheus.CounterVec + crossChainAppRequestFailedCount *prometheus.CounterVec +} + +// router routes incoming application messages to the corresponding registered // app handler. App messages must be made using the registered handler's // corresponding Client. -type Router struct { - log logging.Logger - sender common.AppSender - metrics prometheus.Registerer - namespace string +type router struct { + log logging.Logger + sender common.AppSender + metrics metrics lock sync.RWMutex handlers map[uint64]*meteredHandler @@ -70,18 +77,16 @@ type Router struct { requestID uint32 } -// NewRouter returns a new instance of Router -func NewRouter( +// newRouter returns a new instance of Router +func newRouter( log logging.Logger, sender common.AppSender, - metrics prometheus.Registerer, - namespace string, -) *Router { - return &Router{ + metrics metrics, +) *router { + return &router{ log: log, sender: sender, metrics: metrics, - namespace: namespace, handlers: make(map[uint64]*meteredHandler), pendingAppRequests: make(map[uint32]pendingAppRequest), pendingCrossChainAppRequests: make(map[uint32]pendingCrossChainAppRequest), @@ -90,117 +95,35 @@ func NewRouter( } } -// RegisterAppProtocol reserves an identifier for an application protocol and -// returns a Client that can be used to send messages for the corresponding -// protocol. 
-func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSampler NodeSampler) (*Client, error) { +func (r *router) addHandler(handlerID uint64, handler Handler) error { r.lock.Lock() defer r.lock.Unlock() if _, ok := r.handlers[handlerID]; ok { - return nil, fmt.Errorf("failed to register handler id %d: %w", handlerID, ErrExistingAppProtocol) - } - - appRequestTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_app_request", handlerID), - "app request time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register app request metric for handler_%d: %w", handlerID, err) - } - - appRequestFailedTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_app_request_failed", handlerID), - "app request failed time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register app request failed metric for handler_%d: %w", handlerID, err) - } - - appResponseTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_app_response", handlerID), - "app response time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register app response metric for handler_%d: %w", handlerID, err) - } - - appGossipTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_app_gossip", handlerID), - "app gossip time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register app gossip metric for handler_%d: %w", handlerID, err) - } - - crossChainAppRequestTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_cross_chain_app_request", handlerID), - "cross chain app request time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register cross-chain app request metric for handler_%d: %w", handlerID, err) - } - - crossChainAppRequestFailedTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_cross_chain_app_request_failed", handlerID), - "app request failed time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register cross-chain app request failed metric for handler_%d: %w", handlerID, err) - } - - crossChainAppResponseTime, err := metric.NewAverager( - r.namespace, - fmt.Sprintf("handler_%d_cross_chain_app_response", handlerID), - "cross chain app response time (ns)", - r.metrics, - ) - if err != nil { - return nil, fmt.Errorf("failed to register cross-chain app response metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register handler id %d: %w", handlerID, ErrExistingAppProtocol) } r.handlers[handlerID] = &meteredHandler{ responder: &responder{ + Handler: handler, handlerID: handlerID, - handler: handler, log: r.log, sender: r.sender, }, - metrics: &metrics{ - appRequestTime: appRequestTime, - appRequestFailedTime: appRequestFailedTime, - appResponseTime: appResponseTime, - appGossipTime: appGossipTime, - crossChainAppRequestTime: crossChainAppRequestTime, - crossChainAppRequestFailedTime: crossChainAppRequestFailedTime, - crossChainAppResponseTime: crossChainAppResponseTime, - }, + metrics: r.metrics, } - return &Client{ - handlerID: handlerID, - handlerPrefix: binary.AppendUvarint(nil, handlerID), - sender: r.sender, - router: r, - nodeSampler: nodeSampler, - }, nil + return nil } -func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { +// AppRequest routes an AppRequest to a Handler based on the handler prefix. 
The +// message is dropped if no matching handler can be found. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { start := time.Now() - parsedMsg, handler, ok := r.parse(request) + parsedMsg, handler, handlerID, ok := r.parse(request) if !ok { r.log.Debug("failed to process message", zap.Stringer("messageOp", message.AppRequestOp), @@ -212,41 +135,109 @@ func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui return nil } + // call the corresponding handler and send back a response to nodeID if err := handler.AppRequest(ctx, nodeID, requestID, deadline, parsedMsg); err != nil { return err } - handler.metrics.appRequestTime.Observe(float64(time.Since(start))) + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.appRequestCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appRequestTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +// AppRequestFailed routes an AppRequestFailed message to the callback +// corresponding to requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } - pending.AppResponseCallback(ctx, nodeID, nil, ErrAppRequestFailed) - pending.appRequestFailedTime.Observe(float64(time.Since(start))) + pending.callback(ctx, nodeID, nil, appErr) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.appRequestFailedCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appRequestFailedTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { +// AppResponse routes an AppResponse message to the callback corresponding to +// requestID. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } - pending.AppResponseCallback(ctx, nodeID, response, nil) - pending.appResponseTime.Observe(float64(time.Since(start))) + pending.callback(ctx, nodeID, response, nil) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.appResponseCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appResponseTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { +// AppGossip routes an AppGossip message to a Handler based on the handler +// prefix. The message is dropped if no matching handler can be found. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { start := time.Now() - parsedMsg, handler, ok := r.parse(gossip) + parsedMsg, handler, handlerID, ok := r.parse(gossip) if !ok { r.log.Debug("failed to process message", zap.Stringer("messageOp", message.AppGossipOp), @@ -258,11 +249,33 @@ func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte handler.AppGossip(ctx, nodeID, parsedMsg) - handler.metrics.appGossipTime.Observe(float64(time.Since(start))) + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.appGossipCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appGossipTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) CrossChainAppRequest( +// CrossChainAppRequest routes a CrossChainAppRequest message to a Handler +// based on the handler prefix. The message is dropped if no matching handler +// can be found. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppRequest( ctx context.Context, chainID ids.ID, requestID uint32, @@ -270,7 +283,7 @@ func (r *Router) CrossChainAppRequest( msg []byte, ) error { start := time.Now() - parsedMsg, handler, ok := r.parse(msg) + parsedMsg, handler, handlerID, ok := r.parse(msg) if !ok { r.log.Debug("failed to process message", zap.Stringer("messageOp", message.CrossChainAppRequestOp), @@ -286,31 +299,93 @@ func (r *Router) CrossChainAppRequest( return err } - handler.metrics.crossChainAppRequestTime.Observe(float64(time.Since(start))) + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.crossChainAppRequestCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppRequestTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +// CrossChainAppRequestFailed routes a CrossChainAppRequestFailed message to +// the callback corresponding to requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } - pending.CrossChainAppResponseCallback(ctx, chainID, nil, ErrAppRequestFailed) - pending.crossChainAppRequestFailedTime.Observe(float64(time.Since(start))) + pending.callback(ctx, chainID, nil, appErr) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.crossChainAppRequestFailedCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppRequestFailedTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } -func (r *Router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { +// CrossChainAppResponse routes a CrossChainAppResponse message to the callback +// corresponding to requestID. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } - pending.CrossChainAppResponseCallback(ctx, chainID, response, nil) - pending.crossChainAppResponseTime.Observe(float64(time.Since(start))) + pending.callback(ctx, chainID, response, nil) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.crossChainAppResponseCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppResponseTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + return nil } @@ -320,24 +395,27 @@ func (r *Router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requ // Returns: // - The unprefixed protocol message. // - The protocol responder. +// - The protocol metric name. // - A boolean indicating that parsing succeeded. // // Invariant: Assumes [r.lock] isn't held. -func (r *Router) parse(msg []byte) ([]byte, *meteredHandler, bool) { - handlerID, bytesRead := binary.Uvarint(msg) - if bytesRead <= 0 { - return nil, nil, false +func (r *router) parse(prefixedMsg []byte) ([]byte, *meteredHandler, string, bool) { + handlerID, msg, ok := ParseMessage(prefixedMsg) + if !ok { + return nil, nil, "", false } + handlerStr := strconv.FormatUint(handlerID, 10) + r.lock.RLock() defer r.lock.RUnlock() handler, ok := r.handlers[handlerID] - return msg[bytesRead:], handler, ok + return msg, handler, handlerStr, ok } // Invariant: Assumes [r.lock] isn't held. -func (r *Router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { +func (r *router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { r.lock.Lock() defer r.lock.Unlock() @@ -347,7 +425,7 @@ func (r *Router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { } // Invariant: Assumes [r.lock] isn't held. -func (r *Router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainAppRequest, bool) { +func (r *router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainAppRequest, bool) { r.lock.Lock() defer r.lock.Unlock() @@ -355,3 +433,17 @@ func (r *Router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainA delete(r.pendingCrossChainAppRequests, requestID) return callback, ok } + +// Parse a gossip or request message. +// +// Returns: +// - The protocol ID. +// - The unprefixed protocol message. +// - A boolean indicating that parsing succeeded. +func ParseMessage(msg []byte) (uint64, []byte, bool) { + handlerID, bytesRead := binary.Uvarint(msg) + if bytesRead <= 0 { + return 0, nil, false + } + return handlerID, msg[bytesRead:], true +} diff --git a/network/p2p/router_test.go b/network/p2p/router_test.go deleted file mode 100644 index 924a72b0b70a..000000000000 --- a/network/p2p/router_test.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
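The exported ParseMessage helper added above makes the handler-prefix framing explicit; below is a small sketch of the round trip. The write side uses binary.AppendUvarint purely for illustration, since the prefixing helper is not shown in this diff.

package main

import (
	"encoding/binary"
	"fmt"
)

// parseMessage mirrors ParseMessage above: the handler ID is a uvarint prefix
// and the remainder is the unprefixed protocol payload.
func parseMessage(msg []byte) (uint64, []byte, bool) {
	handlerID, bytesRead := binary.Uvarint(msg)
	if bytesRead <= 0 {
		return 0, nil, false
	}
	return handlerID, msg[bytesRead:], true
}

func main() {
	// Prefix an arbitrary payload with handler ID 7 (illustrative value only).
	prefixed := binary.AppendUvarint(nil, 7)
	prefixed = append(prefixed, []byte("payload")...)

	handlerID, payload, ok := parseMessage(prefixed)
	fmt.Println(handlerID, string(payload), ok) // 7 payload true

	// An empty or malformed prefix fails to parse, and the router drops it.
	_, _, ok = parseMessage(nil)
	fmt.Println(ok) // false
}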
- -package p2p - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p/mocks" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" -) - -func TestAppRequestResponse(t *testing.T) { - handlerID := uint64(0x0) - request := []byte("request") - response := []byte("response") - nodeID := ids.GenerateTestNodeID() - chainID := ids.GenerateTestID() - - ctxKey := new(string) - ctxVal := new(string) - *ctxKey = "foo" - *ctxVal = "bar" - - tests := []struct { - name string - requestFunc func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) - }{ - { - name: "app request", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - go func() { - require.NoError(t, router.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) - }() - } - }).AnyTimes() - sender.EXPECT().SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, _ ids.NodeID, requestID uint32, response []byte) { - go func() { - ctx = context.WithValue(ctx, ctxKey, ctxVal) - require.NoError(t, router.AppResponse(ctx, nodeID, requestID, response)) - }() - }).AnyTimes() - handler.EXPECT(). - AppRequest(context.Background(), nodeID, gomock.Any(), request). - DoAndReturn(func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { - return response, nil - }) - - callback := func(ctx context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { - defer wg.Done() - - require.NoError(t, err) - require.Equal(t, ctxVal, ctx.Value(ctxKey)) - require.Equal(t, nodeID, actualNodeID) - require.Equal(t, response, actualResponse) - } - - require.NoError(t, client.AppRequestAny(context.Background(), request, callback)) - }, - }, - { - name: "app request failed", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - go func() { - require.NoError(t, router.AppRequestFailed(ctx, nodeID, requestID)) - }() - } - }) - - callback := func(_ context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { - defer wg.Done() - - require.ErrorIs(t, err, ErrAppRequestFailed) - require.Equal(t, nodeID, actualNodeID) - require.Nil(t, actualResponse) - } - - require.NoError(t, client.AppRequest(context.Background(), set.Of(nodeID), request, callback)) - }, - }, - { - name: "cross-chain app request", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - chainID := ids.GenerateTestID() - sender.EXPECT().SendCrossChainAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
- Do(func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { - go func() { - require.NoError(t, router.CrossChainAppRequest(ctx, chainID, requestID, time.Time{}, request)) - }() - }).AnyTimes() - sender.EXPECT().SendCrossChainAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) { - go func() { - ctx = context.WithValue(ctx, ctxKey, ctxVal) - require.NoError(t, router.CrossChainAppResponse(ctx, chainID, requestID, response)) - }() - }).AnyTimes() - handler.EXPECT(). - CrossChainAppRequest(context.Background(), chainID, gomock.Any(), request). - DoAndReturn(func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { - return response, nil - }) - - callback := func(ctx context.Context, actualChainID ids.ID, actualResponse []byte, err error) { - defer wg.Done() - require.NoError(t, err) - require.Equal(t, ctxVal, ctx.Value(ctxKey)) - require.Equal(t, chainID, actualChainID) - require.Equal(t, response, actualResponse) - } - - require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) - }, - }, - { - name: "cross-chain app request failed", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendCrossChainAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { - go func() { - require.NoError(t, router.CrossChainAppRequestFailed(ctx, chainID, requestID)) - }() - }) - - callback := func(_ context.Context, actualChainID ids.ID, actualResponse []byte, err error) { - defer wg.Done() - - require.ErrorIs(t, err, ErrAppRequestFailed) - require.Equal(t, chainID, actualChainID) - require.Nil(t, actualResponse) - } - - require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) - }, - }, - { - name: "app gossip", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, gossip []byte) { - go func() { - require.NoError(t, router.AppGossip(ctx, nodeID, gossip)) - }() - }).AnyTimes() - handler.EXPECT(). - AppGossip(context.Background(), nodeID, request). - DoAndReturn(func(context.Context, ids.NodeID, []byte) error { - defer wg.Done() - return nil - }) - - require.NoError(t, client.AppGossip(context.Background(), request)) - }, - }, - { - name: "app gossip specific", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppGossipSpecific(gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], gossip []byte) { - for n := range nodeIDs { - nodeID := n - go func() { - require.NoError(t, router.AppGossip(ctx, nodeID, gossip)) - }() - } - }).AnyTimes() - handler.EXPECT(). - AppGossip(context.Background(), nodeID, request). 
- DoAndReturn(func(context.Context, ids.NodeID, []byte) error { - defer wg.Done() - return nil - }) - - require.NoError(t, client.AppGossipSpecific(context.Background(), set.Of(nodeID), request)) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - sender := common.NewMockSender(ctrl) - handler := mocks.NewMockHandler(ctrl) - router := NewRouter(logging.NoLog{}, sender, prometheus.NewRegistry(), "") - peers := &Peers{} - require.NoError(peers.Connected(context.Background(), nodeID, nil)) - client, err := router.RegisterAppProtocol(handlerID, handler, peers) - require.NoError(err) - - wg := &sync.WaitGroup{} - wg.Add(1) - tt.requestFunc(t, router, client, sender, handler, wg) - wg.Wait() - }) - } -} - -func TestRouterDropMessage(t *testing.T) { - unregistered := byte(0x0) - - tests := []struct { - name string - requestFunc func(router *Router) error - err error - }{ - { - name: "drop unregistered app request message", - requestFunc: func(router *Router) error { - return router.AppRequest(context.Background(), ids.GenerateTestNodeID(), 0, time.Time{}, []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty app request message", - requestFunc: func(router *Router) error { - return router.AppRequest(context.Background(), ids.GenerateTestNodeID(), 0, time.Time{}, []byte{}) - }, - err: nil, - }, - { - name: "drop unregistered cross-chain app request message", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty cross-chain app request message", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{}) - }, - err: nil, - }, - { - name: "drop unregistered gossip message", - requestFunc: func(router *Router) error { - return router.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty gossip message", - requestFunc: func(router *Router) error { - return router.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{}) - }, - err: nil, - }, - { - name: "drop unrequested app request failed", - requestFunc: func(router *Router) error { - return router.AppRequestFailed(context.Background(), ids.GenerateTestNodeID(), 0) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested app response", - requestFunc: func(router *Router) error { - return router.AppResponse(context.Background(), ids.GenerateTestNodeID(), 0, nil) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested cross-chain request failed", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequestFailed(context.Background(), ids.GenerateTestID(), 0) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested cross-chain response", - requestFunc: func(router *Router) error { - return router.CrossChainAppResponse(context.Background(), ids.GenerateTestID(), 0, nil) - }, - err: ErrUnrequestedResponse, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - router := NewRouter(logging.NoLog{}, nil, prometheus.NewRegistry(), "") - - err := tt.requestFunc(router) - require.ErrorIs(err, tt.err) - }) - } -} - -// It's possible for the request id to overflow and wrap around. 
-// If there are still pending requests with the same request id, we should -// not attempt to issue another request until the previous one has cleared. -func TestAppRequestDuplicateRequestIDs(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - handler := mocks.NewMockHandler(ctrl) - sender := common.NewMockSender(ctrl) - router := NewRouter(logging.NoLog{}, sender, prometheus.NewRegistry(), "") - nodeID := ids.GenerateTestNodeID() - - requestSent := &sync.WaitGroup{} - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - requestSent.Add(1) - go func() { - require.NoError(router.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) - requestSent.Done() - }() - } - }).AnyTimes() - - timeout := &sync.WaitGroup{} - response := []byte("response") - handler.EXPECT().AppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, request []byte) ([]byte, error) { - timeout.Wait() - return response, nil - }).AnyTimes() - sender.EXPECT().SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), response) - - peers := &Peers{} - require.NoError(peers.Connected(context.Background(), nodeID, nil)) - client, err := router.RegisterAppProtocol(0x1, handler, peers) - require.NoError(err) - - require.NoError(client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, nil)) - requestSent.Wait() - - // force the router to use the same requestID - router.requestID = 1 - timeout.Add(1) - err = client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, nil) - requestSent.Wait() - require.ErrorIs(err, ErrRequestPending) - - timeout.Done() -} diff --git a/network/p2p/throttler.go b/network/p2p/throttler.go index de173a655266..c8f34a7ee90f 100644 --- a/network/p2p/throttler.go +++ b/network/p2p/throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p diff --git a/network/p2p/throttler_handler.go b/network/p2p/throttler_handler.go index e7b4d8f26082..8fa3df93faee 100644 --- a/network/p2p/throttler_handler.go +++ b/network/p2p/throttler_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p2p @@ -9,7 +9,10 @@ import ( "fmt" "time" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" ) var ( @@ -17,23 +20,41 @@ var ( _ Handler = (*ThrottlerHandler)(nil) ) +func NewThrottlerHandler(handler Handler, throttler Throttler, log logging.Logger) *ThrottlerHandler { + return &ThrottlerHandler{ + handler: handler, + throttler: throttler, + log: log, + } +} + type ThrottlerHandler struct { - Handler - Throttler Throttler + handler Handler + throttler Throttler + log logging.Logger } -func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - if !t.Throttler.Handle(nodeID) { - return fmt.Errorf("dropping message from %s: %w", nodeID, ErrThrottled) +func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if !t.throttler.Handle(nodeID) { + t.log.Debug( + "dropping message", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "throttled"), + ) + return } - return t.Handler.AppGossip(ctx, nodeID, gossipBytes) + t.handler.AppGossip(ctx, nodeID, gossipBytes) } func (t ThrottlerHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { - if !t.Throttler.Handle(nodeID) { + if !t.throttler.Handle(nodeID) { return nil, fmt.Errorf("dropping message from %s: %w", nodeID, ErrThrottled) } - return t.Handler.AppRequest(ctx, nodeID, deadline, requestBytes) + return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +func (t ThrottlerHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return t.handler.CrossChainAppRequest(ctx, chainID, deadline, requestBytes) } diff --git a/network/p2p/throttler_handler_test.go b/network/p2p/throttler_handler_test.go index af9c3fda7194..1f5a07069d8e 100644 --- a/network/p2p/throttler_handler_test.go +++ b/network/p2p/throttler_handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
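A sketch of how the reworked ThrottlerHandler composes with an inner handler, using only the constructors shown in this diff and assuming it sits inside the p2p package; the window size and limit are arbitrary example values.

package p2p

import (
	"context"
	"time"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/logging"
)

// newExampleThrottledHandler wraps an inner Handler so that each peer may send
// at most 5 messages per 10-second window (example values).
func newExampleThrottledHandler(inner Handler) *ThrottlerHandler {
	return NewThrottlerHandler(
		inner,
		NewSlidingWindowThrottler(10*time.Second, 5),
		logging.NoLog{},
	)
}

func exampleThrottledUse(ctx context.Context) error {
	h := newExampleThrottledHandler(NoOpHandler{})
	nodeID := ids.GenerateTestNodeID()

	// Throttled gossip is dropped silently (logged at debug level), since
	// AppGossip no longer returns an error.
	h.AppGossip(ctx, nodeID, []byte("gossip"))

	// Throttled requests still surface ErrThrottled to the caller.
	_, err := h.AppRequest(ctx, nodeID, time.Now().Add(time.Second), []byte("request"))
	return err
}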
package p2p @@ -11,34 +11,44 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" ) +var _ Handler = (*TestHandler)(nil) + func TestThrottlerHandlerAppGossip(t *testing.T) { tests := []struct { - name string - Throttler Throttler - expectedErr error + name string + Throttler Throttler + expected bool }{ { - name: "throttled", + name: "not throttled", Throttler: NewSlidingWindowThrottler(time.Second, 1), + expected: true, }, { - name: "throttler errors", - Throttler: NewSlidingWindowThrottler(time.Second, 0), - expectedErr: ErrThrottled, + name: "throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 0), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - handler := ThrottlerHandler{ - Handler: NoOpHandler{}, - Throttler: tt.Throttler, - } - err := handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) - require.ErrorIs(err, tt.expectedErr) + called := false + handler := NewThrottlerHandler( + TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, + tt.Throttler, + logging.NoLog{}, + ) + + handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) + require.Equal(tt.expected, called) }) } } @@ -50,11 +60,11 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { expectedErr error }{ { - name: "throttled", + name: "not throttled", Throttler: NewSlidingWindowThrottler(time.Second, 1), }, { - name: "throttler errors", + name: "throttled", Throttler: NewSlidingWindowThrottler(time.Second, 0), expectedErr: ErrThrottled, }, @@ -63,10 +73,11 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - handler := ThrottlerHandler{ - Handler: NoOpHandler{}, - Throttler: tt.Throttler, - } + handler := NewThrottlerHandler( + NoOpHandler{}, + tt.Throttler, + logging.NoLog{}, + ) _, err := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, []byte("foobar")) require.ErrorIs(err, tt.expectedErr) }) diff --git a/network/p2p/throttler_test.go b/network/p2p/throttler_test.go index c7b0153e671d..3c3c56360dc1 100644 --- a/network/p2p/throttler_test.go +++ b/network/p2p/throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p diff --git a/network/p2p/validators.go b/network/p2p/validators.go index edad9b890430..3ece6559af42 100644 --- a/network/p2p/validators.go +++ b/network/p2p/validators.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p2p @@ -22,11 +22,18 @@ var ( ) type ValidatorSet interface { - Has(ctx context.Context, nodeID ids.NodeID) bool + Has(ctx context.Context, nodeID ids.NodeID) bool // TODO return error } -func NewValidators(log logging.Logger, subnetID ids.ID, validators validators.State, maxValidatorSetStaleness time.Duration) *Validators { +func NewValidators( + peers *Peers, + log logging.Logger, + subnetID ids.ID, + validators validators.State, + maxValidatorSetStaleness time.Duration, +) *Validators { return &Validators{ + peers: peers, log: log, subnetID: subnetID, validators: validators, @@ -36,6 +43,7 @@ func NewValidators(log logging.Logger, subnetID ids.ID, validators validators.St // Validators contains a set of nodes that are staking. type Validators struct { + peers *Peers log logging.Logger subnetID ids.ID validators validators.State @@ -71,20 +79,35 @@ func (v *Validators) refresh(ctx context.Context) { v.lastUpdated = time.Now() } +// Sample returns a random sample of connected validators func (v *Validators) Sample(ctx context.Context, limit int) []ids.NodeID { v.lock.Lock() defer v.lock.Unlock() v.refresh(ctx) - return v.validatorIDs.Sample(limit) + // TODO: Account for peer connectivity during the sampling of validators + // rather than filtering sampled validators. + validatorIDs := v.validatorIDs.Sample(limit) + sampled := validatorIDs[:0] + + for _, validatorID := range validatorIDs { + if !v.peers.has(validatorID) { + continue + } + + sampled = append(sampled, validatorID) + } + + return sampled } +// Has returns if nodeID is a connected validator func (v *Validators) Has(ctx context.Context, nodeID ids.NodeID) bool { v.lock.Lock() defer v.lock.Unlock() v.refresh(ctx) - return v.validatorIDs.Contains(nodeID) + return v.peers.has(nodeID) && v.validatorIDs.Contains(nodeID) } diff --git a/network/p2p/validators_test.go b/network/p2p/validators_test.go index 5db06f7a2efa..4671a20fdcae 100644 --- a/network/p2p/validators_test.go +++ b/network/p2p/validators_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p2p @@ -9,11 +9,14 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -151,12 +154,11 @@ func TestValidatorsSample(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - subnetID := ids.GenerateTestID() + ctrl := gomock.NewController(t) mockValidators := validators.NewMockState(ctrl) - calls := make([]*gomock.Call, 0) + calls := make([]any, 0) for _, call := range tt.calls { calls = append(calls, mockValidators.EXPECT(). GetCurrentHeight(gomock.Any()).Return(call.height, call.getCurrentHeightErr)) @@ -177,10 +179,17 @@ func TestValidatorsSample(t *testing.T) { } gomock.InOrder(calls...) 
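The Sample change above filters the sampled validator IDs down to connected peers in place; here is a stripped-down sketch of that filtering pattern, with a connectivity predicate standing in for Peers.has.

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
)

// filterConnected mirrors the in-place filtering in Validators.Sample: reuse
// the sampled slice's backing array and keep only connected node IDs.
func filterConnected(sampled []ids.NodeID, connected func(ids.NodeID) bool) []ids.NodeID {
	filtered := sampled[:0]
	for _, nodeID := range sampled {
		if !connected(nodeID) {
			continue
		}
		filtered = append(filtered, nodeID)
	}
	return filtered
}

func main() {
	a, b := ids.GenerateTestNodeID(), ids.GenerateTestNodeID()
	online := map[ids.NodeID]bool{a: true} // only a is connected

	sampled := filterConnected([]ids.NodeID{a, b}, func(id ids.NodeID) bool { return online[id] })
	fmt.Println(len(sampled)) // 1
}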
- v := NewValidators(logging.NoLog{}, subnetID, mockValidators, tt.maxStaleness) + network, err := NewNetwork(logging.NoLog{}, &common.FakeSender{}, prometheus.NewRegistry(), "") + require.NoError(err) + + ctx := context.Background() + require.NoError(network.Connected(ctx, nodeID1, nil)) + require.NoError(network.Connected(ctx, nodeID2, nil)) + + v := NewValidators(network.Peers, network.log, subnetID, mockValidators, tt.maxStaleness) for _, call := range tt.calls { v.lastUpdated = call.time - sampled := v.Sample(context.Background(), call.limit) + sampled := v.Sample(ctx, call.limit) require.LessOrEqual(len(sampled), call.limit) require.Subset(call.expected, sampled) } diff --git a/network/peer/config.go b/network/peer/config.go index b4fd03db2166..0a01cf87fb92 100644 --- a/network/peer/config.go +++ b/network/peer/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -40,6 +40,9 @@ type Config struct { PongTimeout time.Duration MaxClockDifference time.Duration + SupportedACPs []uint32 + ObjectedACPs []uint32 + // Unix time of the last message sent and received respectively // Must only be accessed atomically LastSent, LastReceived int64 @@ -50,6 +53,6 @@ type Config struct { // Calculates uptime of peers UptimeCalculator uptime.Calculator - // Signs my IP so I can send my signed IP address in the Version message + // Signs my IP so I can send my signed IP address in the Handshake message IPSigner *IPSigner } diff --git a/network/peer/example_test.go b/network/peer/example_test.go index 75eaecee53d5..d6c8ba20c913 100644 --- a/network/peer/example_test.go +++ b/network/peer/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/gossip_tracker.go b/network/peer/gossip_tracker.go deleted file mode 100644 index 5676b0734fc8..000000000000 --- a/network/peer/gossip_tracker.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -// GossipTracker tracks the validators that we're currently aware of, as well as -// the validators we've told each peers about. This data is stored in a bitset -// to optimize space, where only N (num validators) bits will be used per peer. -// -// This is done by recording some state information of both what validators this -// node is aware of, and what validators we've told each peer about. -// As an example, say we track three peers and three validators (MSB first): -// -// trackedPeers: { -// p1: [1, 1, 1] // we have already told [p1] about all validators -// p2: [0, 1, 1] // [p2] doesn't know about [v3] -// p3: [0, 0, 1] // [p3] knows only about [v3] -// } -// -// GetUnknown computes the validators we haven't sent to a given peer. 
Ex: -// -// GetUnknown(p1) - [0, 0, 0] -// GetUnknown(p2) - [1, 0, 0] -// GetUnknown(p3) - [1, 1, 0] -// -// Using the gossipTracker, we can quickly compute the validators each peer -// doesn't know about using GetUnknown so that in subsequent PeerList gossip -// messages we only send information that this peer (most likely) doesn't -// already know about. The only case where we'll send a redundant set of -// bytes is if another remote peer gossips to the same peer we're trying to -// gossip to first. -type GossipTracker interface { - // Tracked returns if a peer is being tracked - // Returns: - // bool: False if [peerID] is not tracked. True otherwise. - Tracked(peerID ids.NodeID) bool - - // StartTrackingPeer starts tracking a peer - // Returns: - // bool: False if [peerID] was already tracked. True otherwise. - StartTrackingPeer(peerID ids.NodeID) bool - // StopTrackingPeer stops tracking a given peer - // Returns: - // bool: False if [peerID] was not tracked. True otherwise. - StopTrackingPeer(peerID ids.NodeID) bool - - // AddValidator adds a validator that can be gossiped about - // bool: False if a validator with the same node ID or txID as [validator] - // is present. True otherwise. - AddValidator(validator ValidatorID) bool - // GetNodeID maps a txID into a nodeIDs - // nodeID: The nodeID that was registered by [txID] - // bool: False if [validator] was not present. True otherwise. - GetNodeID(txID ids.ID) (ids.NodeID, bool) - // RemoveValidator removes a validator that can be gossiped about - // bool: False if [validator] was already not present. True otherwise. - RemoveValidator(validatorID ids.NodeID) bool - // ResetValidator resets known gossip status of [validatorID] to unknown - // for all peers - // bool: False if [validator] was not present. True otherwise. - ResetValidator(validatorID ids.NodeID) bool - - // AddKnown adds [knownTxIDs] to the txIDs known by [peerID] and filters - // [txIDs] for non-validators. - // Returns: - // txIDs: The txIDs in [txIDs] that are currently validators. - // bool: False if [peerID] is not tracked. True otherwise. - AddKnown( - peerID ids.NodeID, - knownTxIDs []ids.ID, - txIDs []ids.ID, - ) ([]ids.ID, bool) - // GetUnknown gets the peers that we haven't sent to this peer - // Returns: - // []ValidatorID: a slice of ValidatorIDs that [peerID] doesn't know about. - // bool: False if [peerID] is not tracked. True otherwise. - GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) -} - -type gossipTracker struct { - lock sync.RWMutex - // a mapping of txIDs => the validator added to the validiator set by that - // tx. 
- txIDsToNodeIDs map[ids.ID]ids.NodeID - // a mapping of validators => the index they occupy in the bitsets - nodeIDsToIndices map[ids.NodeID]int - // each validator in the index it occupies in the bitset - validatorIDs []ValidatorID - // a mapping of each peer => the validators they know about - trackedPeers map[ids.NodeID]set.Bits - - metrics gossipTrackerMetrics -} - -// NewGossipTracker returns an instance of gossipTracker -func NewGossipTracker( - registerer prometheus.Registerer, - namespace string, -) (GossipTracker, error) { - m, err := newGossipTrackerMetrics(registerer, fmt.Sprintf("%s_gossip_tracker", namespace)) - if err != nil { - return nil, err - } - - return &gossipTracker{ - txIDsToNodeIDs: make(map[ids.ID]ids.NodeID), - nodeIDsToIndices: make(map[ids.NodeID]int), - trackedPeers: make(map[ids.NodeID]set.Bits), - metrics: m, - }, nil -} - -func (g *gossipTracker) Tracked(peerID ids.NodeID) bool { - g.lock.RLock() - defer g.lock.RUnlock() - - _, ok := g.trackedPeers[peerID] - return ok -} - -func (g *gossipTracker) StartTrackingPeer(peerID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // don't track the peer if it's already being tracked - if _, ok := g.trackedPeers[peerID]; ok { - return false - } - - // start tracking the peer. Initialize their bitset to zero since we - // haven't sent them anything yet. - g.trackedPeers[peerID] = set.NewBits() - - // emit metrics - g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) - - return true -} - -func (g *gossipTracker) StopTrackingPeer(peerID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only stop tracking peers that are actually being tracked - if _, ok := g.trackedPeers[peerID]; !ok { - return false - } - - // stop tracking the peer by removing them - delete(g.trackedPeers, peerID) - g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) - - return true -} - -func (g *gossipTracker) AddValidator(validator ValidatorID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only add validators that are not already present - if _, ok := g.txIDsToNodeIDs[validator.TxID]; ok { - return false - } - if _, ok := g.nodeIDsToIndices[validator.NodeID]; ok { - return false - } - - // add the validator to the MSB of the bitset. 
- msb := len(g.validatorIDs) - g.txIDsToNodeIDs[validator.TxID] = validator.NodeID - g.nodeIDsToIndices[validator.NodeID] = msb - g.validatorIDs = append(g.validatorIDs, validator) - - // emit metrics - g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) - - return true -} - -func (g *gossipTracker) GetNodeID(txID ids.ID) (ids.NodeID, bool) { - g.lock.RLock() - defer g.lock.RUnlock() - - nodeID, ok := g.txIDsToNodeIDs[txID] - return nodeID, ok -} - -func (g *gossipTracker) RemoveValidator(validatorID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only remove validators that are already present - indexToRemove, ok := g.nodeIDsToIndices[validatorID] - if !ok { - return false - } - validatorToRemove := g.validatorIDs[indexToRemove] - - // swap the validator-to-be-removed with the validator in the last index - // if the element we're swapping with is ourselves, we can skip this swap - // since we only need to delete instead - lastIndex := len(g.validatorIDs) - 1 - if indexToRemove != lastIndex { - lastValidator := g.validatorIDs[lastIndex] - - g.nodeIDsToIndices[lastValidator.NodeID] = indexToRemove - g.validatorIDs[indexToRemove] = lastValidator - } - - delete(g.txIDsToNodeIDs, validatorToRemove.TxID) - delete(g.nodeIDsToIndices, validatorID) - g.validatorIDs = g.validatorIDs[:lastIndex] - - // Invariant: We must remove the validator from everyone else's validator - // bitsets to make sure that each validator occupies the same position in - // each bitset. - for _, knownPeers := range g.trackedPeers { - // swap the element to be removed with the msb - if indexToRemove != lastIndex { - if knownPeers.Contains(lastIndex) { - knownPeers.Add(indexToRemove) - } else { - knownPeers.Remove(indexToRemove) - } - } - knownPeers.Remove(lastIndex) - } - - // emit metrics - g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) - - return true -} - -func (g *gossipTracker) ResetValidator(validatorID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only reset validators that exist - indexToReset, ok := g.nodeIDsToIndices[validatorID] - if !ok { - return false - } - - for _, knownPeers := range g.trackedPeers { - knownPeers.Remove(indexToReset) - } - - return true -} - -// AddKnown invariants: -// -// 1. [peerID] SHOULD only be a nodeID that has been tracked with -// StartTrackingPeer(). -func (g *gossipTracker) AddKnown( - peerID ids.NodeID, - knownTxIDs []ids.ID, - txIDs []ids.ID, -) ([]ids.ID, bool) { - g.lock.Lock() - defer g.lock.Unlock() - - knownPeers, ok := g.trackedPeers[peerID] - if !ok { - return nil, false - } - for _, txID := range knownTxIDs { - nodeID, ok := g.txIDsToNodeIDs[txID] - if !ok { - // We don't know about this txID, this can happen due to differences - // between our current validator set and the peer's current - // validator set. - continue - } - - // Because we fetched the nodeID from [g.txIDsToNodeIDs], we are - // guaranteed that the index is populated. 
- index := g.nodeIDsToIndices[nodeID] - knownPeers.Add(index) - } - - validatorTxIDs := make([]ids.ID, 0, len(txIDs)) - for _, txID := range txIDs { - if _, ok := g.txIDsToNodeIDs[txID]; ok { - validatorTxIDs = append(validatorTxIDs, txID) - } - } - return validatorTxIDs, true -} - -func (g *gossipTracker) GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) { - g.lock.RLock() - defer g.lock.RUnlock() - - // return false if this peer isn't tracked - knownPeers, ok := g.trackedPeers[peerID] - if !ok { - return nil, false - } - - // Calculate the unknown information we need to send to this peer. We do - // this by computing the difference between the validators we know about - // and the validators we know we've sent to [peerID]. - result := make([]ValidatorID, 0, len(g.validatorIDs)) - for i, validatorID := range g.validatorIDs { - if !knownPeers.Contains(i) { - result = append(result, validatorID) - } - } - - return result, true -} diff --git a/network/peer/gossip_tracker_callback.go b/network/peer/gossip_tracker_callback.go deleted file mode 100644 index 28514ac163a6..000000000000 --- a/network/peer/gossip_tracker_callback.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import ( - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" -) - -var _ validators.SetCallbackListener = (*GossipTrackerCallback)(nil) - -// GossipTrackerCallback synchronizes GossipTracker's validator state with the -// validator set it's registered to. -type GossipTrackerCallback struct { - Log logging.Logger - GossipTracker GossipTracker -} - -// OnValidatorAdded adds [validatorID] to the set of validators that can be -// gossiped about -func (g *GossipTrackerCallback) OnValidatorAdded( - nodeID ids.NodeID, - _ *bls.PublicKey, - txID ids.ID, - _ uint64, -) { - vdr := ValidatorID{ - NodeID: nodeID, - TxID: txID, - } - if !g.GossipTracker.AddValidator(vdr) { - g.Log.Error("failed to add a validator", - zap.Stringer("nodeID", nodeID), - zap.Stringer("txID", txID), - ) - } -} - -// OnValidatorRemoved removes [validatorID] from the set of validators that can -// be gossiped about. -func (g *GossipTrackerCallback) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { - if !g.GossipTracker.RemoveValidator(nodeID) { - g.Log.Error("failed to remove a validator", - zap.Stringer("nodeID", nodeID), - ) - } -} - -// OnValidatorWeightChanged does nothing because PeerList gossip doesn't care -// about validator weights. -func (*GossipTrackerCallback) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {} diff --git a/network/peer/gossip_tracker_metrics.go b/network/peer/gossip_tracker_metrics.go deleted file mode 100644 index e80f31765b9c..000000000000 --- a/network/peer/gossip_tracker_metrics.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package peer - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" -) - -type gossipTrackerMetrics struct { - trackedPeersSize prometheus.Gauge - validatorsSize prometheus.Gauge -} - -func newGossipTrackerMetrics(registerer prometheus.Registerer, namespace string) (gossipTrackerMetrics, error) { - m := gossipTrackerMetrics{ - trackedPeersSize: prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tracked_peers_size", - Help: "amount of peers that are being tracked", - }, - ), - validatorsSize: prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validators_size", - Help: "number of validators this node is tracking", - }, - ), - } - - err := utils.Err( - registerer.Register(m.trackedPeersSize), - registerer.Register(m.validatorsSize), - ) - return m, err -} diff --git a/network/peer/gossip_tracker_test.go b/network/peer/gossip_tracker_test.go deleted file mode 100644 index 1bd420c4f433..000000000000 --- a/network/peer/gossip_tracker_test.go +++ /dev/null @@ -1,620 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import ( - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - // peers - p1 = ids.GenerateTestNodeID() - p2 = ids.GenerateTestNodeID() - p3 = ids.GenerateTestNodeID() - - // validators - v1 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } - v2 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } - v3 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } -) - -func TestGossipTracker_Contains(t *testing.T) { - tests := []struct { - name string - track []ids.NodeID - contains ids.NodeID - expected bool - }{ - { - name: "empty", - track: []ids.NodeID{}, - contains: p1, - expected: false, - }, - { - name: "populated - does not contain", - track: []ids.NodeID{p1, p2}, - contains: p3, - expected: false, - }, - { - name: "populated - contains", - track: []ids.NodeID{p1, p2, p3}, - contains: p3, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, add := range test.track { - require.True(g.StartTrackingPeer(add)) - } - - require.Equal(test.expected, g.Tracked(test.contains)) - }) - } -} - -func TestGossipTracker_StartTrackingPeer(t *testing.T) { - tests := []struct { - name string - toStartTracking []ids.NodeID - expected []bool - }{ - { - // Tracking new peers always works - name: "unique adds", - toStartTracking: []ids.NodeID{p1, p2, p3}, - expected: []bool{true, true, true}, - }, - { - // We shouldn't be able to track a peer more than once - name: "duplicate adds", - toStartTracking: []ids.NodeID{p1, p1, p1}, - expected: []bool{true, false, false}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for i, p := range test.toStartTracking { - require.Equal(test.expected[i], g.StartTrackingPeer(p)) - require.True(g.Tracked(p)) - } - }) - } -} - -func TestGossipTracker_StopTrackingPeer(t *testing.T) { - tests := []struct { - name string - toStartTracking []ids.NodeID - 
expectedStartTracking []bool - toStopTracking []ids.NodeID - expectedStopTracking []bool - }{ - { - // We should be able to stop tracking that we are tracking - name: "stop tracking tracked peers", - toStartTracking: []ids.NodeID{p1, p2, p3}, - toStopTracking: []ids.NodeID{p1, p2, p3}, - expectedStopTracking: []bool{true, true, true}, - }, - { - // We shouldn't be able to stop tracking peers we've stopped tracking - name: "stop tracking twice", - toStartTracking: []ids.NodeID{p1}, - toStopTracking: []ids.NodeID{p1, p1}, - expectedStopTracking: []bool{true, false}, - }, - { - // We shouldn't be able to stop tracking peers we were never tracking - name: "remove non-existent elements", - toStartTracking: []ids.NodeID{}, - expectedStartTracking: []bool{}, - toStopTracking: []ids.NodeID{p1, p2, p3}, - expectedStopTracking: []bool{false, false, false}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, add := range test.toStartTracking { - require.True(g.StartTrackingPeer(add)) - require.True(g.Tracked(add)) - } - - for i, p := range test.toStopTracking { - require.Equal(test.expectedStopTracking[i], g.StopTrackingPeer(p)) - } - }) - } -} - -func TestGossipTracker_AddValidator(t *testing.T) { - type args struct { - validator ValidatorID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "not present", - validators: []ValidatorID{}, - args: args{validator: v1}, - expected: true, - }, - { - name: "already present txID but with different nodeID", - validators: []ValidatorID{v1}, - args: args{validator: ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: v1.TxID, - }}, - expected: false, - }, - { - name: "already present nodeID but with different txID", - validators: []ValidatorID{v1}, - args: args{validator: ValidatorID{ - NodeID: v1.NodeID, - TxID: ids.GenerateTestID(), - }}, - expected: false, - }, - { - name: "already present validatorID", - validators: []ValidatorID{v1}, - args: args{validator: v1}, - expected: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - require.Equal(test.expected, g.AddValidator(test.args.validator)) - }) - } -} - -func TestGossipTracker_RemoveValidator(t *testing.T) { - type args struct { - id ids.NodeID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "not already present", - validators: []ValidatorID{}, - args: args{id: v1.NodeID}, - expected: false, - }, - { - name: "already present", - validators: []ValidatorID{v1}, - args: args{id: v1.NodeID}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - require.Equal(test.expected, g.RemoveValidator(test.args.id)) - }) - } -} - -func TestGossipTracker_ResetValidator(t *testing.T) { - type args struct { - id ids.NodeID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "non-existent validator", - 
validators: []ValidatorID{}, - args: args{id: v1.NodeID}, - expected: false, - }, - { - name: "existing validator", - validators: []ValidatorID{v1}, - args: args{id: v1.NodeID}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - require.True(g.StartTrackingPeer(p1)) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - g.AddKnown(p1, []ids.ID{v.TxID}, nil) - - unknown, ok := g.GetUnknown(p1) - require.True(ok) - require.NotContains(unknown, v) - } - - require.Equal(test.expected, g.ResetValidator(test.args.id)) - - for _, v := range test.validators { - unknown, ok := g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v) - } - }) - } -} - -func TestGossipTracker_AddKnown(t *testing.T) { - type args struct { - peerID ids.NodeID - txIDs []ids.ID - } - - tests := []struct { - name string - trackedPeers []ids.NodeID - validators []ValidatorID - args args - expectedTxIDs []ids.ID - expectedOk bool - }{ - { - // We should not be able to update an untracked peer - name: "untracked peer - empty", - trackedPeers: []ids.NodeID{}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We should not be able to update an untracked peer - name: "untracked peer - populated", - trackedPeers: []ids.NodeID{p2, p3}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We shouldn't be able to look up a peer that isn't tracked - name: "untracked peer - unknown validator", - trackedPeers: []ids.NodeID{}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We shouldn't fail on a validator that's not registered - name: "tracked peer - unknown validator", - trackedPeers: []ids.NodeID{p1}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: []ids.ID{}, - expectedOk: true, - }, - { - // We should be able to update a tracked validator - name: "update tracked validator", - trackedPeers: []ids.NodeID{p1, p2, p3}, - validators: []ValidatorID{v1}, - args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: []ids.ID{v1.TxID}, - expectedOk: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, p := range test.trackedPeers { - require.True(g.StartTrackingPeer(p)) - require.True(g.Tracked(p)) - } - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - txIDs, ok := g.AddKnown(test.args.peerID, test.args.txIDs, test.args.txIDs) - require.Equal(test.expectedOk, ok) - require.Equal(test.expectedTxIDs, txIDs) - }) - } -} - -func TestGossipTracker_GetUnknown(t *testing.T) { - tests := []struct { - name string - peerID ids.NodeID - peersToTrack []ids.NodeID - validators []ValidatorID - expectedUnknown []ValidatorID - expectedOk bool - }{ - { - name: "non tracked peer", - peerID: p1, - validators: []ValidatorID{v2}, - peersToTrack: []ids.NodeID{}, - expectedUnknown: nil, - expectedOk: false, - }, - { - name: "only validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1}, - validators: []ValidatorID{v2}, - expectedUnknown: []ValidatorID{v2}, - 
expectedOk: true, - }, - { - name: "only non-validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1, p2}, - validators: []ValidatorID{}, - expectedUnknown: []ValidatorID{}, - expectedOk: true, - }, - { - name: "validators and non-validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1, p3}, - validators: []ValidatorID{v2}, - expectedUnknown: []ValidatorID{v2}, - expectedOk: true, - }, - { - name: "same as limit", - peerID: p1, - peersToTrack: []ids.NodeID{p1}, - validators: []ValidatorID{v2, v3}, - expectedUnknown: []ValidatorID{v2, v3}, - expectedOk: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - // add our validators - for _, validator := range test.validators { - require.True(g.AddValidator(validator)) - } - - // start tracking our peers - for _, nonValidator := range test.peersToTrack { - require.True(g.StartTrackingPeer(nonValidator)) - require.True(g.Tracked(nonValidator)) - } - - // get the unknown peers for this peer - result, ok := g.GetUnknown(test.peerID) - require.Equal(test.expectedOk, ok) - require.Len(result, len(test.expectedUnknown)) - for _, v := range test.expectedUnknown { - require.Contains(result, v) - } - }) - } -} - -func TestGossipTracker_E2E(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - // [v1, v2, v3] are validators - require.True(g.AddValidator(v1)) - require.True(g.AddValidator(v2)) - - // we should get an empty unknown since we're not tracking anything - unknown, ok := g.GetUnknown(p1) - require.False(ok) - require.Nil(unknown) - - // we should get a unknown of [v1, v2] since v1 and v2 are registered - require.True(g.StartTrackingPeer(p1)) - require.True(g.Tracked(p1)) - - // check p1's unknown - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // Check p2's unknown. We should get nothing since we're not tracking it - // yet. 
- unknown, ok = g.GetUnknown(p2) - require.False(ok) - require.Nil(unknown) - - // Start tracking p2 - require.True(g.StartTrackingPeer(p2)) - - // check p2's unknown - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // p1 now knows about v1, but not v2, so it should see [v2] in its unknown - // p2 still knows nothing, so it should see both - txIDs, ok := g.AddKnown(p1, []ids.ID{v1.TxID}, []ids.ID{v1.TxID}) - require.True(ok) - require.Equal([]ids.ID{v1.TxID}, txIDs) - - // p1 should have an unknown of [v2], since it knows v1 - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Len(unknown, 1) - - // p2 should have a unknown of [v1, v2], since it knows nothing - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // Add v3 - require.True(g.AddValidator(v3)) - - // track p3, who knows of v1, v2, and v3 - // p1 and p2 still don't know of v3 - require.True(g.StartTrackingPeer(p3)) - - txIDs, ok = g.AddKnown(p3, []ids.ID{v1.TxID, v2.TxID, v3.TxID}, []ids.ID{v1.TxID, v2.TxID, v3.TxID}) - require.True(ok) - require.Equal([]ids.ID{v1.TxID, v2.TxID, v3.TxID}, txIDs) - - // p1 doesn't know about [v2, v3] - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 2) - - // p2 doesn't know about [v1, v2, v3] - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 3) - - // p3 knows about everyone - unknown, ok = g.GetUnknown(p3) - require.True(ok) - require.Empty(unknown) - - // stop tracking p2 - require.True(g.StopTrackingPeer(p2)) - unknown, ok = g.GetUnknown(p2) - require.False(ok) - require.Nil(unknown) - - // p1 doesn't know about [v2, v3] because v2 is still registered as - // a validator - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 2) - - // Remove p2 from the validator set - require.True(g.RemoveValidator(v2.NodeID)) - - // p1 doesn't know about [v3] since v2 left the validator set - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v3) - require.Len(unknown, 1) - - // p3 knows about everyone since it learned about v1 and v3 earlier. - unknown, ok = g.GetUnknown(p3) - require.Empty(unknown) - require.True(ok) -} - -func TestGossipTracker_Regression_IncorrectTxIDDeletion(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - require.True(g.AddValidator(v1)) - require.True(g.AddValidator(v2)) - - require.True(g.RemoveValidator(v1.NodeID)) - - require.False(g.AddValidator(ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: v2.TxID, - })) -} diff --git a/network/peer/info.go b/network/peer/info.go index 45f7a3cdd4a6..00ccaec7953b 100644 --- a/network/peer/info.go +++ b/network/peer/info.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" ) type Info struct { @@ -19,5 +20,7 @@ type Info struct { LastReceived time.Time `json:"lastReceived"` ObservedUptime json.Uint32 `json:"observedUptime"` ObservedSubnetUptimes map[ids.ID]json.Uint32 `json:"observedSubnetUptimes"` - TrackedSubnets []ids.ID `json:"trackedSubnets"` + TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` + SupportedACPs set.Set[uint32] `json:"supportedACPs"` + ObjectedACPs set.Set[uint32] `json:"objectedACPs"` } diff --git a/network/peer/ip.go b/network/peer/ip.go index 8fb9d744f974..590003c850d8 100644 --- a/network/peer/ip.go +++ b/network/peer/ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -6,6 +6,9 @@ package peer import ( "crypto" "crypto/rand" + "errors" + "fmt" + "time" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/hashing" @@ -13,6 +16,11 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) +var ( + errTimestampTooFarInFuture = errors.New("timestamp too far in the future") + errInvalidSignature = errors.New("invalid signature") +) + // UnsignedIP is used for a validator to claim an IP. The [Timestamp] is used to // ensure that the most updated IP claim is tracked by peers for a given // validator. @@ -49,10 +57,24 @@ type SignedIP struct { Signature []byte } -func (ip *SignedIP) Verify(cert *staking.Certificate) error { - return staking.CheckSignature( +// Returns nil if: +// * [ip.Timestamp] is not after [maxTimestamp]. +// * [ip.Signature] is a valid signature over [ip.UnsignedIP] from [cert]. +func (ip *SignedIP) Verify( + cert *staking.Certificate, + maxTimestamp time.Time, +) error { + maxUnixTimestamp := uint64(maxTimestamp.Unix()) + if ip.Timestamp > maxUnixTimestamp { + return fmt.Errorf("%w: timestamp %d > maxTimestamp %d", errTimestampTooFarInFuture, ip.Timestamp, maxUnixTimestamp) + } + + if err := staking.CheckSignature( cert, ip.UnsignedIP.bytes(), ip.Signature, - ) + ); err != nil { + return fmt.Errorf("%w: %w", errInvalidSignature, err) + } + return nil } diff --git a/network/peer/ip_signer.go b/network/peer/ip_signer.go index b524d3463619..cfe85f387819 100644 --- a/network/peer/ip_signer.go +++ b/network/peer/ip_signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/ip_signer_test.go b/network/peer/ip_signer_test.go index 382501a825bc..7e5314f5f58a 100644 --- a/network/peer/ip_signer_test.go +++ b/network/peer/ip_signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/ip_test.go b/network/peer/ip_test.go new file mode 100644 index 000000000000..3b4854562ec5 --- /dev/null +++ b/network/peer/ip_test.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
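Note on the Verify change above: in addition to checking the signature, the claimed signing time is now bounded from above, so a peer cannot present an IP claim dated arbitrarily far in the future (claims from the past remain acceptable). The new ip_test.go that follows exercises both failure modes. A minimal caller-side sketch, assuming the caller supplies its own clock reading and configured maximum skew (now and maxClockDifference are illustrative names, not part of this diff):

    func verifyClaimedIP(ip *SignedIP, cert *staking.Certificate, now time.Time, maxClockDifference time.Duration) error {
        // Only timestamps beyond now+maxClockDifference are rejected.
        maxTimestamp := now.Add(maxClockDifference)
        // On failure this wraps either errTimestampTooFarInFuture or errInvalidSignature.
        return ip.Verify(cert, maxTimestamp)
    }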
+ +package peer + +import ( + "crypto" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" +) + +func TestSignedIpVerify(t *testing.T) { + tlsCert1, err := staking.NewTLSCert() + require.NoError(t, err) + cert1, err := staking.CertificateFromX509(tlsCert1.Leaf) + require.NoError(t, err) + require.NoError(t, staking.ValidateCertificate(cert1)) + + tlsCert2, err := staking.NewTLSCert() + require.NoError(t, err) + cert2, err := staking.CertificateFromX509(tlsCert2.Leaf) + require.NoError(t, err) + require.NoError(t, staking.ValidateCertificate(cert2)) + + now := time.Now() + + type test struct { + name string + signer crypto.Signer + expectedCert *staking.Certificate + ip UnsignedIP + maxTimestamp time.Time + expectedErr error + } + + tests := []test{ + { + name: "valid (before max time)", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) - 1, + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "valid (at max time)", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "timestamp too far ahead", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) + 1, + }, + maxTimestamp: now, + expectedErr: errTimestampTooFarInFuture, + }, + { + name: "sig from wrong cert", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert2, // note this isn't cert1 + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: errInvalidSignature, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + signedIP, err := tt.ip.Sign(tt.signer) + require.NoError(t, err) + + err = signedIP.Verify(tt.expectedCert, tt.maxTimestamp) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} diff --git a/network/peer/message_queue.go b/network/peer/message_queue.go index b9d38996723b..f2ccef6dc915 100644 --- a/network/peer/message_queue.go +++ b/network/peer/message_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/message_queue_test.go b/network/peer/message_queue_test.go index 2e1e46f5e2f5..496f19425f20 100644 --- a/network/peer/message_queue_test.go +++ b/network/peer/message_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/metrics.go b/network/peer/metrics.go index 602726131134..cad8797addfb 100644 --- a/network/peer/metrics.go +++ b/network/peer/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
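Note on the Info struct from the info.go hunk earlier in this diff: TrackedSubnets is now a set.Set[ids.ID] rather than a []ids.ID, and the peer's ACP preferences are exposed alongside the uptime data. A small sketch of reading it (the ACP number is purely illustrative, not taken from this diff):

    info := p.Info()
    for subnetID := range info.TrackedSubnets { // set.Set iterates like a map
        _ = subnetID
    }
    _ = info.SupportedACPs.Contains(23) // example ACP number only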
package peer diff --git a/network/peer/mock_gossip_tracker.go b/network/peer/mock_gossip_tracker.go deleted file mode 100644 index ee7b8c21ca91..000000000000 --- a/network/peer/mock_gossip_tracker.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/network/peer (interfaces: GossipTracker) - -// Package peer is a generated GoMock package. -package peer - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "go.uber.org/mock/gomock" -) - -// MockGossipTracker is a mock of GossipTracker interface. -type MockGossipTracker struct { - ctrl *gomock.Controller - recorder *MockGossipTrackerMockRecorder -} - -// MockGossipTrackerMockRecorder is the mock recorder for MockGossipTracker. -type MockGossipTrackerMockRecorder struct { - mock *MockGossipTracker -} - -// NewMockGossipTracker creates a new mock instance. -func NewMockGossipTracker(ctrl *gomock.Controller) *MockGossipTracker { - mock := &MockGossipTracker{ctrl: ctrl} - mock.recorder = &MockGossipTrackerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGossipTracker) EXPECT() *MockGossipTrackerMockRecorder { - return m.recorder -} - -// AddKnown mocks base method. -func (m *MockGossipTracker) AddKnown(arg0 ids.NodeID, arg1, arg2 []ids.ID) ([]ids.ID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddKnown", arg0, arg1, arg2) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// AddKnown indicates an expected call of AddKnown. -func (mr *MockGossipTrackerMockRecorder) AddKnown(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddKnown", reflect.TypeOf((*MockGossipTracker)(nil).AddKnown), arg0, arg1, arg2) -} - -// AddValidator mocks base method. -func (m *MockGossipTracker) AddValidator(arg0 ValidatorID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// AddValidator indicates an expected call of AddValidator. -func (mr *MockGossipTrackerMockRecorder) AddValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddValidator", reflect.TypeOf((*MockGossipTracker)(nil).AddValidator), arg0) -} - -// GetNodeID mocks base method. -func (m *MockGossipTracker) GetNodeID(arg0 ids.ID) (ids.NodeID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNodeID", arg0) - ret0, _ := ret[0].(ids.NodeID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetNodeID indicates an expected call of GetNodeID. -func (mr *MockGossipTrackerMockRecorder) GetNodeID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockGossipTracker)(nil).GetNodeID), arg0) -} - -// GetUnknown mocks base method. -func (m *MockGossipTracker) GetUnknown(arg0 ids.NodeID) ([]ValidatorID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnknown", arg0) - ret0, _ := ret[0].([]ValidatorID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetUnknown indicates an expected call of GetUnknown. 
-func (mr *MockGossipTrackerMockRecorder) GetUnknown(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnknown", reflect.TypeOf((*MockGossipTracker)(nil).GetUnknown), arg0) -} - -// RemoveValidator mocks base method. -func (m *MockGossipTracker) RemoveValidator(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// RemoveValidator indicates an expected call of RemoveValidator. -func (mr *MockGossipTrackerMockRecorder) RemoveValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveValidator", reflect.TypeOf((*MockGossipTracker)(nil).RemoveValidator), arg0) -} - -// ResetValidator mocks base method. -func (m *MockGossipTracker) ResetValidator(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ResetValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// ResetValidator indicates an expected call of ResetValidator. -func (mr *MockGossipTrackerMockRecorder) ResetValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetValidator", reflect.TypeOf((*MockGossipTracker)(nil).ResetValidator), arg0) -} - -// StartTrackingPeer mocks base method. -func (m *MockGossipTracker) StartTrackingPeer(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartTrackingPeer", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// StartTrackingPeer indicates an expected call of StartTrackingPeer. -func (mr *MockGossipTrackerMockRecorder) StartTrackingPeer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StartTrackingPeer), arg0) -} - -// StopTrackingPeer mocks base method. -func (m *MockGossipTracker) StopTrackingPeer(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopTrackingPeer", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// StopTrackingPeer indicates an expected call of StopTrackingPeer. -func (mr *MockGossipTrackerMockRecorder) StopTrackingPeer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StopTrackingPeer), arg0) -} - -// Tracked mocks base method. -func (m *MockGossipTracker) Tracked(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tracked", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Tracked indicates an expected call of Tracked. -func (mr *MockGossipTrackerMockRecorder) Tracked(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tracked", reflect.TypeOf((*MockGossipTracker)(nil).Tracked), arg0) -} diff --git a/network/peer/msg_length.go b/network/peer/msg_length.go index 27a48dea3060..625034913d9f 100644 --- a/network/peer/msg_length.go +++ b/network/peer/msg_length.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer diff --git a/network/peer/msg_length_test.go b/network/peer/msg_length_test.go index 52767888c8c2..97866a7d95cf 100644 --- a/network/peer/msg_length_test.go +++ b/network/peer/msg_length_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/network.go b/network/peer/network.go index fc136f0bcb9c..b8fb01814546 100644 --- a/network/peer/network.go +++ b/network/peer/network.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -19,15 +19,9 @@ type Network interface { // connection is no longer desired and should be terminated. AllowConnection(peerID ids.NodeID) bool - // Track allows the peer to notify the network of a potential new peer to - // connect to, given the [ips] of the peers it sent us during the peer - // handshake. - // - // Returns which IPs should not be gossipped to this node again. - Track(peerID ids.NodeID, ips []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) - - // MarkTracked stops sending gossip about [ips] to [peerID]. - MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error + // Track allows the peer to notify the network of potential new peers to + // connect to. + Track(ips []*ips.ClaimedIPPort) error // Disconnected is called when the peer finishes shutting down. It is not // guaranteed that [Connected] was called for the provided peer. However, it @@ -35,6 +29,13 @@ type Network interface { // for a given [Peer] object. Disconnected(peerID ids.NodeID) - // Peers returns peers that [peerID] might not know about. - Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) + // KnownPeers returns the bloom filter of the known peers. + KnownPeers() (bloomFilter []byte, salt []byte) + + // Peers returns peers that are not known. + Peers( + peerID ids.NodeID, + knownPeers *bloom.ReadFilter, + peerSalt []byte, + ) []*ips.ClaimedIPPort } diff --git a/network/peer/peer.go b/network/peer/peer.go index 503f97262882..f5cebb613e00 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -21,6 +21,7 @@ import ( "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" @@ -29,6 +30,10 @@ import ( "github.com/ava-labs/avalanchego/version" ) +// maxBloomSaltLen restricts the allowed size of the bloom salt to prevent +// excessively expensive bloom filter contains checks. +const maxBloomSaltLen = 32 + var ( errClosed = errors.New("closed") @@ -91,6 +96,11 @@ type Peer interface { // sent. StartSendPeerList() + // StartSendGetPeerList attempts to send a GetPeerList message to this peer + // on this peer's gossip routine. It is not guaranteed that a GetPeerList + // will be sent. 
+ StartSendGetPeerList() + // StartClose will begin shutting down the peer. It will not block. StartClose() @@ -120,27 +130,30 @@ type peer struct { // queue of messages to send to this peer. messageQueue MessageQueue - // ip is the claimed IP the peer gave us in the Version message. + // ip is the claimed IP the peer gave us in the Handshake message. ip *SignedIP // version is the claimed version the peer is running that we received in - // the Version message. + // the Handshake message. version *version.Application - // trackedSubnets is the subset of subnetIDs the peer sent us in the Version + // trackedSubnets is the subset of subnetIDs the peer sent us in the Handshake // message that we are also tracking. trackedSubnets set.Set[ids.ID] + // options of ACPs provided in the Handshake message. + supportedACPs set.Set[uint32] + objectedACPs set.Set[uint32] observedUptimesLock sync.RWMutex // [observedUptimesLock] must be held while accessing [observedUptime] // Subnet ID --> Our uptime for the given subnet as perceived by the peer observedUptimes map[ids.ID]uint32 - // True if this peer has sent us a valid Version message and + // True if this peer has sent us a valid Handshake message and // is running a compatible version. // Only modified on the connection's reader routine. - gotVersion utils.Atomic[bool] + gotHandshake utils.Atomic[bool] // True if the peer: - // * Has sent us a Version message + // * Has sent us a Handshake message // * Has sent us a PeerList message // * Is running a compatible version // Only modified on the connection's reader routine. @@ -167,6 +180,10 @@ type peer struct { // peerListChan signals that we should attempt to send a PeerList to this // peer peerListChan chan struct{} + + // getPeerListChan signals that we should attempt to send a GetPeerList to + // this peer + getPeerListChan chan struct{} } // Start a new peer instance. 
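Note on the Network and Peer interface changes above: peer-list gossip moves from the push-and-ack scheme (Track with per-peer acks, MarkTracked, PeerListAck) to a pull model. Each node keeps a bloom filter over the peers it already knows (KnownPeers), ships that filter plus a salt in GetPeerList, and the responder answers with only the ClaimedIPPorts that miss the filter (Peers). A rough responder-side sketch; gossipKey is an assumed stand-in for whatever stable per-IP key gets hashed into the filter, and the bloom helpers are the utils/bloom functions referenced elsewhere in this diff:

    func unknownTo(requester *bloom.ReadFilter, salt []byte, known []*ips.ClaimedIPPort) []*ips.ClaimedIPPort {
        var result []*ips.ClaimedIPPort
        for _, ip := range known {
            // Skip anything the requester's filter already claims to contain;
            // false positives only mean an IP occasionally is not re-gossiped.
            if bloom.Contains(requester, gossipKey(ip), salt) {
                continue
            }
            result = append(result, ip)
        }
        return result
    }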
@@ -194,6 +211,7 @@ func Start( onClosed: make(chan struct{}), observedUptimes: make(map[ids.ID]uint32), peerListChan: make(chan struct{}, 1), + getPeerListChan: make(chan struct{}, 1), } go p.readMessages() @@ -246,10 +264,9 @@ func (p *peer) Info() Info { publicIPStr = p.ip.IPPort.String() } - trackedSubnets := p.trackedSubnets.List() - uptimes := make(map[ids.ID]json.Uint32, len(trackedSubnets)) + uptimes := make(map[ids.ID]json.Uint32, p.trackedSubnets.Len()) - for _, subnetID := range trackedSubnets { + for subnetID := range p.trackedSubnets { uptime, exist := p.ObservedUptime(subnetID) if !exist { continue @@ -271,7 +288,9 @@ func (p *peer) Info() Info { LastReceived: p.LastReceived(), ObservedUptime: json.Uint32(primaryUptime), ObservedSubnetUptimes: uptimes, - TrackedSubnets: trackedSubnets, + TrackedSubnets: p.trackedSubnets, + SupportedACPs: p.supportedACPs, + ObjectedACPs: p.objectedACPs, } } @@ -306,6 +325,13 @@ func (p *peer) StartSendPeerList() { } } +func (p *peer) StartSendGetPeerList() { + select { + case p.getPeerListChan <- struct{}{}: + default: + } +} + func (p *peer) StartClose() { p.startClosingOnce.Do(func() { if err := p.conn.Close(); err != nil { @@ -487,7 +513,7 @@ func (p *peer) writeMessages() { writer := bufio.NewWriterSize(p.conn, p.Config.WriteBufferSize) - // Make sure that the version is the first message sent + // Make sure that the Handshake is the first message sent mySignedIP, err := p.IPSigner.GetSignedIP() if err != nil { p.Log.Error("failed to get signed IP", @@ -496,19 +522,44 @@ func (p *peer) writeMessages() { ) return } + if mySignedIP.Port == 0 { + p.Log.Error("signed IP has invalid port", + zap.Stringer("nodeID", p.id), + zap.Uint16("port", mySignedIP.Port), + ) + return + } - msg, err := p.MessageCreator.Version( + myVersion := p.VersionCompatibility.Version() + legacyApplication := &version.Application{ + Name: version.LegacyAppName, + Major: myVersion.Major, + Minor: myVersion.Minor, + Patch: myVersion.Patch, + } + + knownPeersFilter, knownPeersSalt := p.Network.KnownPeers() + + msg, err := p.MessageCreator.Handshake( p.NetworkID, p.Clock.Unix(), mySignedIP.IPPort, - p.VersionCompatibility.Version().String(), + legacyApplication.String(), + myVersion.Name, + uint32(myVersion.Major), + uint32(myVersion.Minor), + uint32(myVersion.Patch), mySignedIP.Timestamp, mySignedIP.Signature, p.MySubnets.List(), + p.SupportedACPs, + p.ObjectedACPs, + knownPeersFilter, + knownPeersSalt, ) if err != nil { p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.HandshakeOp), zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -596,15 +647,7 @@ func (p *peer) sendNetworkMessages() { for { select { case <-p.peerListChan: - peerIPs, err := p.Config.Network.Peers(p.id) - if err != nil { - p.Log.Error("failed to get peers to gossip", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - return - } - + peerIPs := p.Config.Network.Peers(p.id, bloom.EmptyFilter, nil) if len(peerIPs) == 0 { p.Log.Verbo( "skipping peer gossip as there are no unknown peers", @@ -629,6 +672,22 @@ func (p *peer) sendNetworkMessages() { zap.Stringer("nodeID", p.id), ) } + case <-p.getPeerListChan: + knownPeersFilter, knownPeersSalt := p.Config.Network.KnownPeers() + msg, err := p.Config.MessageCreator.GetPeerList(knownPeersFilter, knownPeersSalt) + if err != nil { + p.Log.Error("failed to create get peer list message", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + continue + } + + if !p.Send(p.onClosingCtx, 
msg) { + p.Log.Debug("failed to send get peer list", + zap.Stringer("nodeID", p.id), + ) + } case <-sendPingsTicker.C: if !p.Network.AllowConnection(p.id) { p.Log.Debug("disconnecting from peer", @@ -678,16 +737,16 @@ func (p *peer) handle(msg message.InboundMessage) { p.handlePong(m) msg.OnFinishedHandling() return - case *p2p.Version: - p.handleVersion(m) + case *p2p.Handshake: + p.handleHandshake(m) msg.OnFinishedHandling() return - case *p2p.PeerList: - p.handlePeerList(m) + case *p2p.GetPeerList: + p.handleGetPeerList(m) msg.OnFinishedHandling() return - case *p2p.PeerListAck: - p.handlePeerListAck(m) + case *p2p.PeerList: + p.handlePeerList(m) msg.OnFinishedHandling() return } @@ -828,10 +887,10 @@ func (p *peer) observeUptime(subnetID ids.ID, uptime uint32) { p.observedUptimesLock.Unlock() } -func (p *peer) handleVersion(msg *p2p.Version) { - if p.gotVersion.Get() { +func (p *peer) handleHandshake(msg *p2p.Handshake) { + if p.gotHandshake.Get() { // TODO: this should never happen, should we close the connection here? - p.Log.Verbo("dropping duplicated version message", + p.Log.Verbo("dropping duplicated handshake message", zap.Stringer("nodeID", p.id), ) return @@ -847,8 +906,9 @@ func (p *peer) handleVersion(msg *p2p.Version) { return } - myTime := p.Clock.Unix() - clockDifference := math.Abs(float64(msg.MyTime) - float64(myTime)) + myTime := p.Clock.Time() + myTimeUnix := uint64(myTime.Unix()) + clockDifference := math.Abs(float64(msg.MyTime) - float64(myTimeUnix)) p.Metrics.ClockSkew.Observe(clockDifference) @@ -857,66 +917,64 @@ func (p *peer) handleVersion(msg *p2p.Version) { p.Log.Warn("beacon reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } else { p.Log.Debug("peer reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } p.StartClose() return } - peerVersion, err := version.ParseApplication(msg.MyVersion) - if err != nil { - p.Log.Debug("failed to parse peer version", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - p.StartClose() - return + if msg.Client != nil { + p.version = &version.Application{ + Name: msg.Client.Name, + Major: int(msg.Client.Major), + Minor: int(msg.Client.Minor), + Patch: int(msg.Client.Patch), + } + } else { + // Handle legacy version field + peerVersion, err := version.ParseLegacyApplication(msg.MyVersion) + if err != nil { + p.Log.Debug("failed to parse peer version", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + p.StartClose() + return + } + p.version = peerVersion } - p.version = peerVersion - if p.VersionCompatibility.Version().Before(peerVersion) { + if p.VersionCompatibility.Version().Before(p.version) { if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { p.Log.Info("beacon attempting to connect with newer version. You may want to update your client", zap.Stringer("nodeID", p.id), - zap.Stringer("beaconVersion", peerVersion), + zap.Stringer("beaconVersion", p.version), ) } else { p.Log.Debug("peer attempting to connect with newer version. 
You may want to update your client", zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", peerVersion), + zap.Stringer("peerVersion", p.version), ) } } - if err := p.VersionCompatibility.Compatible(peerVersion); err != nil { + if err := p.VersionCompatibility.Compatible(p.version); err != nil { p.Log.Verbo("peer version not compatible", zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", peerVersion), + zap.Stringer("peerVersion", p.version), zap.Error(err), ) p.StartClose() return } - // Note that it is expected that the [versionTime] can be in the past. We - // are just verifying that the claimed signing time isn't too far in the - // future here. - if float64(msg.MyVersionTime)-float64(myTime) > p.MaxClockDifference.Seconds() { - p.Log.Debug("peer attempting to connect with version timestamp too far in the future", - zap.Stringer("nodeID", p.id), - zap.Uint64("versionTime", msg.MyVersionTime), - ) - p.StartClose() - return - } - // handle subnet IDs for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) @@ -934,17 +992,81 @@ func (p *peer) handleVersion(msg *p2p.Version) { } } + for _, acp := range msg.SupportedAcps { + if constants.CurrentACPs.Contains(acp) { + p.supportedACPs.Add(acp) + } + } + for _, acp := range msg.ObjectedAcps { + if constants.CurrentACPs.Contains(acp) { + p.objectedACPs.Add(acp) + } + } + + if p.supportedACPs.Overlaps(p.objectedACPs) { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "ACPs"), + zap.Reflect("supportedACPs", p.supportedACPs), + zap.Reflect("objectedACPs", p.objectedACPs), + ) + p.StartClose() + return + } + + var ( + knownPeers = bloom.EmptyFilter + salt []byte + ) + if msg.KnownPeers != nil { + var err error + knownPeers, err = bloom.Parse(msg.KnownPeers.Filter) + if err != nil { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "KnownPeers.Filter"), + zap.Error(err), + ) + p.StartClose() + return + } + + salt = msg.KnownPeers.Salt + if saltLen := len(salt); saltLen > maxBloomSaltLen { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "KnownPeers.Salt"), + zap.Int("saltLen", saltLen), + ) + p.StartClose() + return + } + } + // "net.IP" type in Golang is 16-byte if ipLen := len(msg.IpAddr); ipLen != net.IPv6len { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "IP"), zap.Int("ipLen", ipLen), ) p.StartClose() return } + if msg.IpPort == 0 { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "Port"), + zap.Uint32("port", msg.IpPort), + ) + p.StartClose() + return + } p.ip = &SignedIP{ UnsignedIP: UnsignedIP{ @@ -952,55 +1074,120 @@ func (p *peer) handleVersion(msg *p2p.Version) { IP: msg.IpAddr, Port: uint16(msg.IpPort), }, - Timestamp: msg.MyVersionTime, + Timestamp: msg.IpSigningTime, }, Signature: msg.Sig, } - if err := p.ip.Verify(p.cert); err != nil { - p.Log.Debug("signature verification failed", + maxTimestamp := myTime.Add(p.MaxClockDifference) + if err := p.ip.Verify(p.cert, maxTimestamp); err != nil { + if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, 
p.id); ok { + p.Log.Warn("beacon has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } else { + p.Log.Debug("peer has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } + + p.StartClose() + return + } + + p.gotHandshake.Set(true) + + peerIPs := p.Network.Peers(p.id, knownPeers, salt) + + // We bypass throttling here to ensure that the handshake message is + // acknowledged correctly. + peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) + if err != nil { + p.Log.Error("failed to create peer list handshake message", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) - p.StartClose() return } - p.gotVersion.Set(true) + if !p.Send(p.onClosingCtx, peerListMsg) { + // Because throttling was marked to be bypassed with this message, + // sending should only fail if the peer has started closing. + p.Log.Debug("failed to send peer list for handshake", + zap.Stringer("nodeID", p.id), + zap.Error(p.onClosingCtx.Err()), + ) + } +} + +func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { + if !p.finishedHandshake.Get() { + p.Log.Verbo("dropping get peer list message", + zap.Stringer("nodeID", p.id), + ) + return + } - peerIPs, err := p.Network.Peers(p.id) + knownPeersMsg := msg.GetKnownPeers() + filter, err := bloom.Parse(knownPeersMsg.GetFilter()) if err != nil { - p.Log.Error("failed to get peers to gossip for handshake", + p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), + zap.String("field", "KnownPeers.Filter"), zap.Error(err), ) + p.StartClose() return } - // We bypass throttling here to ensure that the version message is - // acknowledged timely. - peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) + salt := knownPeersMsg.GetSalt() + if saltLen := len(salt); saltLen > maxBloomSaltLen { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), + zap.String("field", "KnownPeers.Salt"), + zap.Int("saltLen", saltLen), + ) + p.StartClose() + return + } + + peerIPs := p.Network.Peers(p.id, filter, salt) + if len(peerIPs) == 0 { + p.Log.Debug("skipping sending of empty peer list", + zap.Stringer("nodeID", p.id), + ) + return + } + + // Bypass throttling is disabled here to follow the non-handshake message + // sending pattern. + peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, false /*=bypassThrottling*/) if err != nil { - p.Log.Error("failed to create peer list handshake message", + p.Log.Error("failed to create peer list message", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) return } if !p.Send(p.onClosingCtx, peerListMsg) { - // Because throttling was marked to be bypassed with this message, - // sending should only fail if the peer has started closing. 
- p.Log.Debug("failed to send peer list for handshake", + p.Log.Debug("failed to send peer list", zap.Stringer("nodeID", p.id), - zap.Error(p.onClosingCtx.Err()), ) } } func (p *peer) handlePeerList(msg *p2p.PeerList) { if !p.finishedHandshake.Get() { - if !p.gotVersion.Get() { + if !p.gotHandshake.Get() { return } @@ -1009,10 +1196,22 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { close(p.onFinishHandshake) } - // the peers this peer told us about - discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) + // Invariant: We do not account for clock skew here, as the sender of the + // certificate is expected to account for clock skew during the activation + // of Durango. + durangoTime := version.GetDurangoTime(p.NetworkID) + beforeDurango := time.Now().Before(durangoTime) + discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) // the peers this peer told us about for i, claimedIPPort := range msg.ClaimedIpPorts { - tlsCert, err := staking.ParseCertificate(claimedIPPort.X509Certificate) + var ( + tlsCert *staking.Certificate + err error + ) + if beforeDurango { + tlsCert, err = staking.ParseCertificate(claimedIPPort.X509Certificate) + } else { + tlsCert, err = staking.ParseCertificatePermissive(claimedIPPort.X509Certificate) + } if err != nil { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), @@ -1028,40 +1227,36 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { if ipLen := len(claimedIPPort.IpAddr); ipLen != net.IPv6len { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "IP"), zap.Int("ipLen", ipLen), ) p.StartClose() return } - - txID, err := ids.ToID(claimedIPPort.TxId) - if err != nil { + if claimedIPPort.IpPort == 0 { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), - zap.String("field", "txID"), - zap.Error(err), + zap.String("field", "Port"), + zap.Uint32("port", claimedIPPort.IpPort), ) - p.StartClose() - return + // TODO: After v1.11.x is activated, close the peer here. 
+ continue } - discoveredIPs[i] = &ips.ClaimedIPPort{ - Cert: tlsCert, - IPPort: ips.IPPort{ + discoveredIPs[i] = ips.NewClaimedIPPort( + tlsCert, + ips.IPPort{ IP: claimedIPPort.IpAddr, Port: uint16(claimedIPPort.IpPort), }, - Timestamp: claimedIPPort.Timestamp, - Signature: claimedIPPort.Signature, - TxID: txID, - } + claimedIPPort.Timestamp, + claimedIPPort.Signature, + ) } - trackedPeers, err := p.Network.Track(p.id, discoveredIPs) - if err != nil { + if err := p.Network.Track(discoveredIPs); err != nil { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), @@ -1069,42 +1264,6 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { zap.Error(err), ) p.StartClose() - return - } - if len(trackedPeers) == 0 { - p.Log.Debug("skipping peerlist ack as there were no tracked peers", - zap.Stringer("nodeID", p.id), - ) - return - } - - peerListAckMsg, err := p.Config.MessageCreator.PeerListAck(trackedPeers) - if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.PeerListAckOp), - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - return - } - - if !p.Send(p.onClosingCtx, peerListAckMsg) { - p.Log.Debug("failed to send peer list ack", - zap.Stringer("nodeID", p.id), - ) - } -} - -func (p *peer) handlePeerListAck(msg *p2p.PeerListAck) { - err := p.Network.MarkTracked(p.id, msg.PeerAcks) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListAckOp), - zap.String("field", "txID"), - zap.Error(err), - ) - p.StartClose() } } diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index 30695e4902ad..82a2560b22d9 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
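Note on the handshake handling above: peers now advertise ACP preferences during the handshake. ACP numbers the local node does not recognize are ignored, and a peer that both supports and objects to the same ACP is treated as malformed and disconnected. A condensed sketch of that rule, with currentACPs standing in for constants.CurrentACPs:

    var supported, objected set.Set[uint32]
    for _, acp := range msg.SupportedAcps {
        if currentACPs.Contains(acp) {
            supported.Add(acp)
        }
    }
    for _, acp := range msg.ObjectedAcps {
        if currentACPs.Contains(acp) {
            objected.Add(acp)
        }
    }
    if supported.Overlaps(objected) {
        p.StartClose() // supporting and objecting to the same ACP is invalid
        return
    }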
package peer @@ -78,17 +78,17 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee tlsCert0, err := staking.NewTLSCert() require.NoError(err) - cert0 := staking.CertificateFromX509(tlsCert0.Leaf) - - tlsCert1, err := staking.NewTLSCert() + cert0, err := staking.CertificateFromX509(tlsCert0.Leaf) require.NoError(err) - cert1 := staking.CertificateFromX509(tlsCert1.Leaf) - nodeID0, err := CertToID(tlsCert0.Leaf) + tlsCert1, err := staking.NewTLSCert() require.NoError(err) - nodeID1, err := CertToID(tlsCert1.Leaf) + cert1, err := staking.CertificateFromX509(tlsCert1.Leaf) require.NoError(err) + nodeID0 := cert0.NodeID + nodeID1 := cert1.NodeID + mc := newMessageCreator(t) metrics, err := NewMetrics( @@ -124,7 +124,7 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee peerConfig0 := sharedConfig peerConfig1 := sharedConfig - ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 0) + ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 1) tls0 := tlsCert0.PrivateKey.(crypto.Signer) peerConfig0.IPSigner = NewIPSigner(ip0, tls0) @@ -134,7 +134,7 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee inboundMsgChan0 <- msg }) - ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 1) + ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 2) tls1 := tlsCert1.PrivateKey.(crypto.Signer) peerConfig1.IPSigner = NewIPSigner(ip1, tls1) diff --git a/network/peer/set.go b/network/peer/set.go index bc3fbe60743d..cbb9675ec305 100644 --- a/network/peer/set.go +++ b/network/peer/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/set_test.go b/network/peer/set_test.go index f26b1d19f8ec..fbdbc3e84643 100644 --- a/network/peer/set_test.go +++ b/network/peer/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -18,24 +18,24 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, } updatedPeer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } unknownPeer := &peer{ - id: ids.NodeID{0xff}, + id: ids.BuildTestNodeID([]byte{0xff}), } peer3 := &peer{ - id: ids.NodeID{0x03}, + id: ids.BuildTestNodeID([]byte{0x03}), } peer4 := &peer{ - id: ids.NodeID{0x04}, + id: ids.BuildTestNodeID([]byte{0x04}), } // add of first peer is handled @@ -105,10 +105,10 @@ func TestSetSample(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } // Case: Empty diff --git a/network/peer/test_network.go b/network/peer/test_network.go index 9bac6260bece..01a341ae9abc 100644 --- a/network/peer/test_network.go +++ b/network/peer/test_network.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -19,16 +19,16 @@ func (testNetwork) AllowConnection(ids.NodeID) bool { return true } -func (testNetwork) Track(ids.NodeID, []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { - return nil, nil -} - -func (testNetwork) MarkTracked(ids.NodeID, []*p2p.PeerAck) error { +func (testNetwork) Track([]*ips.ClaimedIPPort) error { return nil } func (testNetwork) Disconnected(ids.NodeID) {} -func (testNetwork) Peers(ids.NodeID) ([]ips.ClaimedIPPort, error) { - return nil, nil +func (testNetwork) KnownPeers() ([]byte, []byte) { + return bloom.EmptyFilter.Marshal(), nil +} + +func (testNetwork) Peers(ids.NodeID, *bloom.ReadFilter, []byte) []*ips.ClaimedIPPort { + return nil } diff --git a/network/peer/test_peer.go b/network/peer/test_peer.go index 62717e27dca1..04cfd93aaa7a 100644 --- a/network/peer/test_peer.go +++ b/network/peer/test_peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -65,6 +65,7 @@ func StartTestPeer( clientUpgrader := NewTLSClientUpgrader( tlsConfg, prometheus.NewCounter(prometheus.CounterOpts{}), + version.GetDurangoTime(networkID), ) peerID, conn, cert, err := clientUpgrader.Upgrade(conn) @@ -102,7 +103,7 @@ func StartTestPeer( return nil, err } - signerIP := ips.NewDynamicIPPort(net.IPv6zero, 0) + signerIP := ips.NewDynamicIPPort(net.IPv6zero, 1) tls := tlsCert.PrivateKey.(crypto.Signer) peer := Start( diff --git a/network/peer/tls_config.go b/network/peer/tls_config.go index 733812db5f7e..7de848ed062a 100644 --- a/network/peer/tls_config.go +++ b/network/peer/tls_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/network/peer/upgrader.go b/network/peer/upgrader.go index d2dc5499afbe..185b38ee20c6 100644 --- a/network/peer/upgrader.go +++ b/network/peer/upgrader.go @@ -8,22 +8,21 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
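Note on the upgrader changes that follow: both TLS upgraders now take the Durango activation time so that, once Durango is active, inbound certificates are parsed with staking.ParseCertificatePermissive instead of the strict parser, and the node ID is read directly from the parsed staking certificate (the CertToID and StakingCertToID helpers are removed). A construction sketch mirroring the test_peer.go change above; tlsConfig, networkID, and rawConn are placeholders:

    upgrader := NewTLSClientUpgrader(
        tlsConfig,
        prometheus.NewCounter(prometheus.CounterOpts{}),
        version.GetDurangoTime(networkID),
    )
    nodeID, tlsConn, cert, err := upgrader.Upgrade(rawConn)
    // On success nodeID == cert.NodeID; no separate CertToID step is needed.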
package peer import ( "crypto/tls" - "crypto/x509" "errors" "net" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) var ( @@ -41,80 +40,68 @@ type Upgrader interface { type tlsServerUpgrader struct { config *tls.Config invalidCerts prometheus.Counter + durangoTime time.Time } -func NewTLSServerUpgrader(config *tls.Config, invalidCerts prometheus.Counter) Upgrader { +func NewTLSServerUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { return &tlsServerUpgrader{ config: config, invalidCerts: invalidCerts, + durangoTime: durangoTime, } } func (t *tlsServerUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) { - return connToIDAndCert(tls.Server(conn, t.config), t.invalidCerts) + return connToIDAndCert(tls.Server(conn, t.config), t.invalidCerts, t.durangoTime) } type tlsClientUpgrader struct { config *tls.Config invalidCerts prometheus.Counter + durangoTime time.Time } -func NewTLSClientUpgrader(config *tls.Config, invalidCerts prometheus.Counter) Upgrader { +func NewTLSClientUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { return &tlsClientUpgrader{ config: config, invalidCerts: invalidCerts, + durangoTime: durangoTime, } } func (t *tlsClientUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) { - return connToIDAndCert(tls.Client(conn, t.config), t.invalidCerts) + return connToIDAndCert(tls.Client(conn, t.config), t.invalidCerts, t.durangoTime) } -func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeID, net.Conn, *staking.Certificate, error) { +func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter, durangoTime time.Time) (ids.NodeID, net.Conn, *staking.Certificate, error) { if err := conn.Handshake(); err != nil { - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } state := conn.ConnectionState() if len(state.PeerCertificates) == 0 { - return ids.NodeID{}, nil, nil, errNoCert + return ids.EmptyNodeID, nil, nil, errNoCert } tlsCert := state.PeerCertificates[0] // Invariant: ParseCertificate is used rather than CertificateFromX509 to // ensure that signature verification can assume the certificate was // parseable according the staking package's parser. - peerCert, err := staking.ParseCertificate(tlsCert.Raw) - if err != nil { - invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err - } - - // We validate the certificate here to attempt to make the validity of the - // peer certificate as clear as possible. Specifically, a node running a - // prior version using an invalid certificate should not be able to report - // healthy. - if err := staking.ValidateCertificate(peerCert); err != nil { - invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err + // + // TODO: Remove pre-Durango parsing after v1.11.x has activated. 
+ var ( + peerCert *staking.Certificate + err error + ) + if time.Now().Before(durangoTime) { + peerCert, err = staking.ParseCertificate(tlsCert.Raw) + } else { + peerCert, err = staking.ParseCertificatePermissive(tlsCert.Raw) } - - nodeID, err := CertToID(tlsCert) - return nodeID, conn, peerCert, err -} - -func CertToID(cert *x509.Certificate) (ids.NodeID, error) { - pubKeyBytes, err := secp256k1.RecoverSecp256PublicKey(cert) if err != nil { - return ids.EmptyNodeID, err + invalidCerts.Inc() + return ids.EmptyNodeID, nil, nil, err } - return ids.ToNodeID(pubKeyBytes) -} -func StakingCertToID(cert *staking.Certificate) (ids.NodeID, error) { - tlsCert, err := x509.ParseCertificate(cert.Raw) - if err != nil { - return ids.EmptyNodeID, err - } - return CertToID(tlsCert) + return peerCert.NodeID, conn, peerCert, err } diff --git a/network/peer/validator_id.go b/network/peer/validator_id.go deleted file mode 100644 index 5471fda20118..000000000000 --- a/network/peer/validator_id.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import "github.com/ava-labs/avalanchego/ids" - -// ValidatorID represents a validator that we gossip to other peers -type ValidatorID struct { - // The validator's ID - NodeID ids.NodeID - // The Tx that added this into the validator set - TxID ids.ID -} diff --git a/network/test_cert_1.crt b/network/test_cert_1.crt new file mode 100644 index 000000000000..fdea827de0d8 --- /dev/null +++ b/network/test_cert_1.crt @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE8DCCAtigAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMzE4MTYwMTI5WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAvsEZXaDjj95upEeri5jtyXOL6eg63IfTX+EEsTL8ntDZVBEAI8082PPH +0tYmn/TzyYpabiGeRhmnVmClxKDmRjZKo7yzQXjMWBASjGl3aY9ywR0sDDf/Zp66 +wtNGP7JyJEgJFfQyPCU7Updy4ZCfa6PUEJ9zYVCbeLIZq5UQJriwn5kd+MlPup5m +McEEj8nGgROceSGEU7n6lj9jR054ZVhtmhg5TyZ4FazS+JVlspPrLvCLnzZx8wRK +DFz0d0PyI7ys6Op2CfEhF7OY9SGbW5bRFszUiWeZLxXKTyDcKJclr8hDX6U/3Eiv +D57K9Iy7j1dr4GSbQqanYjmrKHHcHdY3RA9ZNo4kPtdDNlMEHv4CK7W4ErawI9Uz +2x67AFpF9yhSU9V8a4MZRNodjCdiIpoL05KfEeLv/qPnovh45a0+OSZQ+sfAuNZU +JHvYTWNFrWlWOmKc7VxeRP27hkj3quFLTzfDAYKYJAia0S/D37MlN9oEonWfE2KM +wn1eXWckebAdp8+zE7qIu/CN27zramypmfJGY0PJKtiTdOQHZL2faD1bjzN9JVTM +GET+4ltYQcwldVy0fiJZD+snVRe4tK+5a159Ifetg9tyHp8xnirXZf5LbTMd5GCY +GXpRmej9yeX6ffPKsugMiGOiEYQEsbadnLXhLgw7Sc9KzH6bfKsCAwEAAaNzMHEw +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwUQYJKoZIhvcNAQkVAQH/BEEb +C+HFwdeYOcU1MCyPv0eYmIVFic/Qe/VC0MkpE+Y2xcQeC2JMEW9qXRkDxkqsogC4 +BIb+wTzAe/k0qZJyW39LTzANBgkqhkiG9w0BAQsFAAOCAgEAb6/SwnSwJtdK9p9u +AjHrhrho2UNdm1ng8C5vZZKmsYEvdOMsm3HACZm9p87gzNwG8lAOGIOsAMVUwDWd +2fMCP2XGqK5XLR1YSZJ4tQ6FMWM9vmovcmBOdIC8U++QDg+YPBueP6VFd4pnrRBO +bNwfzsIz3Y3tPJUWsrAIm0l9Pb/S+aN/SE+Fkh0H+lGeyEOwYjjodrNz/8zUMNu/ +XwE160kBmhsAzxqOwX8LDsk+iD5pUOqRVh7mAfLsB9azJbT52kxZY/e+8F80dEjQ +ZVHW5BpTrKZRQET9QcDYzvwRtqC8Lo/D9j7Rw9EyITxTK5US/7TZzv0n1JVYGZkN +B+ssz8hg2JULWr37s1LLGMSw+UcTNixrQcJ+TSmtKIVnJC+T8qYBHeEMV9AY2c/V +H06BkOFF3epOwV1f8TvtNmmfzC/I6zi6nU3ucrrz5VJpFYXBX3MgvkIV4k+W25EA +ZwGOSOidQJeoSOw/StYgRyL8pK5GGNm14HYcwfRfTv9rAvwaoJ75YmHxb2GKmIyY +4sHCIOE7G8hgRVbHXlt6zOJ/PTHCHpjGIkb3ensDIRgKhMAmwZcugtNvYCzGMHTA +tzXYMbb2AedwlPPBEDGOLZNTiYnHGngiQw47G+2O95xmPhJp8enm+nu+kCL8yf0n +oQp/Rh3sD1jtRwRwHkfcMeXSc5c= +-----END CERTIFICATE----- diff --git a/network/test_cert_2.crt b/network/test_cert_2.crt new file mode 100644 index 000000000000..9da575477df7 --- /dev/null +++ b/network/test_cert_2.crt @@ 
-0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE8DCCAtigAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMzE4MTYwMTMwWjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAtPZD0vWJsOtAvfFRmZLg+DtKfWuLTxQpvQ+Nk8Qhb+HMrSdvQGEfP0Ng +ooicuB6qDeqIZDpTQE+wL25d+G3Kl7IMinnIQKUfhfP6nAaQfBJ/sNK2O5CbvorB +hx5Sux5tYeoTlueRmMzwNwaBwHaOI01qrCdU2lZq7Dw6SwR9zg6FTxpTgDuoQ6l5 +lqLV3aFj7hFxW4HrVVjJlv4E6ud40TBBpK1eMVTHuTWro9fR0ywC2JSNn0ziGaxq +i60yqtyjxAuW29O+cqFj8aZMUd5Ctp1fhpKSIQ29X91WwXY3An7ktDQFRdG82BLP +kuMzuO36w2VjotUvVY3yFKjp4cRX8gUqyR8VrmP47Q3ggz2ikGc9WrPjKusqOHBa +SVUdesGzxz/02O+B1S2jCeg2RyNDSvGv2+95a8LIU81hknWO91ehUIpjToBXKr2d +uHO/yWIeFvjeY5GzDPSQynAeYwuGlk4yOQtfrMqAssSsyzzeNh2p7NwL6x+UOfgv +7emRIaGvXS3IDOrZhYqFzrepuKdAbZjlaB9UR2SzEHfSUFfZ7Vr1uuEdAR8E/AVP +IML1ZcGJBZCgk5XtBONXpz/OC6mBhEHwyYsT3WKEbYV1YUvLSLJblazCuu/8lt2H +XdKiJOnO7bqaljULrRurUjvm8tPw4emh8Wn5lpdsS5WwNh7dtbsCAwEAAaNzMHEw +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwUQYJKoZIhvcNAQkVAQH/BEEb +Ptzr6dH/qgkH6FajLU+2aSz2Ilbptfq2MyWpjou2G39Q4keXs03sA2N5q8zxwpIl +U5QntRoh+XYMqk7P2XiiezANBgkqhkiG9w0BAQsFAAOCAgEAZXsfQvMp3IvDeFVZ +wT53Nre6UGESh0XH1YfRcYfKH62GQc52RiO2W1/0L8hg+QEeOZEkXV4c6Z+3bKhY +rogtLlUZJtHcY7GoV11BdzLeHKpSgIvtZ00kAXB9pivGufJe0PSHXKGtpmcf7Uim +SfVYyP7c7/J+pWgixrqqROatQLh/vXTsFDSrjhzQukRChECGG3rkp6X7czeh95A+ +S+u37MaOS1LGlTzvZOJRvTl/SxSObLKRj6vPmRnqnc6sMCLH6904yEK6NFMY1D1a +Ixp+3o0pnqTAbLpXzXQOEPOHaq/Bqd7cKLX5pbscptJ2Fy6hY8f3WcIzEb2spB1v +KHpmb8yYG/QXS1JmIm/ywPyniLzAXljZJoLmgG3brXXPqhvoEocPXUtMUFiA3kLt +AVMZCoCZqJlPUmR9CKYV9ELAeYZJ838la6/9aTHzK812FCun2uOkTrTqkd6bMiV7 +Qv0dyiYpOUy6F/O9ZhqCWU+JIjOQ+8jXD537G+pJOm+HJiFPWjL0a9J57DrKObXT +SGUX0o4PKLwLSbpQNNbXUHq46DU7mPn2AmgjrYnXaFmGLdOsOrN9Vy4vMqFhgPVO +4v/a3oyFwpve5t0/PZDYsLJCb8MDE4RDxlGa5xI47UeYV7j1DqlAVDO3stKxvz0K +Mg2BgFkrVQaV5n9r8J8GMzGIDM0= +-----END CERTIFICATE----- diff --git a/network/test_cert_3.crt b/network/test_cert_3.crt new file mode 100644 index 000000000000..8667b3581bcc --- /dev/null +++ b/network/test_cert_3.crt @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE8DCCAtigAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMzE4MTYwMTMxWjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAsnMwCGlZFe4NiWHzXNHQzv5O9AOZGRf4sBQaLullMyC+QJAiISxZGPSd +zcc2Yo3C873ORRjY4ipY5jpicdL4/5GthxGGMiz9ypbubBXfNZ+NGfC/okESEKZ2 +lCQeIfeG4x+VfFwfLbN4zPLGRj+dDziunImr7ceNFx9iR8IMkvzyHjsXhQYeGRu8 +sB8pY9aIYbkxn2M02rqXRNj3FNc5v6lPlmbh8WRPdD9lYGBSf7lbtqfGVNSEW4ld +LWonA3dtP9VyEA/8sJ1zUS3HzWaL6ndms2PQ329wb7F3OU5cc8OxGLSQ2e+hrcQw +ZC8qhbqI53Da3yiKGeEjsZxuIyLoDWHAFM3RwKjAjkTlqf+YeZ6yQDHd8a4QKJxO +mpBpDke+YhpBWXXOeAiQV9lI+ZO4eNzuMhZxGrL788C4zdm9ORpXP4gM9LKVEcX0 +85Yz4T560jiD4g95V8DvOb1+NBWY762IQmzXGvXD0ZO/HD10x7dDsJ325PfzfBp1 +imPaJJwRK4PW0OXEZA4tJJl6GwgCGjlP+VGcW8Hypzy4rWivQQGok8YcDBzrzm8S +iVdVSSptgq+5mrbBEnxeNCP86v45iQKVEBYZjNxwnWyhBww2PJ1u0LCpdeSq2KOs +bkQBJu0sBLvOG1JvQF49FbTlI5nIjmrze5fKskX4xgMXBRR22y8CAwEAAaNzMHEw +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwUQYJKoZIhvcNAQkVAQH/BEEb +a6VUI7aCC05oxh/67D8Q0ORevEIa3GC8wI2soXvmEZIf3t4FBhFsVtpfRo/obWkE +dc0M6cSydxFKK3D1q8hwvzANBgkqhkiG9w0BAQsFAAOCAgEAa2xMhEVRnQK7rHWC +GVRlIcX9F6uWnb8NhgJ+kKRBdoKZ4u2pWf4NlIkwOleUQqLr112QHJ0kyv3T2DOX +lKkNyqeMf5jAOW8hRWKcm26hivsD6XdaA2W2hWjnFO3qvVncSv4DECrGFj5O4J8a +xNmA0T9l1+xtXWqKmYCISeycalBAWd7Qhl6veZd44MB9OEaWMYspJ2vNenV91C5f +oe6YLFe84TqAFSYHdp+Onkweg7OD3kX6873ey0VNrU7W119O+ciy1zbepcRP4rmD ++grTF5LFPnvQTvY872CRLXyeToF4U5yQIK+ObmrHh3S1TUoHuXpHE33OWRB1Wbd/ +6N2hiaukfk4WQx6nmMLtNd8G36Mj08kpeXSQIGfMw4JgddT6O5+y6ladUQ2/HrGq +DXz6Ip3S6XbIdj9ifUXz7ZZmQw419YK74DekbMxteV7Wu/3EKM3ofTeR52dKC3op 
+XN2J9NjAydFMb/dCbWUk2wYNgeWToQhnEZKM4//H9Sp4oCVVCvAU4TmhSlf4rNwd +Ma/L8vqkO1U4n8b4jb+E1271d9ozWOspSdbSKGz2QnAA+wHDoVZ7CzRoZWZpuR9T +mqakDAXUyJ76qQdrdmvp0oqaOp4lX5PwHxQuqG7eff/YGB6Cqk0I0ckMeNAsWsha +QWkCWSgEkL0mgz3A05Rns6TeUzU= +-----END CERTIFICATE----- diff --git a/network/test_key_1.key b/network/test_key_1.key new file mode 100644 index 000000000000..98adab941bc9 --- /dev/null +++ b/network/test_key_1.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQC+wRldoOOP3m6k +R6uLmO3Jc4vp6Drch9Nf4QSxMvye0NlUEQAjzTzY88fS1iaf9PPJilpuIZ5GGadW +YKXEoOZGNkqjvLNBeMxYEBKMaXdpj3LBHSwMN/9mnrrC00Y/snIkSAkV9DI8JTtS +l3LhkJ9ro9QQn3NhUJt4shmrlRAmuLCfmR34yU+6nmYxwQSPycaBE5x5IYRTufqW +P2NHTnhlWG2aGDlPJngVrNL4lWWyk+su8IufNnHzBEoMXPR3Q/IjvKzo6nYJ8SEX +s5j1IZtbltEWzNSJZ5kvFcpPINwolyWvyENfpT/cSK8Pnsr0jLuPV2vgZJtCpqdi +Oasocdwd1jdED1k2jiQ+10M2UwQe/gIrtbgStrAj1TPbHrsAWkX3KFJT1XxrgxlE +2h2MJ2IimgvTkp8R4u/+o+ei+HjlrT45JlD6x8C41lQke9hNY0WtaVY6YpztXF5E +/buGSPeq4UtPN8MBgpgkCJrRL8PfsyU32gSidZ8TYozCfV5dZyR5sB2nz7MTuoi7 +8I3bvOtqbKmZ8kZjQ8kq2JN05AdkvZ9oPVuPM30lVMwYRP7iW1hBzCV1XLR+IlkP +6ydVF7i0r7lrXn0h962D23IenzGeKtdl/kttMx3kYJgZelGZ6P3J5fp988qy6AyI +Y6IRhASxtp2cteEuDDtJz0rMfpt8qwIDAQABAoICAHFi/tz0wc/G/CWSpJm6Mb5J +fKVxcWudITwg2VUrZZZRtp2N7XNakAy1+9WLjjIOvMrT/qTwPtCUdy+VC3Qx4C9k +HkHt7O/CClVeWGg2WNDaf+/ik0hAEQTC3WjphRxfyhFe8GeoXCP8TaLue08xVFH7 +dBbdcQwrif7YfwHK/WrG5dY4geRcZZQci8qPXVLLMb+QFLa8AZhzOE0PKMg+ELrN +MXkjZQ1zZHYd4Nn6lBTucCHawmPFTT6TEiD/MCGktc4gXPemRrvqJS2diBJOA7cR +kv/miX+ToS6n2TXn5nYKQlCWzdWQuMEMQ2MZlLKA5QxzfqBHrTKVNXHhrafA5oGe +c5HF7Y3jF6Sl0o564t7xefgfCg93Q4wvxKpSIROZYQwcM331Rp7DpiAhz3P4oSqj +pk+jnL6DtXs/ACxwNEh+ta1ofvOAzBwlH9ZtXiuQPKhdIqsZjYDF43ms3/P/g8Yv +1iGrvXr56g+ehkTSMJWHrmSxr4QQKRPxzYNUO0oRMmseZsO87aTi8SWS4sUTXM/5 +Mdy02id6qI1RD84oU7P4ii9MsNz5hLHBzQ9l50t8FWSSEtraS7G5KWJ+5Yh7Q21h +n61Tuzs5XQw86akmek/w9NKIRutnUSuiAu46R4hwwvNnLqMZi/ys0XazXMjxBI3R +jPc91/saVS8VZUQ1wjuBAoIBAQDtDyLIMFul+Mmmq9xgu03lLzgF/Zv+3xPMWEyk +vOF2CPRZJIZaZCdCII/6QYfJ+Tg0fNBXVg4M0zvMguFBoRdQdW3R70EDibBxQg5W +VbtZYwJySoQa+WZ/hncN400eDdOt++VhZ4fSIh36NZRE7oazs6lhx2A8LA7CpGGT +/fpmP/ovRICxECZsqTyvSZSjZHwiMBn46SMF4rEI77eXw2eqK5diS/nEWwtOYuXy +BDOaHYdBNhOFtjEB1jXAZvawOhlGE1y3P3O3OGuGoBvW2G8Eq6Tt3AxlN9oc/jOT +Yj7wUnBgxV78g5qDVzgoE+EDGxxwxwwkt53BwVmQ8zilrYqRAoIBAQDN/tVGbZFu +4DPVQJr3+smn3ecWuqhWYXSiyIL/WIiLn63YMplMRoei6FTuRFWxhIwM9+YRVIOp +nC0xDyWL1ZEzjbPQypStsLsogfaENv3tVllPCBU//E5MjqB8aWv7an/u6WngqsM3 +JN5VihhnKEF4VW2PG1aSa41YiA8ZEYoqobP9v0yvOhzMnHAdLHZ6vy/3c36CsK5F +Gju2efncMiNs/q+p7l+cYwhzFdXovVJjwBmSAoMpuaRWqpKdTlFO/x2MUCKOQ/hL +tL/W7IUbpwcRUjAQIuoAu2k2943Bx/y736hqxs9D7xMu9ynFTDxYw4dNEyYdxYn3 +kXSGO3Dd9dl7AoIBABl6uv5RVEhuiR4E8tbiyuiLPrZGH/Iw/+vCwdojAwiwxbKf +HmGwyhdtcIwxZurqgoQBtlLsyO3P9mlw1806B0t6k6cw1AgRUImb0/armEtvPOAT +6kcL71xdk4ZGnA9S5SGaJXlmq06GpDo0cA9Io+nEsbv0tf9BrQR2rpY9giBjV/yk +nEBrv/WF6yPcAMHfFwiFqwT56e3EA8s2GMGTGx1LOiYyjFHyhzCRqK1uji1OZy83 +JLoGbxYHdBeN+Y3PcM/7XMfFZiaXRddedh+Ne8FAwaVfNWXbrvHW0KxSrvkdoz6D +eEYjzwO26C5GlLTEwTXN7xwzMB2XLo1J1xjXokECggEALuPhHGT4g+qZIePQ4r29 +hW6nma7nfrI0YJGP8BvxCQdSBTKPXqN3YIfOPWZks0Het1z+i9dXGRap2s791vTI +Vpnc4pwad+cQ00myGaqC5rGPJsRKQgRmtlqJAYjlwZceg/2x0ihMw5Kq4YHLyD+L +l56qj7bDYBUHWa2u8h/h+Y5RcGNJS2HJCHJbuuhSF+LJQmSR2aHqKWStTv449/yZ +v8fBqMbQEMMiO0AvLwlZZcdmiqvzu128oNW3BgO70mWUDkp3czbZoDIGsR+ptP+y +RikVM7ce2QfQDWkQZZnmV8Wzioqyx132Wex9H6IY4oOvmsXPGVvL6gS8J4oESSIb +XQKCAQAFjafw2VtyhsVxpQx9Nlw1WU8YMGmvwJs0g80RsQutHChnQcgXfASfSJcq +8r1aJc4apTCsol7KTfrU5ui2nTGYD9XHrazNETki4mhufqPiOJSBjoSDenobIU65 
+dBbwOqoChLXg72aSo3XMViDm+w0kwpC39nCzuSdssxIsL8GjRSQJSivLQghChfRZ +GnG0IF4809Qa/kBvrnTwXM7K6HVGhJeKYzNJqvWF7mhuEEqMzYhZMOxFwFa+ZoSr +evCGsC/zzuagFVjxsTFvDtUHwx+vUdIOIWOkFeYyrzaV6W/Dme2Fsr1sXcojOFzU +/9hCIUsYMEfZ5FMGf6gJPRYm1MhL +-----END PRIVATE KEY----- diff --git a/network/test_key_2.key b/network/test_key_2.key new file mode 100644 index 000000000000..80bae94193ca --- /dev/null +++ b/network/test_key_2.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC09kPS9Ymw60C9 +8VGZkuD4O0p9a4tPFCm9D42TxCFv4cytJ29AYR8/Q2CiiJy4HqoN6ohkOlNAT7Av +bl34bcqXsgyKechApR+F8/qcBpB8En+w0rY7kJu+isGHHlK7Hm1h6hOW55GYzPA3 +BoHAdo4jTWqsJ1TaVmrsPDpLBH3ODoVPGlOAO6hDqXmWotXdoWPuEXFbgetVWMmW +/gTq53jRMEGkrV4xVMe5Nauj19HTLALYlI2fTOIZrGqLrTKq3KPEC5bb075yoWPx +pkxR3kK2nV+GkpIhDb1f3VbBdjcCfuS0NAVF0bzYEs+S4zO47frDZWOi1S9VjfIU +qOnhxFfyBSrJHxWuY/jtDeCDPaKQZz1as+Mq6yo4cFpJVR16wbPHP/TY74HVLaMJ +6DZHI0NK8a/b73lrwshTzWGSdY73V6FQimNOgFcqvZ24c7/JYh4W+N5jkbMM9JDK +cB5jC4aWTjI5C1+syoCyxKzLPN42Hans3AvrH5Q5+C/t6ZEhoa9dLcgM6tmFioXO +t6m4p0BtmOVoH1RHZLMQd9JQV9ntWvW64R0BHwT8BU8gwvVlwYkFkKCTle0E41en +P84LqYGEQfDJixPdYoRthXVhS8tIsluVrMK67/yW3Ydd0qIk6c7tupqWNQutG6tS +O+by0/Dh6aHxafmWl2xLlbA2Ht21uwIDAQABAoICAD4gnkB7E/6thdiwVPeIubv/ +fx94IKfRoFLMWCr3FxO2TfLUqxlLPtMVasPiawu7W17TumfOrm5R7YcbOR/lcbyK +4EtabCnlhuESVmPizRALudMXRIFGsHHGaZLeEkEzvRH0ry0L66yuD3qUvU86ZKoy +FgFsr3EWYjcgplLDA/4Odi4mN6y6zpYMRTY2SX6BvjEpw+VKwWTXyr4QD9gmCBCp +pJ/I9aGyHa+0gW0K13rMjV2WAtlCKA3wZ9P0boTCo2bXI7/s/mGUjx+TnHgu+nB/ +ryAgihLXCME02GgrEc/FiPdlNLo3u8HjE2i2oDdJXOJgmMh6sHmK9iWpqEJeimJ0 +kQxL+lXD29IDurxk+cBh5iOt09fTvbcOoiatCFuFs782kELgwAqLTBHvd7x2jTfg +P676EBe9vF+Do8WT9b3fBVnEGE1X1bl1TwSnLZ/BfzZhe8YITWubqoDs3izNTS/0 +guiw3KdI64yVLAB+ONL7J4JBiZXs9RsDJqMI9XNSNVV869j+NxAhxrcZ7ujPdWcu +gr9IV+hs9qMfllXIqut92EnsJif2/6QF6lHCLqLTziJ0D4MypDr3msEt78LJYs9K +vTXL2zhmnNibpaitJYxdpru/kqvL6CUubMUcieZHo0dwwW5h/Y8m+ebPHxoapElm +b9HUtHHo9rXvoMFzSuJBAoIBAQDFPYIeDplhddAUBP+NIAclqdZ64PfgGlhyTBDb +FGYLKpZva2KwTsHyr/wsc5mlipaCqF3K0YgLy2Xh5N7rE2eD7dd15mfnf6y2ByDU +rLRciwmPwJP3SbRckIWJagSt4k5vg6w7GUfZXxOhrBrLTAWWI/AdsLgOV42G/Uku +zUHEC0vUHCPKCAOWSqapC0WaMlROBTwHwC+6XHmxcgw7JBg/9K52SCQyGrgedEQO +PpswTo17lxbA3wk+Gyk4CKVVAppHxcJBzCIafTVkoh7/nbtYhKXS73ZrtSKNZoXN +ynpQc9byHD6ItfOIuhQQI1///hIHty7tkIPyTv6Zd4UuoPYJAoIBAQDq30uuE5Pq +U+WYF1CJzRFiRSDfT8bp3erdl6Y2mVhGCm6MANI2nDa8iQtTnK08zbQNjKrFDCzr +3uC4tHBHUOWm+PWgo3pTFXa2nlozsBuKNJ5DujFQjZ6T1eQ+JELttQ9fCT1x3qCW +zhd1vDi5X2Ku0RCHbRdpxGNZMdhMWYMG6XnFym1E3NAtlpQMLxkYFs6QcLHzXGDK +tRGs0+TqDGSMPP1B0zkp0D5330fJsK5p5VZsq0m+iHsIblFr7ithT81g7Sw6qNlv +MvbcE+GN9jGExE6UKm24b33i+G9rY+ZKyTDcU5Jjm9gyZrtzjwovCNbE6Z5vafQL +pNhnmNgILB6jAoIBAQCyNebw3WP8SQRecj2r9zo973xYvhd4ppUvgEbii0W/5RTT +SwV6I07dxeBlEXXLurJPD2zfKf5wGbDOL4qyf2/SJf29CxbqddNJDJu/TOQRkZZs +CiYnErMhx3rAM8hSi90uqJvfMfJXFq6YLvZupRuPEp2LVfaXDshTJVsQLGi1hzdW +SK6VhjQngP+gUQzsS3kcnWIl5qG5EoDpsRAYB37ZmmxfdsMtejLNYWN0M2DofrEa +7+KhFWQ2dzgA5t7rNlTLcIUaCiTuJh8t88VqU8vIKOHi+nXWz2yOsglHbSCivGeD +jb16rFuEOAyUoEHG/Hqx+fMutpphPrqPUwxEmy5xAoIBAQC1/VGpz7e0QU/MunwI +WqKWr5PaFPm/Ktoa26/J7KYOl3Sy/MvlzsNG8TWakvNVAtr6CL4lstp35Zng5Md/ +KvrXl17UGvfkuuPieu7P0Cx2uXDMb9BYZ3Oxf3G8iOJnR/1iGUnIGsX6NDTPYInf +cT6I0KvrE6epp6eHbGz6M/2n9G8LyF9ElYilWmDa0/+wv7NNDlWEiVbimszYStO8 +Wt/qHjPqtl07vgU5YwRADqwGl9KQn2SjwyL6FGj4pXJtk6VC7mNwDHd2h1nqi2kh +U9to0adFfV7JI3K61N+Yqa6+0ggPIJ50mkQ6QYoL75DngwTPTWobUFDuWMvp8e+q +3WzbAoIBAGnDcsQ8u1tk7K6dexykbK8zsJ9VazBJEpt6ExIrw6E8rsHMyZzygoI4 +00gW2eJQiDzqaOeIDXA0J9J1RAiOzCnfBPzIPlOvNuzAH0iOuzPx9kyhtVnuJSiv 
+neK9eoxd/MruDbl6jsSlBLP2BHhn4/Z03UltVeRql+wpLiY7dT/YmeDWQllRHIIn +JVs0tnBE+c52rGpEZnGosaOedWK2LQ4lLlGi6c2Oqzx8GInierKEHNr7ADEOosdV +KVAdCctLCwl7rzc+umkl8v2eIXTdfXbun69iEaO6S0Zq4us3hpjmyuBSKbonvTPc +sNyt0CfeWOACrfkDeQgeTEAYwHQK32M= +-----END PRIVATE KEY----- diff --git a/network/test_key_3.key b/network/test_key_3.key new file mode 100644 index 000000000000..de4f94c07524 --- /dev/null +++ b/network/test_key_3.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCyczAIaVkV7g2J +YfNc0dDO/k70A5kZF/iwFBou6WUzIL5AkCIhLFkY9J3NxzZijcLzvc5FGNjiKljm +OmJx0vj/ka2HEYYyLP3Klu5sFd81n40Z8L+iQRIQpnaUJB4h94bjH5V8XB8ts3jM +8sZGP50POK6ciavtx40XH2JHwgyS/PIeOxeFBh4ZG7ywHylj1ohhuTGfYzTaupdE +2PcU1zm/qU+WZuHxZE90P2VgYFJ/uVu2p8ZU1IRbiV0taicDd20/1XIQD/ywnXNR +LcfNZovqd2azY9Dfb3BvsXc5Tlxzw7EYtJDZ76GtxDBkLyqFuojncNrfKIoZ4SOx +nG4jIugNYcAUzdHAqMCOROWp/5h5nrJAMd3xrhAonE6akGkOR75iGkFZdc54CJBX +2Uj5k7h43O4yFnEasvvzwLjN2b05Glc/iAz0spURxfTzljPhPnrSOIPiD3lXwO85 +vX40FZjvrYhCbNca9cPRk78cPXTHt0Ownfbk9/N8GnWKY9oknBErg9bQ5cRkDi0k +mXobCAIaOU/5UZxbwfKnPLitaK9BAaiTxhwMHOvObxKJV1VJKm2Cr7matsESfF40 +I/zq/jmJApUQFhmM3HCdbKEHDDY8nW7QsKl15KrYo6xuRAEm7SwEu84bUm9AXj0V +tOUjmciOavN7l8qyRfjGAxcFFHbbLwIDAQABAoICAF9mLRuSFkofBaWYc93/XT4L +vnG9Kq+7nPulA4pow+1dZ9hkNZq+drNcsrA7+9scJkjgA9H0wp3GPdh3BgIL44nd +e4Nl3vQcEWGo5rVpy/aC4ZAooIpBd0LneyUfWeyWw8nQLL5bEWABPmdysoUDpRdq +Gg8dmoh30fm2kXgDSW2bRGTc+mnnJ1gM+Aa/4gS5wBdeRiULiEoWzp/DzfOaTAkm +nqGtRsUh90gSTEQFupUFpv4zG1hmdIvrF6EbyteTlRg3J89mfJR+2BNiAhrQ46Pp +SUJF7nhEvzs3CKHePpa024x1m6qm7SxUYfptLLJpUp9IOfprVckYD1j4Y/jsxFje +W/D7j/iNijzngDWcWHWG9tgyUOANpmTpTR3gyyZlAxPpoh6vIUSmH1pY6VSxvG41 +LYjDLLVWT16fpz3/VAcBH2YnwXgXPFXAeKsFsxMlsVDJWCt6+Eau355HKGyTqTzC +/ajfiPXIB9ld3iCBVDcpJQYEi/aPSj1ppsob6ZbYwwfiHvzzyGErMT6gv/VEM6XU +slYdY+RzLL5WUDNrooTbxwG0DGSUToLsTdgS5d/E6AAz1Zjb7W3wuusNfblNWFEo +gegajf7ZrvzV5kiCyFTqx336i7yUNzWz6DHen+tn7MnZMDVhz1gRDiuk3jS1T3f9 +6MA3XGy4nvfOTXrd5mDBAoIBAQDNHO12xK7OnTjF+/7mmCjGJT7qbw1K1UpmYCCX +TN7s01fvmpmZ8bYwbSej2NTaEg0V54oTcroWuZpU9LBW4XNdgvnCfCETRzMjYsrv +A+IWBeG7qhwfYKPbAIFo5ZYZzeXyahWkeLwDAG9LLnT3Dfku89swAsMxa5yPrZRO +Iy/BE4OBuIx0KNM1NeJ0KX3gH5waXyhz3LNWSzaVkK/ETtAY+Lsr8SZyNKxTOuI4 +y3c6MteFFv9VTvR0vcTWwSVGH5XkfCYG7KjDLzljCD2vRYbff1jft8Phz8Ez4GYZ +WShBTD3lCyBEwhdksMS5Jt7un2LNEw9wPLuFHql1TSwHwvm5AoIBAQDeuNhQSunU +Ra/pYP6Q7x3ZTthaRtzqkAvWHECHJW06Sjm+O7eyAm66F41w8l4a0mh+BMWGnybd +ZrNyj/ZMgrNQ7zmmD8pLL37aHSqfizzK1s2DPpJFS5MzBEf9kUD7fNLAzKledyzR +DW9k1NKm53X25+EM578FzTHYq3d0D9hXg4L4GgHJKYtEreKWfhOVL/Vz2Bk0idZf +T79tRJ7jnCX7r3wMBV1vL7rUM4LBYJ6f4rda/iua6zEC+St2cCB53XXnGUovC/iW +1K1GKWntyJVV6crdWmSlvWl7y6OTXSEhmXGCuAByGGjkOag0OcU2fCpwA5EvNRVv +IqZooabuAVAnAoIBAEC106UYwB2nNHYh3nVZo+N/dK35gzQMvoA165JQSjRlKOUK +3VLYEyaMCWmDywNRlpdGiSVBmLv6qloLKGcAkaj63VkiWD0AxX1weZ2WmAliqajP +LjgoAQniyvERHZ3ee6FTHqjY/lfkFzic24HmAqtxe8FV3ccFsEsT9CoCp9o+Ecsn +MgijqJ6s9Vi4jmHbFyCqzNRg2KNs7zeYghto0fZO4p4mYn69Z3CKrzxD1MWjrKLs +cnmzgyQhiqxGG0BYTq3bDRQ6LbQGfhBkVTGqubZhMuTB0Sa4qLd5IDz3B+Ax8YUF +UZTftwmpSycuwD6AQmd4j/JU9sQ0vDmpsy5vsOECggEAf1zK7ld4kdfr+ZRq3qyu +sAcDd9SQHl6TNjRfvijr5mptzNhPeq8jbK1tR8qBf+sUsAPAhPRcAD3rnjavDR+s +tTqoB8t9zjLx4n7NUgEImaHuUgAlGxVVrtXi6SD4PRgrWO9wZ0HVUhLRwaJmd+Vi +svf03TFlLkciGxoqrCcnexwMeB4/KS7lojehnJeqUSTkwwMvnri36zcqa6zTA6vW +mK/ISwOCY8Oyngh63GSJMTsvyQwSGXwnQeEFNqx2FdpLwwTWREMfO/mQwM+L8NtE +cqXISX6YkaLYQF+6Qdn+yTz7CVp5fsVwrho+kub6XObySa3wh0Ne53e0G59dWztK +/wKCAQEArQbzk/KDSJqRFgyK+HkZkygDllDy1ZYMY22SSCkONqFH17TVmRxNApnK +6K2EMjQaGTnTNrJqCDcojPJQcj2JJicvHp+PdOhzZWdqtvI9X0O7ZX4+IhlPsxiL 
+T9VtqhA5m1qG799RENc9RN+I+JRqrlictc7O2bnQLIv+tTVaTm6Ei0/5VhF2UUVs +gDPnYV6w0ehTWD1AwtjkRXKDdfKFh79dohqCyaNMoWtvy2JHvSEx49pz8Kbj/06K +DT+xUeix1DTcLWjUMOQHGuDZLd7cDG+J+WKAKCmRrmN7LC5Zzt+U+WCYARVz8mp0 +nYXu723STiOS//FeD/l8QRi2BJt3nQ== +-----END PRIVATE KEY----- diff --git a/network/test_network.go b/network/test_network.go index d8795e14e044..8079e76240c1 100644 --- a/network/test_network.go +++ b/network/test_network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" @@ -67,7 +66,7 @@ func (l *noopListener) Close() error { func (*noopListener) Addr() net.Addr { return &net.TCPAddr{ IP: net.IPv4zero, - Port: 0, + Port: 1, } } @@ -157,6 +156,8 @@ func NewTestNetwork( PeerListNonValidatorGossipSize: constants.DefaultNetworkPeerListNonValidatorGossipSize, PeerListPeersGossipSize: constants.DefaultNetworkPeerListPeersGossipSize, PeerListGossipFreq: constants.DefaultNetworkPeerListGossipFreq, + PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, }, DelayConfig: DelayConfig{ @@ -187,10 +188,8 @@ func NewTestNetwork( networkConfig.TLSConfig = tlsConfig networkConfig.TLSKey = tlsCert.PrivateKey.(crypto.Signer) - ctx := snow.DefaultConsensusContextTest() - beacons := validators.NewManager() networkConfig.Validators = currentValidators - networkConfig.Beacons = beacons + networkConfig.Beacons = validators.NewManager() // This never actually does anything because we never initialize the P-chain networkConfig.UptimeCalculator = uptime.NoOpCalculator @@ -207,7 +206,7 @@ func NewTestNetwork( return nil, err } networkConfig.CPUTargeter = tracker.NewTargeter( - ctx.Log, + logging.NoLog{}, &tracker.TargeterConfig{ VdrAlloc: float64(runtime.NumCPU()), MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), @@ -217,7 +216,7 @@ func NewTestNetwork( networkConfig.ResourceTracker.CPUTracker(), ) networkConfig.DiskTargeter = tracker.NewTargeter( - ctx.Log, + logging.NoLog{}, &tracker.TargeterConfig{ VdrAlloc: 1000 * units.GiB, MaxNonVdrUsage: 1000 * units.GiB, @@ -227,12 +226,7 @@ func NewTestNetwork( networkConfig.ResourceTracker.DiskTracker(), ) - networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 0) - - networkConfig.GossipTracker, err = peer.NewGossipTracker(metrics, "") - if err != nil { - return nil, err - } + networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 1) return NewNetwork( &networkConfig, diff --git a/network/throttling/bandwidth_throttler.go b/network/throttling/bandwidth_throttler.go index 5adfcb0062a2..d8244eb37974 100644 --- a/network/throttling/bandwidth_throttler.go +++ b/network/throttling/bandwidth_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling diff --git a/network/throttling/bandwidth_throttler_test.go b/network/throttling/bandwidth_throttler_test.go index 5d51555baa9f..11a687f3a91b 100644 --- a/network/throttling/bandwidth_throttler_test.go +++ b/network/throttling/bandwidth_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/common.go b/network/throttling/common.go index 9350fb4f684c..cedd5d732dbb 100644 --- a/network/throttling/common.go +++ b/network/throttling/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/dial_throttler.go b/network/throttling/dial_throttler.go index 491c312b95ef..07c04aefb812 100644 --- a/network/throttling/dial_throttler.go +++ b/network/throttling/dial_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/dial_throttler_test.go b/network/throttling/dial_throttler_test.go index db1776e8e24b..1dd57c2e78ad 100644 --- a/network/throttling/dial_throttler_test.go +++ b/network/throttling/dial_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_conn_throttler.go b/network/throttling/inbound_conn_throttler.go index 7f2206396ca8..5e1528074135 100644 --- a/network/throttling/inbound_conn_throttler.go +++ b/network/throttling/inbound_conn_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_conn_throttler_test.go b/network/throttling/inbound_conn_throttler_test.go index 0b5d1ccd7fb8..9e2fde15e825 100644 --- a/network/throttling/inbound_conn_throttler_test.go +++ b/network/throttling/inbound_conn_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_conn_upgrade_throttler.go b/network/throttling/inbound_conn_upgrade_throttler.go index 9d058e29ba12..4df5ee39b776 100644 --- a/network/throttling/inbound_conn_upgrade_throttler.go +++ b/network/throttling/inbound_conn_upgrade_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_conn_upgrade_throttler_test.go b/network/throttling/inbound_conn_upgrade_throttler_test.go index d0e1fe93c84a..2f6cd926451e 100644 --- a/network/throttling/inbound_conn_upgrade_throttler_test.go +++ b/network/throttling/inbound_conn_upgrade_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_msg_buffer_throttler.go b/network/throttling/inbound_msg_buffer_throttler.go index d06177839ea2..65306eea7d51 100644 --- a/network/throttling/inbound_msg_buffer_throttler.go +++ b/network/throttling/inbound_msg_buffer_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_msg_buffer_throttler_test.go b/network/throttling/inbound_msg_buffer_throttler_test.go index 76f399b6e94e..11d655c1c4fc 100644 --- a/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/network/throttling/inbound_msg_buffer_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_msg_byte_throttler.go b/network/throttling/inbound_msg_byte_throttler.go index 659d9f398309..459df7a11b5d 100644 --- a/network/throttling/inbound_msg_byte_throttler.go +++ b/network/throttling/inbound_msg_byte_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_msg_byte_throttler_test.go b/network/throttling/inbound_msg_byte_throttler_test.go index e71f0abba238..68a12965ff1e 100644 --- a/network/throttling/inbound_msg_byte_throttler_test.go +++ b/network/throttling/inbound_msg_byte_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_msg_throttler.go b/network/throttling/inbound_msg_throttler.go index 3d79f640ae1a..ea9167deca15 100644 --- a/network/throttling/inbound_msg_throttler.go +++ b/network/throttling/inbound_msg_throttler.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling import ( "context" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -13,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) @@ -90,7 +90,7 @@ func NewInboundMsgThrottler( return nil, err } cpuThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_cpu", namespace), + metric.AppendNamespace(namespace, "cpu"), registerer, throttlerConfig.CPUThrottlerConfig, resourceTracker.CPUTracker(), @@ -100,7 +100,7 @@ func NewInboundMsgThrottler( return nil, err } diskThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_disk", namespace), + metric.AppendNamespace(namespace, "disk"), registerer, throttlerConfig.DiskThrottlerConfig, resourceTracker.DiskTracker(), diff --git a/network/throttling/inbound_resource_throttler.go b/network/throttling/inbound_resource_throttler.go index 42873fe42d6a..eb0e939b8d9e 100644 --- a/network/throttling/inbound_resource_throttler.go +++ b/network/throttling/inbound_resource_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/inbound_resource_throttler_test.go b/network/throttling/inbound_resource_throttler_test.go index eebbaf7dc851..bfc5a726c14b 100644 --- a/network/throttling/inbound_resource_throttler_test.go +++ b/network/throttling/inbound_resource_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/no_inbound_msg_throttler.go b/network/throttling/no_inbound_msg_throttler.go index de6e03f81502..6f7af32fb135 100644 --- a/network/throttling/no_inbound_msg_throttler.go +++ b/network/throttling/no_inbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/outbound_msg_throttler.go b/network/throttling/outbound_msg_throttler.go index 6f5ad24561f3..e1656c04ffe1 100644 --- a/network/throttling/outbound_msg_throttler.go +++ b/network/throttling/outbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/outbound_msg_throttler_test.go b/network/throttling/outbound_msg_throttler_test.go index 09d8b6f272ef..1930d935171b 100644 --- a/network/throttling/outbound_msg_throttler_test.go +++ b/network/throttling/outbound_msg_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/throttling/release_func.go b/network/throttling/release_func.go index 0abe2bf4270d..e2cbcf1b1b20 100644 --- a/network/throttling/release_func.go +++ b/network/throttling/release_func.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/network/tracked_ip.go b/network/tracked_ip.go index ca673f76b91d..6a95bbee5a47 100644 --- a/network/tracked_ip.go +++ b/network/tracked_ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/network/tracked_ip_test.go b/network/tracked_ip_test.go index bbf6267d86db..956f02cc19b4 100644 --- a/network/tracked_ip_test.go +++ b/network/tracked_ip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/node/beacon_manager.go b/node/beacon_manager.go index af088f3b4845..9b6806fdf037 100644 --- a/node/beacon_manager.go +++ b/node/beacon_manager.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package node import ( + "sync" "sync/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) @@ -18,10 +18,11 @@ var _ router.Router = (*beaconManager)(nil) type beaconManager struct { router.Router - timer *timer.Timer - beacons validators.Manager - requiredConns int64 - numConns int64 + beacons validators.Manager + requiredConns int64 + numConns int64 + onSufficientlyConnected chan struct{} + onceOnSufficientlyConnected sync.Once } func (b *beaconManager) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { @@ -29,7 +30,9 @@ func (b *beaconManager) Connected(nodeID ids.NodeID, nodeVersion *version.Applic if isBeacon && constants.PrimaryNetworkID == subnetID && atomic.AddInt64(&b.numConns, 1) >= b.requiredConns { - b.timer.Cancel() + b.onceOnSufficientlyConnected.Do(func() { + close(b.onSufficientlyConnected) + }) } b.Router.Connected(nodeID, nodeVersion, subnetID) } diff --git a/node/beacon_manager_test.go b/node/beacon_manager_test.go index 82be435e92f1..82b47efdacd6 100644 --- a/node/beacon_manager_test.go +++ b/node/beacon_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) @@ -41,10 +40,10 @@ func TestBeaconManager_DataRace(t *testing.T) { mockRouter := router.NewMockRouter(ctrl) b := beaconManager{ - Router: mockRouter, - timer: timer.NewTimer(nil), - beacons: validatorSet, - requiredConns: numValidators, + Router: mockRouter, + beacons: validatorSet, + requiredConns: numValidators, + onSufficientlyConnected: make(chan struct{}), } // connect numValidators validators, each with a weight of 1 diff --git a/node/config.go b/node/config.go index 4906dfcb3705..de8923352e74 100644 --- a/node/config.go +++ b/node/config.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package node @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" @@ -29,7 +28,6 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/profiler" @@ -84,19 +82,16 @@ type APIConfig struct { } type IPConfig struct { - IPPort ips.DynamicIPPort `json:"ip"` - IPUpdater dynamicip.Updater `json:"-"` - IPResolutionFreq time.Duration `json:"ipResolutionFrequency"` - // True if we attempted NAT traversal - AttemptedNATTraversal bool `json:"attemptedNATTraversal"` - // Tries to perform network address translation - Nat nat.Router `json:"-"` + PublicIP string `json:"publicIP"` + PublicIPResolutionService string `json:"publicIPResolutionService"` + PublicIPResolutionFreq time.Duration `json:"publicIPResolutionFreq"` // The host portion of the address to listen on. The port to // listen on will be sourced from IPPort. // // - If empty, listen on all interfaces (both ipv4 and ipv6). // - If populated, listen only on the specified address. ListenHost string `json:"listenHost"` + ListenPort uint16 `json:"listenPort"` } type StakingConfig struct { @@ -117,12 +112,6 @@ type StateSyncConfig struct { } type BootstrapConfig struct { - // Should Bootstrap be retried - RetryBootstrap bool `json:"retryBootstrap"` - - // Max number of times to retry bootstrap before warning the node operator - RetryBootstrapWarnFrequency int `json:"retryBootstrapWarnFrequency"` - // Timeout before emitting a warn log when connecting to bootstrapping beacons BootstrapBeaconConnectionTimeout time.Duration `json:"bootstrapBeaconConnectionTimeout"` @@ -141,6 +130,9 @@ type BootstrapConfig struct { } type DatabaseConfig struct { + // If true, all writes are to memory and are discarded at node shutdown. 
+ ReadOnly bool `json:"readOnly"` + // Path to database Path string `json:"path"` @@ -194,8 +186,8 @@ type Config struct { ConsensusRouter router.Router `json:"-"` RouterHealthConfig router.HealthConfig `json:"routerHealthConfig"` ConsensusShutdownTimeout time.Duration `json:"consensusShutdownTimeout"` - // Gossip a container in the accepted frontier every [AcceptedFrontierGossipFrequency] - AcceptedFrontierGossipFrequency time.Duration `json:"consensusGossipFreq"` + // Poll for new frontiers every [FrontierPollFrequency] + FrontierPollFrequency time.Duration `json:"consensusGossipFreq"` // ConsensusAppConcurrency defines the maximum number of goroutines to // handle App messages per chain. ConsensusAppConcurrency int `json:"consensusAppConcurrency"` diff --git a/node/insecure_validator_manager.go b/node/insecure_validator_manager.go index bd69529619dc..0e23b8b90cc3 100644 --- a/node/insecure_validator_manager.go +++ b/node/insecure_validator_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package node @@ -27,7 +27,7 @@ func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *vers // peer as a validator. Because each validator needs a txID associated // with it, we hack one together by padding the nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], vdrID[:]) + copy(dummyTxID[:], vdrID.Bytes()) err := i.vdrs.AddStaker(constants.PrimaryNetworkID, vdrID, nil, dummyTxID, i.weight) if err != nil { diff --git a/node/node.go b/node/node.go index 2aaa0ba2d3fc..f82bf962c7fc 100644 --- a/node/node.go +++ b/node/node.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -52,11 +52,13 @@ import ( "github.com/ava-labs/avalanchego/database/meterdb" "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" "github.com/ava-labs/avalanchego/ipcs" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" @@ -73,6 +75,7 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/ips" @@ -82,23 +85,28 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" platformconfig "github.com/ava-labs/avalanchego/vms/platformvm/config" ) +const ( + stakingPortName = constants.AppName + "-staking" + httpPortName = constants.AppName + "-http" + + ipResolutionTimeout = 30 * time.Second +) + var ( genesisHashKey = []byte("genesisID") ungracefulShutdown = []byte("ungracefulShutdown") @@ -110,6 +118,161 @@ var ( errShuttingDown = errors.New("server shutting down") ) +// New returns an instance of Node +func New( + config *Config, + logFactory logging.Factory, + logger logging.Logger, +) (*Node, error) { + tlsCert := config.StakingTLSCert.Leaf + stakingCert, err := staking.CertificateFromX509(tlsCert) + if err != nil { + return nil, fmt.Errorf("failed to create staking certificate out of x509 cert: %w", err) + } + + if err := staking.ValidateCertificate(stakingCert); err != nil { + return nil, fmt.Errorf("invalid staking certificate: %w", err) + } + + n := &Node{ + Log: logger, + LogFactory: logFactory, + ID: stakingCert.NodeID, + Config: config, + } + + n.DoneShuttingDown.Add(1) + + pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) + logger.Info("initializing node", + zap.Stringer("version", version.CurrentApp), + zap.Stringer("nodeID", n.ID), + zap.Stringer("stakingKeyType", tlsCert.PublicKeyAlgorithm), + zap.Reflect("nodePOP", pop), + zap.Reflect("providedFlags", n.Config.ProvidedFlags), + zap.Reflect("config", n.Config), + ) + + n.VMFactoryLog, err = logFactory.Make("vm-factory") + if err != nil { + return nil, fmt.Errorf("problem creating vm logger: %w", err) + } + + n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) + + if err := 
n.initBootstrappers(); err != nil { // Configure the bootstrappers + return nil, fmt.Errorf("problem initializing node beacons: %w", err) + } + + // Set up tracer + n.tracer, err = trace.New(n.Config.TraceConfig) + if err != nil { + return nil, fmt.Errorf("couldn't initialize tracer: %w", err) + } + + if n.Config.TraceConfig.Enabled { + n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) + } + + n.initMetrics() + n.initNAT() + if err := n.initAPIServer(); err != nil { // Start the API Server + return nil, fmt.Errorf("couldn't initialize API server: %w", err) + } + + if err := n.initMetricsAPI(); err != nil { // Start the Metrics API + return nil, fmt.Errorf("couldn't initialize metrics API: %w", err) + } + + if err := n.initDatabase(); err != nil { // Set up the node's database + return nil, fmt.Errorf("problem initializing database: %w", err) + } + + if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API + return nil, fmt.Errorf("couldn't initialize keystore API: %w", err) + } + + n.initSharedMemory() // Initialize shared memory + + // message.Creator is shared between networking, chainManager and the engine. + // It must be initialized before networking (initNetworking), the chain manager (initChainManager) + // and the engine (initChains), but after the metrics API (initMetricsAPI). + // message.Creator currently records metrics under the network namespace. + n.networkNamespace = "network" + n.msgCreator, err = message.NewCreator( + n.Log, + n.MetricsRegisterer, + n.networkNamespace, + n.Config.NetworkConfig.CompressionType, + n.Config.NetworkConfig.MaximumInboundMessageTimeout, + ) + if err != nil { + return nil, fmt.Errorf("problem initializing message creator: %w", err) + } + + n.vdrs = validators.NewManager() + if !n.Config.SybilProtectionEnabled { + logger.Warn("sybil control is not enforced") + n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) + } + if err := n.initResourceManager(n.MetricsRegisterer); err != nil { + return nil, fmt.Errorf("problem initializing resource manager: %w", err) + } + n.initCPUTargeter(&config.CPUTargeterConfig) + n.initDiskTargeter(&config.DiskTargeterConfig) + if err := n.initNetworking(); err != nil { // Set up networking layer. + return nil, fmt.Errorf("problem initializing networking: %w", err) + } + + n.initEventDispatchers() + + // Start the Health API + // Has to be initialized before chain manager + // [n.Net] must already be set + if err := n.initHealthAPI(); err != nil { + return nil, fmt.Errorf("couldn't initialize health API: %w", err) + } + if err := n.addDefaultVMAliases(); err != nil { + return nil, fmt.Errorf("couldn't initialize VM aliases: %w", err) + } + if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager + return nil, fmt.Errorf("couldn't initialize chain manager: %w", err) + } + if err := n.initVMs(); err != nil { // Initialize the VM registry.
+ return nil, fmt.Errorf("couldn't initialize VM registry: %w", err) + } + if err := n.initAdminAPI(); err != nil { // Start the Admin API + return nil, fmt.Errorf("couldn't initialize admin API: %w", err) + } + if err := n.initInfoAPI(); err != nil { // Start the Info API + return nil, fmt.Errorf("couldn't initialize info API: %w", err) + } + if err := n.initIPCs(); err != nil { // Start the IPCs + return nil, fmt.Errorf("couldn't initialize IPCs: %w", err) + } + if err := n.initIPCAPI(); err != nil { // Start the IPC API + return nil, fmt.Errorf("couldn't initialize the IPC API: %w", err) + } + if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chain aliases: %w", err) + } + if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) + } + if err := n.initIndexer(); err != nil { + return nil, fmt.Errorf("couldn't initialize indexer: %w", err) + } + + n.health.Start(context.TODO(), n.Config.HealthCheckFreq) + n.initProfiler() + + // Start the Platform chain + if err := n.initChains(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chains: %w", err) + } + return n, nil +} + // Node is an instance of an Avalanche node. type Node struct { Log logging.Logger @@ -123,6 +286,10 @@ type Node struct { // Storage for this node DB database.Database + router nat.Router + portMapper *nat.Mapper + ipUpdater dynamicip.Updater + // Profiles the process. Nil if continuous profiling is disabled. profiler profiler.ContinuousProfiler @@ -237,8 +404,6 @@ type Node struct { // Initialize the networking layer. // Assumes [n.vdrs], [n.CPUTracker], and [n.CPUTargeter] have been initialized. func (n *Node) initNetworking() error { - currentIPPort := n.Config.IPPort.IPPort() - // Providing either loopback address - `::1` for ipv6 and `127.0.0.1` for ipv4 - as the listen // host will avoid the need for a firewall exception on recent MacOS: // @@ -256,8 +421,7 @@ func (n *Node) initNetworking() error { // // 1: https://apple.stackexchange.com/questions/393715/do-you-want-the-application-main-to-accept-incoming-network-connections-pop // 2: https://github.com/golang/go/issues/56998 - listenAddress := net.JoinHostPort(n.Config.ListenHost, strconv.FormatUint(uint64(currentIPPort.Port), 10)) - + listenAddress := net.JoinHostPort(n.Config.ListenHost, strconv.FormatUint(uint64(n.Config.ListenPort), 10)) listener, err := net.Listen(constants.NetworkType, listenAddress) if err != nil { return err @@ -265,23 +429,67 @@ func (n *Node) initNetworking() error { // Wrap listener so it will only accept a certain number of incoming connections per second listener = throttling.NewThrottledListener(listener, n.Config.NetworkConfig.ThrottlerConfig.MaxInboundConnsPerSec) - ipPort, err := ips.ToIPPort(listener.Addr().String()) + // Record the bound address to enable inclusion in process context file. + n.stakingAddress = listener.Addr().String() + ipPort, err := ips.ToIPPort(n.stakingAddress) if err != nil { - n.Log.Info("initializing networking", - zap.Stringer("currentNodeIP", currentIPPort), - ) - } else { - ipPort = ips.IPPort{ - IP: currentIPPort.IP, - Port: ipPort.Port, + return err + } + + var dynamicIP ips.DynamicIPPort + switch { + case n.Config.PublicIP != "": + // Use the specified public IP. 
+ ipPort.IP = net.ParseIP(n.Config.PublicIP) + if ipPort.IP == nil { + return fmt.Errorf("invalid IP Address: %s", n.Config.PublicIP) } - n.Log.Info("initializing networking", - zap.Stringer("currentNodeIP", ipPort), + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewNoUpdater() + case n.Config.PublicIPResolutionService != "": + // Use dynamic IP resolution. + resolver, err := dynamicip.NewResolver(n.Config.PublicIPResolutionService) + if err != nil { + return fmt.Errorf("couldn't create IP resolver: %w", err) + } + + // Use that to resolve our public IP. + ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) + ipPort.IP, err = resolver.Resolve(ctx) + cancel() + if err != nil { + return fmt.Errorf("couldn't resolve public IP: %w", err) + } + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewUpdater(dynamicIP, resolver, n.Config.PublicIPResolutionFreq) + default: + ipPort.IP, err = n.router.ExternalIP() + if err != nil { + return fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) + } + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewNoUpdater() + } + + if ipPort.IP.IsLoopback() || ipPort.IP.IsPrivate() { + n.Log.Warn("P2P IP is private, you will not be publicly discoverable", + zap.Stringer("ip", ipPort), ) } - // Record the bound address to enable inclusion in process context file. - n.stakingAddress = listener.Addr().String() + // Regularly update our public IP and port mappings. + n.portMapper.Map( + ipPort.Port, + ipPort.Port, + stakingPortName, + dynamicIP, + n.Config.PublicIPResolutionFreq, + ) + go n.ipUpdater.Dispatch(n.Log) + + n.Log.Info("initializing networking", + zap.Stringer("ip", ipPort), + ) tlsKey, ok := n.Config.StakingTLSCert.PrivateKey.(crypto.Signer) if !ok { @@ -298,6 +506,25 @@ func (n *Node) initNetworking() error { ) } + // We allow nodes to gossip unknown ACPs in case the current ACPs constant + // becomes out of date. + var unknownACPs set.Set[uint32] + for acp := range n.Config.NetworkConfig.SupportedACPs { + if !constants.CurrentACPs.Contains(acp) { + unknownACPs.Add(acp) + } + } + for acp := range n.Config.NetworkConfig.ObjectedACPs { + if !constants.CurrentACPs.Contains(acp) { + unknownACPs.Add(acp) + } + } + if unknownACPs.Len() > 0 { + n.Log.Warn("gossipping unknown ACPs", + zap.Reflect("acps", unknownACPs), + ) + } + tlsConfig := peer.TLSConfig(n.Config.StakingTLSCert, n.tlsKeyLogWriterCloser) // Configure benchlist @@ -313,7 +540,7 @@ func (n *Node) initNetworking() error { // a validator. Because each validator needs a txID associated with it, // we hack one together by just padding our nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], n.ID[:]) + copy(dummyTxID[:], n.ID.Bytes()) err := n.vdrs.AddStaker( constants.PrimaryNetworkID, @@ -338,46 +565,38 @@ func (n *Node) initNetworking() error { requiredConns := (3*numBootstrappers + 3) / 4 if requiredConns > 0 { - // Set a timer that will fire after a given timeout unless we connect - // to a sufficient portion of nodes. If the timeout fires, the node will - // shutdown. - timer := timer.NewTimer(func() { - // If the timeout fires and we're already shutting down, nothing to do. 
- if !n.shuttingDown.Get() { + onSufficientlyConnected := make(chan struct{}) + consensusRouter = &beaconManager{ + Router: consensusRouter, + beacons: n.bootstrappers, + requiredConns: int64(requiredConns), + onSufficientlyConnected: onSufficientlyConnected, + } + + // Log a warning if we aren't able to connect to a sufficient portion of + // nodes. + go func() { + timer := time.NewTimer(n.Config.BootstrapBeaconConnectionTimeout) + defer timer.Stop() + + select { + case <-timer.C: + if n.shuttingDown.Get() { + return + } n.Log.Warn("failed to connect to bootstrap nodes", zap.Stringer("bootstrappers", n.bootstrappers), zap.Duration("duration", n.Config.BootstrapBeaconConnectionTimeout), ) + case <-onSufficientlyConnected: } - }) - - go timer.Dispatch() - timer.SetTimeoutIn(n.Config.BootstrapBeaconConnectionTimeout) - - consensusRouter = &beaconManager{ - Router: consensusRouter, - timer: timer, - beacons: n.bootstrappers, - requiredConns: int64(requiredConns), - } + }() } - // initialize gossip tracker - gossipTracker, err := peer.NewGossipTracker(n.MetricsRegisterer, n.networkNamespace) - if err != nil { - return err - } - - // keep gossip tracker synchronized with the validator set - n.vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, &peer.GossipTrackerCallback{ - Log: n.Log, - GossipTracker: gossipTracker, - }) - // add node configs to network config n.Config.NetworkConfig.Namespace = n.networkNamespace n.Config.NetworkConfig.MyNodeID = n.ID - n.Config.NetworkConfig.MyIPPort = n.Config.IPPort + n.Config.NetworkConfig.MyIPPort = dynamicIP n.Config.NetworkConfig.NetworkID = n.Config.NetworkID n.Config.NetworkConfig.Validators = n.vdrs n.Config.NetworkConfig.Beacons = n.bootstrappers @@ -389,7 +608,6 @@ func (n *Node) initNetworking() error { n.Config.NetworkConfig.ResourceTracker = n.resourceTracker n.Config.NetworkConfig.CPUTargeter = n.cpuTargeter n.Config.NetworkConfig.DiskTargeter = n.diskTargeter - n.Config.NetworkConfig.GossipTracker = gossipTracker n.Net, err = network.NewNetwork( &n.Config.NetworkConfig, @@ -541,6 +759,10 @@ func (n *Node) initDatabase() error { ) } + if n.Config.ReadOnly && n.Config.DatabaseConfig.Name != memdb.Name { + n.DB = versiondb.New(n.DB) + } + var err error n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) if err != nil { @@ -691,16 +913,76 @@ func (n *Node) initMetrics() { n.MetricsGatherer = metrics.NewMultiGatherer() } +func (n *Node) initNAT() { + n.Log.Info("initializing NAT") + + if n.Config.PublicIP == "" && n.Config.PublicIPResolutionService == "" { + n.router = nat.GetRouter() + if !n.router.SupportsNAT() { + n.Log.Warn("UPnP and NAT-PMP router attach failed, " + + "you may not be listening publicly. " + + "Please confirm the settings in your router") + } + } else { + n.router = nat.NewNoRouter() + } + + n.portMapper = nat.NewPortMapper(n.Log, n.router) +} + // initAPIServer initializes the server that handles HTTP calls func (n *Node) initAPIServer() error { n.Log.Info("initializing API server") + // An empty host is treated as a wildcard to match all addresses, so it is + // considered public. 
+ hostIsPublic := n.Config.HTTPHost == "" + if !hostIsPublic { + ip, err := ips.Lookup(n.Config.HTTPHost) + if err != nil { + n.Log.Fatal("failed to lookup HTTP host", + zap.String("host", n.Config.HTTPHost), + zap.Error(err), + ) + return err + } + hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + + n.Log.Debug("finished HTTP host lookup", + zap.String("host", n.Config.HTTPHost), + zap.Stringer("ip", ip), + zap.Bool("isPublic", hostIsPublic), + ) + } + listenAddress := net.JoinHostPort(n.Config.HTTPHost, strconv.FormatUint(uint64(n.Config.HTTPPort), 10)) listener, err := net.Listen("tcp", listenAddress) if err != nil { return err } + addr := listener.Addr().String() + ipPort, err := ips.ToIPPort(addr) + if err != nil { + return err + } + + // Don't open the HTTP port if the HTTP server is private + if hostIsPublic { + n.Log.Warn("HTTP server is binding to a potentially public host. "+ + "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", + zap.String("host", n.Config.HTTPHost), + ) + + n.portMapper.Map( + ipPort.Port, + ipPort.Port, + httpPortName, + nil, + n.Config.PublicIPResolutionFreq, + ) + } + protocol := "http" if n.Config.HTTPSEnabled { cert, err := tls.X509KeyPair(n.Config.HTTPSCert, n.Config.HTTPSKey) @@ -836,7 +1118,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { return fmt.Errorf("couldn't initialize chain router: %w", err) } - n.chainManager = chains.New(&chains.ManagerConfig{ + n.chainManager, err = chains.New(&chains.ManagerConfig{ SybilProtectionEnabled: n.Config.SybilProtectionEnabled, StakingTLSCert: n.Config.StakingTLSCert, StakingBLSKey: n.Config.StakingSigningKey, @@ -863,26 +1145,27 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { CriticalChains: criticalChains, TimeoutManager: n.timeoutManager, Health: n.health, - RetryBootstrap: n.Config.RetryBootstrap, - RetryBootstrapWarnFrequency: n.Config.RetryBootstrapWarnFrequency, ShutdownNodeFunc: n.Shutdown, MeterVMEnabled: n.Config.MeterVMEnabled, Metrics: n.MetricsGatherer, SubnetConfigs: n.Config.SubnetConfigs, ChainConfigs: n.Config.ChainConfigs, - AcceptedFrontierGossipFrequency: n.Config.AcceptedFrontierGossipFrequency, + FrontierPollFrequency: n.Config.FrontierPollFrequency, ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), - ApricotPhase4MinPChainHeight: version.GetApricotPhase4MinPChainHeight(n.Config.NetworkID), + ApricotPhase4MinPChainHeight: version.ApricotPhase4MinPChainHeight[n.Config.NetworkID], ResourceTracker: n.resourceTracker, StateSyncBeacons: n.Config.StateSyncIDs, TracingEnabled: n.Config.TraceConfig.Enabled, Tracer: n.tracer, ChainDataDir: n.Config.ChainDataDir, }) + if err != nil { + return fmt.Errorf("couldn't initialize chain manager: %w", err) + } // Notify the API server when new chains are created n.chainManager.AddRegistrant(n.APIServer) @@ -902,16 +1185,20 @@ func (n *Node) initVMs() error { vdrs = validators.NewManager() } - vmRegisterer := registry.NewVMRegisterer(registry.VMRegistererConfig{ - APIServer: n.APIServer, - Log: n.Log, - VMFactoryLog: n.VMFactoryLog, - VMManager: n.VMManager, - }) + durangoTime := version.GetDurangoTime(n.Config.NetworkID) + if err := txs.InitCodec(durangoTime); err != nil { + return
err + } + if err := block.InitCodec(durangoTime); err != nil { + return err + } + if err := coreth.InitCodec(durangoTime); err != nil { + return err + } // Register the VMs that Avalanche supports err := utils.Err( - vmRegisterer.Register(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ + n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ Chains: n.chainManager, Validators: vdrs, @@ -940,22 +1227,20 @@ func (n *Node) initVMs() error { ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), BanffTime: version.GetBanffTime(n.Config.NetworkID), CortinaTime: version.GetCortinaTime(n.Config.NetworkID), - DTime: version.GetDTime(n.Config.NetworkID), + DurangoTime: durangoTime, AthensPhaseTime: version.GetAthensPhaseTime(n.Config.NetworkID), BerlinPhaseTime: version.GetBerlinPhaseTime(n.Config.NetworkID), UseCurrentHeight: n.Config.UseCurrentHeight, }, }), - vmRegisterer.Register(context.TODO(), constants.AVMID, &avm.Factory{ + n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ Config: avmconfig.Config{ TxFee: n.Config.TxFee, CreateAssetTxFee: n.Config.CreateAssetTxFee, + DurangoTime: durangoTime, }, }), - vmRegisterer.Register(context.TODO(), constants.EVMID, &coreth.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), secp256k1fx.ID, &secp256k1fx.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), nftfx.ID, &nftfx.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), propertyfx.ID, &propertyfx.Factory{}), + n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), ) if err != nil { return err @@ -973,7 +1258,7 @@ func (n *Node) initVMs() error { CPUTracker: n.resourceManager, RuntimeTracker: n.runtimeManager, }), - VMRegisterer: vmRegisterer, + VMManager: n.VMManager, }) // register any vms that need to be installed as plugins from disk @@ -1060,6 +1345,7 @@ func (n *Node) initAdminAPI() error { admin.Config{ Secret: n.Config.AdminAPIEnabledSecret, Log: n.Log, + DB: n.DB, ChainManager: n.chainManager, HTTPServer: n.APIServer, ProfileDir: n.Config.ProfilerConfig.Dir, @@ -1133,6 +1419,7 @@ func (n *Node) initInfoAPI() error { GenesisBytes: n.Config.GenesisBytes, }, n.Log, + n.vdrs, n.chainManager, n.VMManager, n.Config.NetworkConfig.MyIPPort, @@ -1354,158 +1641,6 @@ func (n *Node) initDiskTargeter( ) } -// Initialize this node -func (n *Node) Initialize( - config *Config, - logger logging.Logger, - logFactory logging.Factory, -) error { - tlsCert := config.StakingTLSCert.Leaf - stakingCert := staking.CertificateFromX509(tlsCert) - if err := staking.ValidateCertificate(stakingCert); err != nil { - return fmt.Errorf("invalid staking certificate: %w", err) - } - - n.Log = logger - n.Config = config - // Get the nodeID from certificate (secp256k1 public key) - nodeID, err := peer.CertToID(tlsCert) - if err != nil { - return fmt.Errorf("cannot extract nodeID from certificate: %w", err) - } - n.ID = nodeID - n.LogFactory = logFactory - n.DoneShuttingDown.Add(1) - - pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) - n.Log.Info("initializing node", - zap.Stringer("version", version.CurrentApp), - zap.Stringer("nodeID", n.ID), - zap.Stringer("stakingKeyType", tlsCert.PublicKeyAlgorithm), - zap.Reflect("nodePOP", pop), - zap.Reflect("providedFlags", n.Config.ProvidedFlags), - zap.Reflect("config", n.Config), - ) - - n.VMFactoryLog, err = logFactory.Make("vm-factory") - if err != nil { - return fmt.Errorf("problem creating vm logger: %w", 
err) - } - - n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) - - if err := n.initBootstrappers(); err != nil { // Configure the bootstrappers - return fmt.Errorf("problem initializing node beacons: %w", err) - } - - // Set up tracer - n.tracer, err = trace.New(n.Config.TraceConfig) - if err != nil { - return fmt.Errorf("couldn't initialize tracer: %w", err) - } - - if n.Config.TraceConfig.Enabled { - n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) - } - - n.initMetrics() - - if err := n.initAPIServer(); err != nil { // Start the API Server - return fmt.Errorf("couldn't initialize API server: %w", err) - } - - if err := n.initMetricsAPI(); err != nil { // Start the Metrics API - return fmt.Errorf("couldn't initialize metrics API: %w", err) - } - - if err := n.initDatabase(); err != nil { // Set up the node's database - return fmt.Errorf("problem initializing database: %w", err) - } - - if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API - return fmt.Errorf("couldn't initialize keystore API: %w", err) - } - - n.initSharedMemory() // Initialize shared memory - - // message.Creator is shared between networking, chainManager and the engine. - // It must be initiated before networking (initNetworking), chain manager (initChainManager) - // and the engine (initChains) but after the metrics (initMetricsAPI) - // message.Creator currently record metrics under network namespace - n.networkNamespace = "network" - n.msgCreator, err = message.NewCreator( - n.Log, - n.MetricsRegisterer, - n.networkNamespace, - n.Config.NetworkConfig.CompressionType, - n.Config.NetworkConfig.MaximumInboundMessageTimeout, - ) - if err != nil { - return fmt.Errorf("problem initializing message creator: %w", err) - } - - n.vdrs = validators.NewManager() - if !n.Config.SybilProtectionEnabled { - n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) - } - if err := n.initResourceManager(n.MetricsRegisterer); err != nil { - return fmt.Errorf("problem initializing resource manager: %w", err) - } - n.initCPUTargeter(&config.CPUTargeterConfig) - n.initDiskTargeter(&config.DiskTargeterConfig) - if err := n.initNetworking(); err != nil { // Set up networking layer. - return fmt.Errorf("problem initializing networking: %w", err) - } - - n.initEventDispatchers() - - // Start the Health API - // Has to be initialized before chain manager - // [n.Net] must already be set - if err := n.initHealthAPI(); err != nil { - return fmt.Errorf("couldn't initialize health API: %w", err) - } - if err := n.addDefaultVMAliases(); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager - return fmt.Errorf("couldn't initialize chain manager: %w", err) - } - if err := n.initVMs(); err != nil { // Initialize the VM registry. 
- return fmt.Errorf("couldn't initialize VM registry: %w", err) - } - if err := n.initAdminAPI(); err != nil { // Start the Admin API - return fmt.Errorf("couldn't initialize admin API: %w", err) - } - if err := n.initInfoAPI(); err != nil { // Start the Info API - return fmt.Errorf("couldn't initialize info API: %w", err) - } - if err := n.initIPCs(); err != nil { // Start the IPCs - return fmt.Errorf("couldn't initialize IPCs: %w", err) - } - if err := n.initIPCAPI(); err != nil { // Start the IPC API - return fmt.Errorf("couldn't initialize the IPC API: %w", err) - } - if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chain aliases: %w", err) - } - if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initIndexer(); err != nil { - return fmt.Errorf("couldn't initialize indexer: %w", err) - } - - n.health.Start(context.TODO(), n.Config.HealthCheckFreq) - n.initProfiler() - - // Start the Platform chain - if err := n.initChains(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chains: %w", err) - } - return nil -} - // Shutdown this node // May be called multiple times func (n *Node) Shutdown(exitCode int) { @@ -1564,6 +1699,8 @@ func (n *Node) shutdown() { zap.Error(err), ) } + n.portMapper.UnmapAllPorts() + n.ipUpdater.Stop() if err := n.indexer.Close(); err != nil { n.Log.Debug("error closing tx indexer", zap.Error(err), diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 91d8c198a4c3..4dd49b65eab6 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package node diff --git a/node/overridden_manager_test.go b/node/overridden_manager_test.go index 79f03579a5d0..8af93ff68071 100644 --- a/node/overridden_manager_test.go +++ b/node/overridden_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -64,12 +64,12 @@ func TestOverriddenString(t *testing.T) { require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 1)) om := newOverriddenManager(subnetID0, m) - expected := "Overridden Validator Manager (SubnetID = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES): Validator Manager: (Size = 2)\n" + - " Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806\n" + - " Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1)\n" + - " Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1" + expected := `Overridden Validator Manager (SubnetID = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES): Validator Manager: (Size = 2) + Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806 + Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1) + Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1` result := om.String() require.Equal(expected, result) } diff --git a/proto/Dockerfile.buf b/proto/Dockerfile.buf index 40d0a5420b4a..3c8864e636b7 100644 --- a/proto/Dockerfile.buf +++ b/proto/Dockerfile.buf @@ -6,7 +6,7 @@ RUN apt-get update && apt -y install bash curl unzip git WORKDIR /opt RUN \ - curl -L https://golang.org/dl/go1.20.8.linux-amd64.tar.gz > golang.tar.gz && \ + curl -L https://golang.org/dl/go1.20.12.linux-amd64.tar.gz > golang.tar.gz && \ mkdir golang && \ tar -zxvf golang.tar.gz -C golang/ diff --git a/proto/p2p/p2p.proto b/proto/p2p/p2p.proto index 9d355688bd49..6b5deacc7e01 100644 --- a/proto/p2p/p2p.proto +++ b/proto/p2p/p2p.proto @@ -8,6 +8,8 @@ option go_package = "github.com/ava-labs/avalanchego/proto/pb/p2p"; // Represents peer-to-peer messages. // Only one type can be non-null. message Message { + reserved 33; // Until after durango activation. + reserved 36; // Next unused field number. // NOTES // Use "oneof" for each message type and set rest to null if not used. // That is because when the compression is enabled, we don't want to include uncompressed fields. @@ -28,7 +30,8 @@ message Message { // Network messages: Ping ping = 11; Pong pong = 12; - Version version = 13; + Handshake handshake = 13; + GetPeerList get_peer_list = 35; PeerList peer_list = 14; // State-sync messages: @@ -56,9 +59,7 @@ message Message { AppRequest app_request = 30; AppResponse app_response = 31; AppGossip app_gossip = 32; - - PeerListAck peer_list_ack = 33; - AppRequestFailed app_request_failed = 34; + AppError app_error = 34; } } @@ -91,17 +92,17 @@ message Pong { repeated SubnetUptime subnet_uptimes = 2; } -// Version is the first outbound message sent to a peer when a connection is +// Handshake is the first outbound message sent to a peer when a connection is // established to start the p2p handshake. // -// Peers must respond to a Version message with a PeerList message to allow the +// Peers must respond to a Handshake message with a PeerList message to allow the // peer to connect to other peers in the network. // // Peers should drop connections to peers with incompatible versions. 
-message Version { +message Handshake { // Network the peer is running on (e.g local, testnet, mainnet) uint32 network_id = 1; - // Unix timestamp when this Version message was created + // Unix timestamp when this Handshake message was created uint64 my_time = 2; // IP address of the peer bytes ip_addr = 3; @@ -110,11 +111,31 @@ message Version { // Avalanche client version string my_version = 5; // Timestamp of the IP - uint64 my_version_time = 6; + uint64 ip_signing_time = 6; // Signature of the peer IP port pair at a provided timestamp bytes sig = 7; // Subnets the peer is tracking repeated bytes tracked_subnets = 8; + Client client = 9; + repeated uint32 supported_acps = 10; + repeated uint32 objected_acps = 11; + BloomFilter known_peers = 12; +} + +// Metadata about a peer's P2P client used to determine compatibility +message Client { + // Client name (e.g. avalanchego) + string name = 1; + // Client semantic version + uint32 major = 2; + uint32 minor = 3; + uint32 patch = 4; +} + +// BloomFilter with a random salt to prevent consistent hash collisions +message BloomFilter { + bytes filter = 1; + bytes salt = 2; } // ClaimedIpPort contains metadata needed to connect to a peer @@ -133,38 +154,29 @@ message ClaimedIpPort { bytes tx_id = 6; } +// GetPeerList contains a bloom filter of the currently known validator IPs. +// +// GetPeerList must not be responded to until finishing the handshake. After the +// handshake is completed, GetPeerList messages should be responded to with a +// PeerList message containing validators that are not present in the bloom +// filter. +message GetPeerList { + BloomFilter known_peers = 1; +} + // PeerList contains network-level metadata for a set of validators. // -// PeerList must be sent in response to an inbound Version message from a +// PeerList must be sent in response to an inbound Handshake message from a // remote peer a peer wants to connect to. Once a PeerList is received after -// a version message, the p2p handshake is complete and the connection is +// a Handshake message, the p2p handshake is complete and the connection is // established. - -// Peers should periodically send PeerList messages to allow peers to -// discover each other. // -// PeerListAck should be sent in response to a PeerList. +// PeerList should be sent in response to a GetPeerList message if the handshake +// has been completed. message PeerList { repeated ClaimedIpPort claimed_ip_ports = 1; } -// PeerAck acknowledges that a gossiped peer in a PeerList message will be -// tracked by the remote peer. -message PeerAck { - // P-Chain transaction that added the acknowledged peer to the validator - // set - bytes tx_id = 1; - // Timestamp of the signed ip of the peer - uint64 timestamp = 2; -} - -// PeerListAck is sent in response to PeerList to acknowledge the subset of -// peers that the peer will attempt to connect to. -message PeerListAck { - reserved 1; // deprecated; used to be tx_ids - repeated PeerAck peer_acks = 2; -} - // GetStateSummaryFrontier requests a peer's most recently accepted state // summary message GetStateSummaryFrontier { @@ -387,7 +399,7 @@ message Chits { // AppRequest is a VM-defined request.
// // Remote peers must respond to AppRequest with a corresponding AppResponse or -// AppRequestFailed +// AppError message AppRequest { // Chain being requested from bytes chain_id = 1; @@ -409,14 +421,14 @@ message AppResponse { bytes app_bytes = 3; } -// AppRequestFailed is a VM-defined error sent in response to AppRequest -message AppRequestFailed { +// AppError is a VM-defined error sent in response to AppRequest +message AppError { // Chain the message is for bytes chain_id = 1; // Request id of the original AppRequest uint32 request_id = 2; - // VM defined error code - uint32 error_code = 3; + // VM defined error code. VMs may define error codes > 0. + sint32 error_code = 3; // VM defined error message string error_message = 4; } diff --git a/proto/pb/p2p/p2p.pb.go b/proto/pb/p2p/p2p.pb.go index 89fb2e201520..0732bf1a13c8 100644 --- a/proto/pb/p2p/p2p.pb.go +++ b/proto/pb/p2p/p2p.pb.go @@ -88,8 +88,9 @@ type Message struct { // *Message_CompressedZstd // *Message_Ping // *Message_Pong - // *Message_Version - // *Message_PeerList + // *Message_Handshake + // *Message_GetPeerList + // *Message_PeerList_ // *Message_GetStateSummaryFrontier // *Message_StateSummaryFrontier_ // *Message_GetAcceptedStateSummary @@ -108,8 +109,7 @@ type Message struct { // *Message_AppRequest // *Message_AppResponse // *Message_AppGossip - // *Message_PeerListAck - // *Message_AppRequestFailed + // *Message_AppError Message isMessage_Message `protobuf_oneof:"message"` } @@ -180,16 +180,23 @@ func (x *Message) GetPong() *Pong { return nil } -func (x *Message) GetVersion() *Version { - if x, ok := x.GetMessage().(*Message_Version); ok { - return x.Version +func (x *Message) GetHandshake() *Handshake { + if x, ok := x.GetMessage().(*Message_Handshake); ok { + return x.Handshake } return nil } -func (x *Message) GetPeerList() *PeerList { - if x, ok := x.GetMessage().(*Message_PeerList); ok { - return x.PeerList +func (x *Message) GetGetPeerList() *GetPeerList { + if x, ok := x.GetMessage().(*Message_GetPeerList); ok { + return x.GetPeerList + } + return nil +} + +func (x *Message) GetPeerList_() *PeerList { + if x, ok := x.GetMessage().(*Message_PeerList_); ok { + return x.PeerList_ } return nil } @@ -320,16 +327,9 @@ func (x *Message) GetAppGossip() *AppGossip { return nil } -func (x *Message) GetPeerListAck() *PeerListAck { - if x, ok := x.GetMessage().(*Message_PeerListAck); ok { - return x.PeerListAck - } - return nil -} - -func (x *Message) GetAppRequestFailed() *AppRequestFailed { - if x, ok := x.GetMessage().(*Message_AppRequestFailed); ok { - return x.AppRequestFailed +func (x *Message) GetAppError() *AppError { + if x, ok := x.GetMessage().(*Message_AppError); ok { + return x.AppError } return nil } @@ -361,12 +361,16 @@ type Message_Pong struct { Pong *Pong `protobuf:"bytes,12,opt,name=pong,proto3,oneof"` } -type Message_Version struct { - Version *Version `protobuf:"bytes,13,opt,name=version,proto3,oneof"` +type Message_Handshake struct { + Handshake *Handshake `protobuf:"bytes,13,opt,name=handshake,proto3,oneof"` +} + +type Message_GetPeerList struct { + GetPeerList *GetPeerList `protobuf:"bytes,35,opt,name=get_peer_list,json=getPeerList,proto3,oneof"` } -type Message_PeerList struct { - PeerList *PeerList `protobuf:"bytes,14,opt,name=peer_list,json=peerList,proto3,oneof"` +type Message_PeerList_ struct { + PeerList_ *PeerList `protobuf:"bytes,14,opt,name=peer_list,json=peerList,proto3,oneof"` } type Message_GetStateSummaryFrontier struct { @@ -445,12 +449,8 @@ type Message_AppGossip struct { 
AppGossip *AppGossip `protobuf:"bytes,32,opt,name=app_gossip,json=appGossip,proto3,oneof"` } -type Message_PeerListAck struct { - PeerListAck *PeerListAck `protobuf:"bytes,33,opt,name=peer_list_ack,json=peerListAck,proto3,oneof"` -} - -type Message_AppRequestFailed struct { - AppRequestFailed *AppRequestFailed `protobuf:"bytes,34,opt,name=app_request_failed,json=appRequestFailed,proto3,oneof"` +type Message_AppError struct { + AppError *AppError `protobuf:"bytes,34,opt,name=app_error,json=appError,proto3,oneof"` } func (*Message_CompressedGzip) isMessage_Message() {} @@ -461,9 +461,11 @@ func (*Message_Ping) isMessage_Message() {} func (*Message_Pong) isMessage_Message() {} -func (*Message_Version) isMessage_Message() {} +func (*Message_Handshake) isMessage_Message() {} + +func (*Message_GetPeerList) isMessage_Message() {} -func (*Message_PeerList) isMessage_Message() {} +func (*Message_PeerList_) isMessage_Message() {} func (*Message_GetStateSummaryFrontier) isMessage_Message() {} @@ -501,9 +503,7 @@ func (*Message_AppResponse) isMessage_Message() {} func (*Message_AppGossip) isMessage_Message() {} -func (*Message_PeerListAck) isMessage_Message() {} - -func (*Message_AppRequestFailed) isMessage_Message() {} +func (*Message_AppError) isMessage_Message() {} // Ping reports a peer's perceived uptime percentage. // @@ -684,21 +684,21 @@ func (x *Pong) GetSubnetUptimes() []*SubnetUptime { return nil } -// Version is the first outbound message sent to a peer when a connection is +// Handshake is the first outbound message sent to a peer when a connection is // established to start the p2p handshake. // -// Peers must respond to a Version message with a PeerList message to allow the +// Peers must respond to a Handshake message with a PeerList message to allow the // peer to connect to other peers in the network. // // Peers should drop connections to peers with incompatible versions. 
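// Editor-added sketch (not part of this diff): dispatching on the renamed Message
// oneof variants introduced here; assumes the same p2ppb alias as the Handshake
// sketch earlier plus the standard "fmt" package. Note that the generated wrapper
// and getter for the peer_list field gained a trailing underscore (Message_PeerList_,
// GetPeerList_), presumably to avoid a generated-name collision with the new
// get_peer_list field.
func describeMessage(msg *p2ppb.Message) string {
	switch m := msg.GetMessage().(type) {
	case *p2ppb.Message_Handshake: // field 13, replaces the old Version message
		return "handshake from client " + m.Handshake.GetMyVersion()
	case *p2ppb.Message_GetPeerList: // new field 35
		return "get_peer_list carrying a known-peers bloom filter"
	case *p2ppb.Message_PeerList_: // field 14, unchanged on the wire
		return fmt.Sprintf("peer_list with %d claimed IPs", len(m.PeerList_.GetClaimedIpPorts()))
	case *p2ppb.Message_AppError: // field 34, replaces AppRequestFailed
		return fmt.Sprintf("app_error %d: %s", m.AppError.GetErrorCode(), m.AppError.GetErrorMessage())
	default:
		return "other message type (unchanged by this diff)"
	}
}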
-type Version struct { +type Handshake struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Network the peer is running on (e.g local, testnet, mainnet) NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` - // Unix timestamp when this Version message was created + // Unix timestamp when this Handshake message was created MyTime uint64 `protobuf:"varint,2,opt,name=my_time,json=myTime,proto3" json:"my_time,omitempty"` // IP address of the peer IpAddr []byte `protobuf:"bytes,3,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` @@ -707,15 +707,19 @@ type Version struct { // Avalanche client version MyVersion string `protobuf:"bytes,5,opt,name=my_version,json=myVersion,proto3" json:"my_version,omitempty"` // Timestamp of the IP - MyVersionTime uint64 `protobuf:"varint,6,opt,name=my_version_time,json=myVersionTime,proto3" json:"my_version_time,omitempty"` + IpSigningTime uint64 `protobuf:"varint,6,opt,name=ip_signing_time,json=ipSigningTime,proto3" json:"ip_signing_time,omitempty"` // Signature of the peer IP port pair at a provided timestamp Sig []byte `protobuf:"bytes,7,opt,name=sig,proto3" json:"sig,omitempty"` // Subnets the peer is tracking - TrackedSubnets [][]byte `protobuf:"bytes,8,rep,name=tracked_subnets,json=trackedSubnets,proto3" json:"tracked_subnets,omitempty"` + TrackedSubnets [][]byte `protobuf:"bytes,8,rep,name=tracked_subnets,json=trackedSubnets,proto3" json:"tracked_subnets,omitempty"` + Client *Client `protobuf:"bytes,9,opt,name=client,proto3" json:"client,omitempty"` + SupportedAcps []uint32 `protobuf:"varint,10,rep,packed,name=supported_acps,json=supportedAcps,proto3" json:"supported_acps,omitempty"` + ObjectedAcps []uint32 `protobuf:"varint,11,rep,packed,name=objected_acps,json=objectedAcps,proto3" json:"objected_acps,omitempty"` + KnownPeers *BloomFilter `protobuf:"bytes,12,opt,name=known_peers,json=knownPeers,proto3" json:"known_peers,omitempty"` } -func (x *Version) Reset() { - *x = Version{} +func (x *Handshake) Reset() { + *x = Handshake{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -723,13 +727,13 @@ func (x *Version) Reset() { } } -func (x *Version) String() string { +func (x *Handshake) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Version) ProtoMessage() {} +func (*Handshake) ProtoMessage() {} -func (x *Version) ProtoReflect() protoreflect.Message { +func (x *Handshake) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -741,89 +745,111 @@ func (x *Version) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Version.ProtoReflect.Descriptor instead. -func (*Version) Descriptor() ([]byte, []int) { +// Deprecated: Use Handshake.ProtoReflect.Descriptor instead. 
+func (*Handshake) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{4} } -func (x *Version) GetNetworkId() uint32 { +func (x *Handshake) GetNetworkId() uint32 { if x != nil { return x.NetworkId } return 0 } -func (x *Version) GetMyTime() uint64 { +func (x *Handshake) GetMyTime() uint64 { if x != nil { return x.MyTime } return 0 } -func (x *Version) GetIpAddr() []byte { +func (x *Handshake) GetIpAddr() []byte { if x != nil { return x.IpAddr } return nil } -func (x *Version) GetIpPort() uint32 { +func (x *Handshake) GetIpPort() uint32 { if x != nil { return x.IpPort } return 0 } -func (x *Version) GetMyVersion() string { +func (x *Handshake) GetMyVersion() string { if x != nil { return x.MyVersion } return "" } -func (x *Version) GetMyVersionTime() uint64 { +func (x *Handshake) GetIpSigningTime() uint64 { if x != nil { - return x.MyVersionTime + return x.IpSigningTime } return 0 } -func (x *Version) GetSig() []byte { +func (x *Handshake) GetSig() []byte { if x != nil { return x.Sig } return nil } -func (x *Version) GetTrackedSubnets() [][]byte { +func (x *Handshake) GetTrackedSubnets() [][]byte { if x != nil { return x.TrackedSubnets } return nil } -// ClaimedIpPort contains metadata needed to connect to a peer -type ClaimedIpPort struct { +func (x *Handshake) GetClient() *Client { + if x != nil { + return x.Client + } + return nil +} + +func (x *Handshake) GetSupportedAcps() []uint32 { + if x != nil { + return x.SupportedAcps + } + return nil +} + +func (x *Handshake) GetObjectedAcps() []uint32 { + if x != nil { + return x.ObjectedAcps + } + return nil +} + +func (x *Handshake) GetKnownPeers() *BloomFilter { + if x != nil { + return x.KnownPeers + } + return nil +} + +// Metadata about a peer's P2P client used to determine compatibility +type Client struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // X509 certificate of the peer - X509Certificate []byte `protobuf:"bytes,1,opt,name=x509_certificate,json=x509Certificate,proto3" json:"x509_certificate,omitempty"` - // IP address of the peer - IpAddr []byte `protobuf:"bytes,2,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` - // IP port of the peer - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - // Timestamp of the IP address + port pair - Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Signature of the IP port pair at a provided timestamp - Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` - // P-Chain transaction that added this peer to the validator set - TxId []byte `protobuf:"bytes,6,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` + // Client name (e.g avalanchego) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Client semantic version + Major uint32 `protobuf:"varint,2,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,3,opt,name=minor,proto3" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,4,opt,name=patch,proto3" json:"patch,omitempty"` } -func (x *ClaimedIpPort) Reset() { - *x = ClaimedIpPort{} +func (x *Client) Reset() { + *x = Client{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -831,13 +857,13 @@ func (x *ClaimedIpPort) Reset() { } } -func (x *ClaimedIpPort) String() string { +func (x *Client) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*ClaimedIpPort) ProtoMessage() {} +func (*Client) ProtoMessage() {} -func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { +func (x *Client) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -849,67 +875,51 @@ func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. -func (*ClaimedIpPort) Descriptor() ([]byte, []int) { +// Deprecated: Use Client.ProtoReflect.Descriptor instead. +func (*Client) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{5} } -func (x *ClaimedIpPort) GetX509Certificate() []byte { +func (x *Client) GetName() string { if x != nil { - return x.X509Certificate + return x.Name } - return nil -} - -func (x *ClaimedIpPort) GetIpAddr() []byte { - if x != nil { - return x.IpAddr - } - return nil + return "" } -func (x *ClaimedIpPort) GetIpPort() uint32 { +func (x *Client) GetMajor() uint32 { if x != nil { - return x.IpPort + return x.Major } return 0 } -func (x *ClaimedIpPort) GetTimestamp() uint64 { +func (x *Client) GetMinor() uint32 { if x != nil { - return x.Timestamp + return x.Minor } return 0 } -func (x *ClaimedIpPort) GetSignature() []byte { - if x != nil { - return x.Signature - } - return nil -} - -func (x *ClaimedIpPort) GetTxId() []byte { +func (x *Client) GetPatch() uint32 { if x != nil { - return x.TxId + return x.Patch } - return nil + return 0 } -// Peers should periodically send PeerList messages to allow peers to -// discover each other. -// -// PeerListAck should be sent in response to a PeerList. -type PeerList struct { +// BloomFilter with a random salt to prevent consistent hash collisions +type BloomFilter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClaimedIpPorts []*ClaimedIpPort `protobuf:"bytes,1,rep,name=claimed_ip_ports,json=claimedIpPorts,proto3" json:"claimed_ip_ports,omitempty"` + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + Salt []byte `protobuf:"bytes,2,opt,name=salt,proto3" json:"salt,omitempty"` } -func (x *PeerList) Reset() { - *x = PeerList{} +func (x *BloomFilter) Reset() { + *x = BloomFilter{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -917,13 +927,13 @@ func (x *PeerList) Reset() { } } -func (x *PeerList) String() string { +func (x *BloomFilter) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerList) ProtoMessage() {} +func (*BloomFilter) ProtoMessage() {} -func (x *PeerList) ProtoReflect() protoreflect.Message { +func (x *BloomFilter) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -935,34 +945,47 @@ func (x *PeerList) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerList.ProtoReflect.Descriptor instead. -func (*PeerList) Descriptor() ([]byte, []int) { +// Deprecated: Use BloomFilter.ProtoReflect.Descriptor instead. 
+func (*BloomFilter) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{6} } -func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { +func (x *BloomFilter) GetFilter() []byte { if x != nil { - return x.ClaimedIpPorts + return x.Filter + } + return nil +} + +func (x *BloomFilter) GetSalt() []byte { + if x != nil { + return x.Salt } return nil } -// PeerAck acknowledges that a gossiped peer in a PeerList message will be -// tracked by the remote peer. -type PeerAck struct { +// ClaimedIpPort contains metadata needed to connect to a peer +type ClaimedIpPort struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // P-Chain transaction that added the acknowledged peer to the validator - // set - TxId []byte `protobuf:"bytes,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` - // Timestamp of the signed ip of the peer - Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // X509 certificate of the peer + X509Certificate []byte `protobuf:"bytes,1,opt,name=x509_certificate,json=x509Certificate,proto3" json:"x509_certificate,omitempty"` + // IP address of the peer + IpAddr []byte `protobuf:"bytes,2,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` + // IP port of the peer + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + // Timestamp of the IP address + port pair + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Signature of the IP port pair at a provided timestamp + Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` + // P-Chain transaction that added this peer to the validator set + TxId []byte `protobuf:"bytes,6,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` } -func (x *PeerAck) Reset() { - *x = PeerAck{} +func (x *ClaimedIpPort) Reset() { + *x = ClaimedIpPort{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -970,13 +993,13 @@ func (x *PeerAck) Reset() { } } -func (x *PeerAck) String() string { +func (x *ClaimedIpPort) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerAck) ProtoMessage() {} +func (*ClaimedIpPort) ProtoMessage() {} -func (x *PeerAck) ProtoReflect() protoreflect.Message { +func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -988,37 +1011,69 @@ func (x *PeerAck) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerAck.ProtoReflect.Descriptor instead. -func (*PeerAck) Descriptor() ([]byte, []int) { +// Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. 
+func (*ClaimedIpPort) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{7} } -func (x *PeerAck) GetTxId() []byte { +func (x *ClaimedIpPort) GetX509Certificate() []byte { if x != nil { - return x.TxId + return x.X509Certificate } return nil } -func (x *PeerAck) GetTimestamp() uint64 { +func (x *ClaimedIpPort) GetIpAddr() []byte { + if x != nil { + return x.IpAddr + } + return nil +} + +func (x *ClaimedIpPort) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +func (x *ClaimedIpPort) GetTimestamp() uint64 { if x != nil { return x.Timestamp } return 0 } -// PeerListAck is sent in response to PeerList to acknowledge the subset of -// peers that the peer will attempt to connect to. -type PeerListAck struct { +func (x *ClaimedIpPort) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *ClaimedIpPort) GetTxId() []byte { + if x != nil { + return x.TxId + } + return nil +} + +// GetPeerList contains a bloom filter of the currently known validator IPs. +// +// GetPeerList must not be responded to until finishing the handshake. After the +// handshake is completed, GetPeerlist messages should be responded to with a +// Peerlist message containing validators that are not present in the bloom +// filter. +type GetPeerList struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PeerAcks []*PeerAck `protobuf:"bytes,2,rep,name=peer_acks,json=peerAcks,proto3" json:"peer_acks,omitempty"` + KnownPeers *BloomFilter `protobuf:"bytes,1,opt,name=known_peers,json=knownPeers,proto3" json:"known_peers,omitempty"` } -func (x *PeerListAck) Reset() { - *x = PeerListAck{} +func (x *GetPeerList) Reset() { + *x = GetPeerList{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1026,13 +1081,13 @@ func (x *PeerListAck) Reset() { } } -func (x *PeerListAck) String() string { +func (x *GetPeerList) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerListAck) ProtoMessage() {} +func (*GetPeerList) ProtoMessage() {} -func (x *PeerListAck) ProtoReflect() protoreflect.Message { +func (x *GetPeerList) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1044,14 +1099,70 @@ func (x *PeerListAck) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerListAck.ProtoReflect.Descriptor instead. -func (*PeerListAck) Descriptor() ([]byte, []int) { +// Deprecated: Use GetPeerList.ProtoReflect.Descriptor instead. +func (*GetPeerList) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{8} } -func (x *PeerListAck) GetPeerAcks() []*PeerAck { +func (x *GetPeerList) GetKnownPeers() *BloomFilter { + if x != nil { + return x.KnownPeers + } + return nil +} + +// PeerList contains network-level metadata for a set of validators. +// +// PeerList must be sent in response to an inbound Handshake message from a +// remote peer a peer wants to connect to. Once a PeerList is received after +// a Handshake message, the p2p handshake is complete and the connection is +// established. +// +// PeerList should be sent in response to a GetPeerlist message if the handshake +// has been completed. 
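// Editor-added sketch (not part of this diff): the GetPeerList/PeerList exchange
// described in the comments above. After the handshake completes, a peer answers
// GetPeerList with only the validator IPs missing from the requester's salted
// bloom filter. The contains and gossipKey callbacks stand in for the node's real
// bloom-filter utilities, which are outside this diff; assumes the same p2ppb
// alias as the Handshake sketch earlier.
func respondToGetPeerList(
	req *p2ppb.GetPeerList,
	known []*p2ppb.ClaimedIpPort,
	contains func(filter, salt, key []byte) bool,
	gossipKey func(*p2ppb.ClaimedIpPort) []byte,
) *p2ppb.PeerList {
	resp := &p2ppb.PeerList{}
	bf := req.GetKnownPeers()
	for _, ip := range known {
		// Skip validators the requester already tracks according to its filter.
		if bf != nil && contains(bf.GetFilter(), bf.GetSalt(), gossipKey(ip)) {
			continue
		}
		resp.ClaimedIpPorts = append(resp.ClaimedIpPorts, ip)
	}
	return resp
}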
+type PeerList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClaimedIpPorts []*ClaimedIpPort `protobuf:"bytes,1,rep,name=claimed_ip_ports,json=claimedIpPorts,proto3" json:"claimed_ip_ports,omitempty"` +} + +func (x *PeerList) Reset() { + *x = PeerList{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerList) ProtoMessage() {} + +func (x *PeerList) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerList.ProtoReflect.Descriptor instead. +func (*PeerList) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{9} +} + +func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { if x != nil { - return x.PeerAcks + return x.ClaimedIpPorts } return nil } @@ -1074,7 +1185,7 @@ type GetStateSummaryFrontier struct { func (x *GetStateSummaryFrontier) Reset() { *x = GetStateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1087,7 +1198,7 @@ func (x *GetStateSummaryFrontier) String() string { func (*GetStateSummaryFrontier) ProtoMessage() {} func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1100,7 +1211,7 @@ func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryFrontier.ProtoReflect.Descriptor instead. func (*GetStateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{9} + return file_p2p_p2p_proto_rawDescGZIP(), []int{10} } func (x *GetStateSummaryFrontier) GetChainId() []byte { @@ -1141,7 +1252,7 @@ type StateSummaryFrontier struct { func (x *StateSummaryFrontier) Reset() { *x = StateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1265,7 @@ func (x *StateSummaryFrontier) String() string { func (*StateSummaryFrontier) ProtoMessage() {} func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1167,7 +1278,7 @@ func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryFrontier.ProtoReflect.Descriptor instead. 
func (*StateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{10} + return file_p2p_p2p_proto_rawDescGZIP(), []int{11} } func (x *StateSummaryFrontier) GetChainId() []byte { @@ -1211,7 +1322,7 @@ type GetAcceptedStateSummary struct { func (x *GetAcceptedStateSummary) Reset() { *x = GetAcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1224,7 +1335,7 @@ func (x *GetAcceptedStateSummary) String() string { func (*GetAcceptedStateSummary) ProtoMessage() {} func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1237,7 +1348,7 @@ func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedStateSummary.ProtoReflect.Descriptor instead. func (*GetAcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{11} + return file_p2p_p2p_proto_rawDescGZIP(), []int{12} } func (x *GetAcceptedStateSummary) GetChainId() []byte { @@ -1285,7 +1396,7 @@ type AcceptedStateSummary struct { func (x *AcceptedStateSummary) Reset() { *x = AcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1298,7 +1409,7 @@ func (x *AcceptedStateSummary) String() string { func (*AcceptedStateSummary) ProtoMessage() {} func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1311,7 +1422,7 @@ func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedStateSummary.ProtoReflect.Descriptor instead. func (*AcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{12} + return file_p2p_p2p_proto_rawDescGZIP(), []int{13} } func (x *AcceptedStateSummary) GetChainId() []byte { @@ -1356,7 +1467,7 @@ type GetAcceptedFrontier struct { func (x *GetAcceptedFrontier) Reset() { *x = GetAcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1369,7 +1480,7 @@ func (x *GetAcceptedFrontier) String() string { func (*GetAcceptedFrontier) ProtoMessage() {} func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1382,7 +1493,7 @@ func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedFrontier.ProtoReflect.Descriptor instead. 
func (*GetAcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{13} + return file_p2p_p2p_proto_rawDescGZIP(), []int{14} } func (x *GetAcceptedFrontier) GetChainId() []byte { @@ -1432,7 +1543,7 @@ type AcceptedFrontier struct { func (x *AcceptedFrontier) Reset() { *x = AcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1445,7 +1556,7 @@ func (x *AcceptedFrontier) String() string { func (*AcceptedFrontier) ProtoMessage() {} func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1458,7 +1569,7 @@ func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedFrontier.ProtoReflect.Descriptor instead. func (*AcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{14} + return file_p2p_p2p_proto_rawDescGZIP(), []int{15} } func (x *AcceptedFrontier) GetChainId() []byte { @@ -1506,7 +1617,7 @@ type GetAccepted struct { func (x *GetAccepted) Reset() { *x = GetAccepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1519,7 +1630,7 @@ func (x *GetAccepted) String() string { func (*GetAccepted) ProtoMessage() {} func (x *GetAccepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1532,7 +1643,7 @@ func (x *GetAccepted) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAccepted.ProtoReflect.Descriptor instead. func (*GetAccepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{15} + return file_p2p_p2p_proto_rawDescGZIP(), []int{16} } func (x *GetAccepted) GetChainId() []byte { @@ -1590,7 +1701,7 @@ type Accepted struct { func (x *Accepted) Reset() { *x = Accepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1603,7 +1714,7 @@ func (x *Accepted) String() string { func (*Accepted) ProtoMessage() {} func (x *Accepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1616,7 +1727,7 @@ func (x *Accepted) ProtoReflect() protoreflect.Message { // Deprecated: Use Accepted.ProtoReflect.Descriptor instead. 
func (*Accepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{16} + return file_p2p_p2p_proto_rawDescGZIP(), []int{17} } func (x *Accepted) GetChainId() []byte { @@ -1663,7 +1774,7 @@ type GetAncestors struct { func (x *GetAncestors) Reset() { *x = GetAncestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1676,7 +1787,7 @@ func (x *GetAncestors) String() string { func (*GetAncestors) ProtoMessage() {} func (x *GetAncestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1689,7 +1800,7 @@ func (x *GetAncestors) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestors.ProtoReflect.Descriptor instead. func (*GetAncestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{17} + return file_p2p_p2p_proto_rawDescGZIP(), []int{18} } func (x *GetAncestors) GetChainId() []byte { @@ -1747,7 +1858,7 @@ type Ancestors struct { func (x *Ancestors) Reset() { *x = Ancestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1760,7 +1871,7 @@ func (x *Ancestors) String() string { func (*Ancestors) ProtoMessage() {} func (x *Ancestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1773,7 +1884,7 @@ func (x *Ancestors) ProtoReflect() protoreflect.Message { // Deprecated: Use Ancestors.ProtoReflect.Descriptor instead. func (*Ancestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{18} + return file_p2p_p2p_proto_rawDescGZIP(), []int{19} } func (x *Ancestors) GetChainId() []byte { @@ -1820,7 +1931,7 @@ type Get struct { func (x *Get) Reset() { *x = Get{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1833,7 +1944,7 @@ func (x *Get) String() string { func (*Get) ProtoMessage() {} func (x *Get) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1846,7 +1957,7 @@ func (x *Get) ProtoReflect() protoreflect.Message { // Deprecated: Use Get.ProtoReflect.Descriptor instead. 
func (*Get) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{19} + return file_p2p_p2p_proto_rawDescGZIP(), []int{20} } func (x *Get) GetChainId() []byte { @@ -1903,7 +2014,7 @@ type Put struct { func (x *Put) Reset() { *x = Put{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1916,7 +2027,7 @@ func (x *Put) String() string { func (*Put) ProtoMessage() {} func (x *Put) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1929,7 +2040,7 @@ func (x *Put) ProtoReflect() protoreflect.Message { // Deprecated: Use Put.ProtoReflect.Descriptor instead. func (*Put) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{20} + return file_p2p_p2p_proto_rawDescGZIP(), []int{21} } func (x *Put) GetChainId() []byte { @@ -1985,7 +2096,7 @@ type PushQuery struct { func (x *PushQuery) Reset() { *x = PushQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1998,7 +2109,7 @@ func (x *PushQuery) String() string { func (*PushQuery) ProtoMessage() {} func (x *PushQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2011,7 +2122,7 @@ func (x *PushQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PushQuery.ProtoReflect.Descriptor instead. func (*PushQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{21} + return file_p2p_p2p_proto_rawDescGZIP(), []int{22} } func (x *PushQuery) GetChainId() []byte { @@ -2081,7 +2192,7 @@ type PullQuery struct { func (x *PullQuery) Reset() { *x = PullQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2094,7 +2205,7 @@ func (x *PullQuery) String() string { func (*PullQuery) ProtoMessage() {} func (x *PullQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2107,7 +2218,7 @@ func (x *PullQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PullQuery.ProtoReflect.Descriptor instead. 
func (*PullQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{22} + return file_p2p_p2p_proto_rawDescGZIP(), []int{23} } func (x *PullQuery) GetChainId() []byte { @@ -2174,7 +2285,7 @@ type Chits struct { func (x *Chits) Reset() { *x = Chits{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2187,7 +2298,7 @@ func (x *Chits) String() string { func (*Chits) ProtoMessage() {} func (x *Chits) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2200,7 +2311,7 @@ func (x *Chits) ProtoReflect() protoreflect.Message { // Deprecated: Use Chits.ProtoReflect.Descriptor instead. func (*Chits) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{23} + return file_p2p_p2p_proto_rawDescGZIP(), []int{24} } func (x *Chits) GetChainId() []byte { @@ -2241,7 +2352,7 @@ func (x *Chits) GetPreferredIdAtHeight() []byte { // AppRequest is a VM-defined request. // // Remote peers must respond to AppRequest with a corresponding AppResponse or -// AppRequestFailed +// AppError type AppRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2260,7 +2371,7 @@ type AppRequest struct { func (x *AppRequest) Reset() { *x = AppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := &file_p2p_p2p_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2273,7 +2384,7 @@ func (x *AppRequest) String() string { func (*AppRequest) ProtoMessage() {} func (x *AppRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := &file_p2p_p2p_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2286,7 +2397,7 @@ func (x *AppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequest.ProtoReflect.Descriptor instead. func (*AppRequest) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{24} + return file_p2p_p2p_proto_rawDescGZIP(), []int{25} } func (x *AppRequest) GetChainId() []byte { @@ -2334,7 +2445,7 @@ type AppResponse struct { func (x *AppResponse) Reset() { *x = AppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2347,7 +2458,7 @@ func (x *AppResponse) String() string { func (*AppResponse) ProtoMessage() {} func (x *AppResponse) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2360,7 +2471,7 @@ func (x *AppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponse.ProtoReflect.Descriptor instead. 
func (*AppResponse) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{25} + return file_p2p_p2p_proto_rawDescGZIP(), []int{26} } func (x *AppResponse) GetChainId() []byte { @@ -2384,8 +2495,8 @@ func (x *AppResponse) GetAppBytes() []byte { return nil } -// AppRequestFailed is a VM-defined error sent in response to AppRequest -type AppRequestFailed struct { +// AppError is a VM-defined error sent in response to AppRequest +type AppError struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -2394,29 +2505,29 @@ type AppRequestFailed struct { ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` // Request id of the original AppRequest RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // VM defined error code - ErrorCode uint32 `protobuf:"varint,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // VM defined error code. VMs may define error codes > 0. + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` // VM defined error message ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } -func (x *AppRequestFailed) Reset() { - *x = AppRequestFailed{} +func (x *AppError) Reset() { + *x = AppError{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[26] + mi := &file_p2p_p2p_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *AppRequestFailed) String() string { +func (x *AppError) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AppRequestFailed) ProtoMessage() {} +func (*AppError) ProtoMessage() {} -func (x *AppRequestFailed) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[26] +func (x *AppError) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2427,33 +2538,33 @@ func (x *AppRequestFailed) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AppRequestFailed.ProtoReflect.Descriptor instead. -func (*AppRequestFailed) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{26} +// Deprecated: Use AppError.ProtoReflect.Descriptor instead. 
+func (*AppError) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{27} } -func (x *AppRequestFailed) GetChainId() []byte { +func (x *AppError) GetChainId() []byte { if x != nil { return x.ChainId } return nil } -func (x *AppRequestFailed) GetRequestId() uint32 { +func (x *AppError) GetRequestId() uint32 { if x != nil { return x.RequestId } return 0 } -func (x *AppRequestFailed) GetErrorCode() uint32 { +func (x *AppError) GetErrorCode() int32 { if x != nil { return x.ErrorCode } return 0 } -func (x *AppRequestFailed) GetErrorMessage() string { +func (x *AppError) GetErrorMessage() string { if x != nil { return x.ErrorMessage } @@ -2475,7 +2586,7 @@ type AppGossip struct { func (x *AppGossip) Reset() { *x = AppGossip{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2488,7 +2599,7 @@ func (x *AppGossip) String() string { func (*AppGossip) ProtoMessage() {} func (x *AppGossip) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2501,7 +2612,7 @@ func (x *AppGossip) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossip.ProtoReflect.Descriptor instead. func (*AppGossip) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{27} + return file_p2p_p2p_proto_rawDescGZIP(), []int{28} } func (x *AppGossip) GetChainId() []byte { @@ -2522,7 +2633,7 @@ var File_p2p_p2p_proto protoreflect.FileDescriptor var file_p2p_p2p_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x70, 0x32, 0x70, 0x22, 0xa5, 0x0b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x03, 0x70, 0x32, 0x70, 0x22, 0x9e, 0x0b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x7a, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x29, 0x0a, 0x0f, 0x63, @@ -2532,325 +2643,340 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x6f, 0x6e, 0x67, - 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 
0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, - 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, + 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x09, 0x68, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x48, 0x00, 0x52, 0x09, 0x68, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x36, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, + 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, + 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, - 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x16, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 
0x46, 0x72, + 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x5b, + 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, 0x16, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, + 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, + 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x44, + 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x12, 0x44, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, - 0x52, 0x0b, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, - 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, - 0x74, 0x5f, 
0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, - 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, - 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, - 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, - 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, - 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, - 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, - 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x61, 0x63, 0x6b, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0b, 0x70, - 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x12, 0x45, 0x0a, 0x12, 0x61, 0x70, - 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x48, 0x00, 0x52, - 0x10, 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x42, 0x09, 0x0a, 0x07, 0x6d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x58, 0x0a, 0x04, - 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, - 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, - 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, - 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, - 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, - 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, - 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, - 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0xf5, 0x01, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, - 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, - 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, - 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x03, 0x73, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, - 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, - 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x22, 0xbd, 0x01, - 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, - 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, - 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x48, 0x0a, - 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, - 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, - 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, - 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x41, - 0x63, 0x6b, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3e, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x41, 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x41, 0x63, 0x6b, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, + 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, + 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x08, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 
0x0b, 0x32, + 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, + 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, + 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x2f, + 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, 0x05, 0x63, + 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x20, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x12, 0x2c, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x22, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, 0x61, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4a, 0x04, 0x08, 0x21, 0x10, 0x22, 0x4a, + 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, + 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x52, 0x0d, 
0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, + 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, + 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, + 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x9b, + 0x03, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, + 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, + 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, + 0x69, 0x70, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, + 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, + 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, + 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0d, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x41, 0x63, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 
0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x5e, 0x0a, 0x06, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, + 0x6a, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, 0x0b, + 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, + 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, + 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, + 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, + 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, + 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, + 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, + 0x72, 0x74, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 
0x07, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, - 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, - 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, + 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 
+ 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, 0x0a, + 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, + 0x75, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, - 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, - 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x22, 0x75, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x64, 
0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x73, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x49, 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, + 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x6b, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, 0x01, + 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 
0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, + 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x8f, 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x49, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, - 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 
0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x22, 0x6b, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x70, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x22, 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, - 0xb0, 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x52, 
0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x22, 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, - 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, - 0x33, 0x0a, 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, - 0x61, 0x74, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x13, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x10, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, 0x73, 0x12, + 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, 0x0a, + 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, 0x74, + 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, - 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, - 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, - 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, - 0x49, 
0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, - 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, - 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x41, 0x70, + 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, 0x4e, + 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, 0x12, + 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, + 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 
0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2866,79 +2992,82 @@ func file_p2p_p2p_proto_rawDescGZIP() []byte { } var file_p2p_p2p_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_p2p_p2p_proto_goTypes = []interface{}{ (EngineType)(0), // 0: p2p.EngineType (*Message)(nil), // 1: p2p.Message (*Ping)(nil), // 2: p2p.Ping (*SubnetUptime)(nil), // 3: p2p.SubnetUptime (*Pong)(nil), // 4: p2p.Pong - (*Version)(nil), // 5: p2p.Version - (*ClaimedIpPort)(nil), // 6: p2p.ClaimedIpPort - (*PeerList)(nil), // 7: p2p.PeerList - (*PeerAck)(nil), // 8: p2p.PeerAck - (*PeerListAck)(nil), // 9: p2p.PeerListAck - (*GetStateSummaryFrontier)(nil), // 10: p2p.GetStateSummaryFrontier - (*StateSummaryFrontier)(nil), // 11: p2p.StateSummaryFrontier - (*GetAcceptedStateSummary)(nil), // 12: p2p.GetAcceptedStateSummary - (*AcceptedStateSummary)(nil), // 13: p2p.AcceptedStateSummary - (*GetAcceptedFrontier)(nil), // 14: p2p.GetAcceptedFrontier - (*AcceptedFrontier)(nil), // 15: p2p.AcceptedFrontier - (*GetAccepted)(nil), // 16: p2p.GetAccepted - (*Accepted)(nil), // 17: p2p.Accepted - (*GetAncestors)(nil), // 18: p2p.GetAncestors - (*Ancestors)(nil), // 19: p2p.Ancestors - (*Get)(nil), // 20: p2p.Get - (*Put)(nil), // 21: p2p.Put - (*PushQuery)(nil), // 22: p2p.PushQuery - (*PullQuery)(nil), // 23: p2p.PullQuery - (*Chits)(nil), // 24: p2p.Chits - (*AppRequest)(nil), // 25: p2p.AppRequest - (*AppResponse)(nil), // 26: p2p.AppResponse - (*AppRequestFailed)(nil), // 27: p2p.AppRequestFailed - (*AppGossip)(nil), // 28: p2p.AppGossip + (*Handshake)(nil), // 5: p2p.Handshake + (*Client)(nil), // 6: p2p.Client + (*BloomFilter)(nil), // 7: p2p.BloomFilter + (*ClaimedIpPort)(nil), // 8: p2p.ClaimedIpPort + (*GetPeerList)(nil), // 9: p2p.GetPeerList + (*PeerList)(nil), // 10: p2p.PeerList + (*GetStateSummaryFrontier)(nil), // 11: p2p.GetStateSummaryFrontier + (*StateSummaryFrontier)(nil), // 12: p2p.StateSummaryFrontier + (*GetAcceptedStateSummary)(nil), // 13: p2p.GetAcceptedStateSummary + (*AcceptedStateSummary)(nil), // 14: p2p.AcceptedStateSummary + (*GetAcceptedFrontier)(nil), // 15: p2p.GetAcceptedFrontier + (*AcceptedFrontier)(nil), // 16: p2p.AcceptedFrontier + (*GetAccepted)(nil), // 17: p2p.GetAccepted + (*Accepted)(nil), // 18: p2p.Accepted + (*GetAncestors)(nil), // 19: p2p.GetAncestors + (*Ancestors)(nil), // 20: p2p.Ancestors + (*Get)(nil), // 21: p2p.Get + (*Put)(nil), // 22: p2p.Put + (*PushQuery)(nil), // 23: p2p.PushQuery + (*PullQuery)(nil), // 24: p2p.PullQuery + (*Chits)(nil), // 25: p2p.Chits + (*AppRequest)(nil), // 26: p2p.AppRequest + (*AppResponse)(nil), // 27: p2p.AppResponse + (*AppError)(nil), // 28: p2p.AppError + (*AppGossip)(nil), // 29: p2p.AppGossip } var file_p2p_p2p_proto_depIdxs = []int32{ 2, // 0: p2p.Message.ping:type_name -> p2p.Ping 4, // 1: p2p.Message.pong:type_name -> p2p.Pong - 5, // 2: p2p.Message.version:type_name -> p2p.Version - 7, // 3: p2p.Message.peer_list:type_name -> p2p.PeerList - 10, // 4: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier - 11, // 5: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier - 12, // 6: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary - 13, // 7: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary - 14, 
// 8: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier - 15, // 9: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier - 16, // 10: p2p.Message.get_accepted:type_name -> p2p.GetAccepted - 17, // 11: p2p.Message.accepted:type_name -> p2p.Accepted - 18, // 12: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors - 19, // 13: p2p.Message.ancestors:type_name -> p2p.Ancestors - 20, // 14: p2p.Message.get:type_name -> p2p.Get - 21, // 15: p2p.Message.put:type_name -> p2p.Put - 22, // 16: p2p.Message.push_query:type_name -> p2p.PushQuery - 23, // 17: p2p.Message.pull_query:type_name -> p2p.PullQuery - 24, // 18: p2p.Message.chits:type_name -> p2p.Chits - 25, // 19: p2p.Message.app_request:type_name -> p2p.AppRequest - 26, // 20: p2p.Message.app_response:type_name -> p2p.AppResponse - 28, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip - 9, // 22: p2p.Message.peer_list_ack:type_name -> p2p.PeerListAck - 27, // 23: p2p.Message.app_request_failed:type_name -> p2p.AppRequestFailed + 5, // 2: p2p.Message.handshake:type_name -> p2p.Handshake + 9, // 3: p2p.Message.get_peer_list:type_name -> p2p.GetPeerList + 10, // 4: p2p.Message.peer_list:type_name -> p2p.PeerList + 11, // 5: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier + 12, // 6: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier + 13, // 7: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary + 14, // 8: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary + 15, // 9: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier + 16, // 10: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier + 17, // 11: p2p.Message.get_accepted:type_name -> p2p.GetAccepted + 18, // 12: p2p.Message.accepted:type_name -> p2p.Accepted + 19, // 13: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors + 20, // 14: p2p.Message.ancestors:type_name -> p2p.Ancestors + 21, // 15: p2p.Message.get:type_name -> p2p.Get + 22, // 16: p2p.Message.put:type_name -> p2p.Put + 23, // 17: p2p.Message.push_query:type_name -> p2p.PushQuery + 24, // 18: p2p.Message.pull_query:type_name -> p2p.PullQuery + 25, // 19: p2p.Message.chits:type_name -> p2p.Chits + 26, // 20: p2p.Message.app_request:type_name -> p2p.AppRequest + 27, // 21: p2p.Message.app_response:type_name -> p2p.AppResponse + 29, // 22: p2p.Message.app_gossip:type_name -> p2p.AppGossip + 28, // 23: p2p.Message.app_error:type_name -> p2p.AppError 3, // 24: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime 3, // 25: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime - 6, // 26: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 8, // 27: p2p.PeerListAck.peer_acks:type_name -> p2p.PeerAck - 0, // 28: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType - 0, // 29: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType - 0, // 30: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType - 0, // 31: p2p.Get.engine_type:type_name -> p2p.EngineType - 0, // 32: p2p.Put.engine_type:type_name -> p2p.EngineType - 0, // 33: p2p.PushQuery.engine_type:type_name -> p2p.EngineType - 0, // 34: p2p.PullQuery.engine_type:type_name -> p2p.EngineType - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // [0:35] is the sub-list for field type_name + 6, // 26: 
p2p.Handshake.client:type_name -> p2p.Client + 7, // 27: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter + 7, // 28: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter + 8, // 29: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 0, // 30: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType + 0, // 31: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType + 0, // 32: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 0, // 33: p2p.Get.engine_type:type_name -> p2p.EngineType + 0, // 34: p2p.Put.engine_type:type_name -> p2p.EngineType + 0, // 35: p2p.PushQuery.engine_type:type_name -> p2p.EngineType + 0, // 36: p2p.PullQuery.engine_type:type_name -> p2p.EngineType + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_p2p_p2p_proto_init() } @@ -2996,7 +3125,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { + switch v := v.(*Handshake); i { case 0: return &v.state case 1: @@ -3008,7 +3137,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClaimedIpPort); i { + switch v := v.(*Client); i { case 0: return &v.state case 1: @@ -3020,7 +3149,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerList); i { + switch v := v.(*BloomFilter); i { case 0: return &v.state case 1: @@ -3032,7 +3161,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerAck); i { + switch v := v.(*ClaimedIpPort); i { case 0: return &v.state case 1: @@ -3044,7 +3173,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerListAck); i { + switch v := v.(*GetPeerList); i { case 0: return &v.state case 1: @@ -3056,7 +3185,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetStateSummaryFrontier); i { + switch v := v.(*PeerList); i { case 0: return &v.state case 1: @@ -3068,7 +3197,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateSummaryFrontier); i { + switch v := v.(*GetStateSummaryFrontier); i { case 0: return &v.state case 1: @@ -3080,7 +3209,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedStateSummary); i { + switch v := v.(*StateSummaryFrontier); i { case 0: return &v.state case 1: @@ -3092,7 +3221,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedStateSummary); i { + switch v := v.(*GetAcceptedStateSummary); i { case 0: return &v.state case 1: @@ -3104,7 +3233,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedFrontier); i { + switch v := v.(*AcceptedStateSummary); i { case 0: 
return &v.state case 1: @@ -3116,7 +3245,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedFrontier); i { + switch v := v.(*GetAcceptedFrontier); i { case 0: return &v.state case 1: @@ -3128,7 +3257,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAccepted); i { + switch v := v.(*AcceptedFrontier); i { case 0: return &v.state case 1: @@ -3140,7 +3269,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Accepted); i { + switch v := v.(*GetAccepted); i { case 0: return &v.state case 1: @@ -3152,7 +3281,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAncestors); i { + switch v := v.(*Accepted); i { case 0: return &v.state case 1: @@ -3164,7 +3293,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Ancestors); i { + switch v := v.(*GetAncestors); i { case 0: return &v.state case 1: @@ -3176,7 +3305,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Get); i { + switch v := v.(*Ancestors); i { case 0: return &v.state case 1: @@ -3188,7 +3317,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Put); i { + switch v := v.(*Get); i { case 0: return &v.state case 1: @@ -3200,7 +3329,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PushQuery); i { + switch v := v.(*Put); i { case 0: return &v.state case 1: @@ -3212,7 +3341,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PullQuery); i { + switch v := v.(*PushQuery); i { case 0: return &v.state case 1: @@ -3224,7 +3353,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Chits); i { + switch v := v.(*PullQuery); i { case 0: return &v.state case 1: @@ -3236,7 +3365,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequest); i { + switch v := v.(*Chits); i { case 0: return &v.state case 1: @@ -3248,7 +3377,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppResponse); i { + switch v := v.(*AppRequest); i { case 0: return &v.state case 1: @@ -3260,7 +3389,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequestFailed); i { + switch v := v.(*AppResponse); i { case 0: return &v.state case 1: @@ -3272,6 +3401,18 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[28].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*AppGossip); i { case 0: return &v.state @@ -3289,8 +3430,9 @@ func file_p2p_p2p_proto_init() { (*Message_CompressedZstd)(nil), (*Message_Ping)(nil), (*Message_Pong)(nil), - (*Message_Version)(nil), - (*Message_PeerList)(nil), + (*Message_Handshake)(nil), + (*Message_GetPeerList)(nil), + (*Message_PeerList_)(nil), (*Message_GetStateSummaryFrontier)(nil), (*Message_StateSummaryFrontier_)(nil), (*Message_GetAcceptedStateSummary)(nil), @@ -3309,8 +3451,7 @@ func file_p2p_p2p_proto_init() { (*Message_AppRequest)(nil), (*Message_AppResponse)(nil), (*Message_AppGossip)(nil), - (*Message_PeerListAck)(nil), - (*Message_AppRequestFailed)(nil), + (*Message_AppError)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -3318,7 +3459,7 @@ func file_p2p_p2p_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_p2p_proto_rawDesc, NumEnums: 1, - NumMessages: 28, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/pb/sdk/sdk.pb.go b/proto/pb/sdk/sdk.pb.go index 120974ee5976..b90c23450270 100644 --- a/proto/pb/sdk/sdk.pb.go +++ b/proto/pb/sdk/sdk.pb.go @@ -25,8 +25,8 @@ type PullGossipRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` Salt []byte `protobuf:"bytes,2,opt,name=salt,proto3" json:"salt,omitempty"` + Filter []byte `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` } func (x *PullGossipRequest) Reset() { @@ -61,16 +61,16 @@ func (*PullGossipRequest) Descriptor() ([]byte, []int) { return file_sdk_sdk_proto_rawDescGZIP(), []int{0} } -func (x *PullGossipRequest) GetFilter() []byte { +func (x *PullGossipRequest) GetSalt() []byte { if x != nil { - return x.Filter + return x.Salt } return nil } -func (x *PullGossipRequest) GetSalt() []byte { +func (x *PullGossipRequest) GetFilter() []byte { if x != nil { - return x.Salt + return x.Filter } return nil } @@ -122,21 +122,71 @@ func (x *PullGossipResponse) GetGossip() [][]byte { return nil } +type PushGossip struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Gossip [][]byte `protobuf:"bytes,1,rep,name=gossip,proto3" json:"gossip,omitempty"` +} + +func (x *PushGossip) Reset() { + *x = PushGossip{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_sdk_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushGossip) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushGossip) ProtoMessage() {} + +func (x *PushGossip) ProtoReflect() protoreflect.Message { + mi := &file_sdk_sdk_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushGossip.ProtoReflect.Descriptor instead. 
+func (*PushGossip) Descriptor() ([]byte, []int) { + return file_sdk_sdk_proto_rawDescGZIP(), []int{2} +} + +func (x *PushGossip) GetGossip() [][]byte { + if x != nil { + return x.Gossip + } + return nil +} + var File_sdk_sdk_proto protoreflect.FileDescriptor var file_sdk_sdk_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x64, 0x6b, 0x2f, 0x73, 0x64, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x73, 0x64, 0x6b, 0x22, 0x3f, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0x2c, 0x0a, 0x12, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x67, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, - 0x73, 0x64, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x03, 0x73, 0x64, 0x6b, 0x22, 0x45, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x2c, 0x0a, 0x12, 0x50, + 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x22, 0x24, 0x0a, 0x0a, 0x50, 0x75, 0x73, + 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, + 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, + 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, + 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x64, 0x6b, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -151,10 +201,11 @@ func file_sdk_sdk_proto_rawDescGZIP() []byte { return file_sdk_sdk_proto_rawDescData } -var file_sdk_sdk_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sdk_sdk_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_sdk_sdk_proto_goTypes = []interface{}{ (*PullGossipRequest)(nil), // 0: sdk.PullGossipRequest (*PullGossipResponse)(nil), // 1: sdk.PullGossipResponse + (*PushGossip)(nil), // 2: sdk.PushGossip } var file_sdk_sdk_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -194,6 +245,18 @@ func file_sdk_sdk_proto_init() { return nil } } + file_sdk_sdk_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushGossip); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -201,7 +264,7 @@ func file_sdk_sdk_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sdk_sdk_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/pb/sync/sync.pb.go b/proto/pb/sync/sync.pb.go index 92cd3d88351e..eb72e145420a 100644 --- a/proto/pb/sync/sync.pb.go +++ b/proto/pb/sync/sync.pb.go @@ -1666,44 +1666,47 @@ var file_sync_sync_proto_rawDesc = []byte{ 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x8a, 0x04, 0x0a, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xc3, 0x04, 0x0a, 0x02, 0x44, 0x42, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x15, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, - 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, - 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 
0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, - 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, - 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, + 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x15, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, + 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1782,21 +1785,23 @@ var file_sync_sync_proto_depIdxs = []int32{ 23, // 32: sync.ProofNode.children:type_name -> sync.ProofNode.ChildrenEntry 21, // 33: sync.KeyChange.value:type_name -> sync.MaybeBytes 24, // 34: sync.DB.GetMerkleRoot:input_type -> google.protobuf.Empty - 2, // 35: sync.DB.GetProof:input_type -> sync.GetProofRequest - 7, // 36: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest - 9, // 37: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest - 11, // 38: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest - 13, // 39: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest - 15, // 40: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest - 1, // 41: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse - 3, // 42: sync.DB.GetProof:output_type -> sync.GetProofResponse - 8, // 43: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse - 10, // 44: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse - 24, // 45: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty - 14, // 46: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse - 24, // 47: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty - 41, // [41:48] is the sub-list for method output_type - 34, // [34:41] is the sub-list for method input_type + 24, // 35: sync.DB.Clear:input_type -> google.protobuf.Empty + 2, // 36: sync.DB.GetProof:input_type -> sync.GetProofRequest + 7, // 37: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest + 9, // 38: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest + 11, // 39: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest + 13, // 40: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest + 15, // 41: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest + 1, // 42: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse + 24, // 43: sync.DB.Clear:output_type -> google.protobuf.Empty + 3, // 44: sync.DB.GetProof:output_type -> sync.GetProofResponse + 8, // 45: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse + 10, // 46: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse + 24, // 47: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty + 14, // 48: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse + 24, // 49: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty + 42, // [42:50] is the sub-list for method output_type + 
34, // [34:42] is the sub-list for method input_type 34, // [34:34] is the sub-list for extension type_name 34, // [34:34] is the sub-list for extension extendee 0, // [0:34] is the sub-list for field type_name diff --git a/proto/pb/sync/sync_grpc.pb.go b/proto/pb/sync/sync_grpc.pb.go index 3fb420c7273a..5f79687b4d01 100644 --- a/proto/pb/sync/sync_grpc.pb.go +++ b/proto/pb/sync/sync_grpc.pb.go @@ -21,6 +21,7 @@ const _ = grpc.SupportPackageIsVersion7 const ( DB_GetMerkleRoot_FullMethodName = "/sync.DB/GetMerkleRoot" + DB_Clear_FullMethodName = "/sync.DB/Clear" DB_GetProof_FullMethodName = "/sync.DB/GetProof" DB_GetChangeProof_FullMethodName = "/sync.DB/GetChangeProof" DB_VerifyChangeProof_FullMethodName = "/sync.DB/VerifyChangeProof" @@ -34,6 +35,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type DBClient interface { GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMerkleRootResponse, error) + Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) GetChangeProof(ctx context.Context, in *GetChangeProofRequest, opts ...grpc.CallOption) (*GetChangeProofResponse, error) VerifyChangeProof(ctx context.Context, in *VerifyChangeProofRequest, opts ...grpc.CallOption) (*VerifyChangeProofResponse, error) @@ -59,6 +61,15 @@ func (c *dBClient) GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts .. return out, nil } +func (c *dBClient) Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, DB_Clear_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *dBClient) GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) { out := new(GetProofResponse) err := c.cc.Invoke(ctx, DB_GetProof_FullMethodName, in, out, opts...) 
@@ -118,6 +129,7 @@ func (c *dBClient) CommitRangeProof(ctx context.Context, in *CommitRangeProofReq // for forward compatibility type DBServer interface { GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) + Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) GetChangeProof(context.Context, *GetChangeProofRequest) (*GetChangeProofResponse, error) VerifyChangeProof(context.Context, *VerifyChangeProofRequest) (*VerifyChangeProofResponse, error) @@ -134,6 +146,9 @@ type UnimplementedDBServer struct { func (UnimplementedDBServer) GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMerkleRoot not implemented") } +func (UnimplementedDBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Clear not implemented") +} func (UnimplementedDBServer) GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetProof not implemented") } @@ -183,6 +198,24 @@ func _DB_GetMerkleRoot_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _DB_Clear_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Clear(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_Clear_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Clear(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _DB_GetProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetProofRequest) if err := dec(in); err != nil { @@ -302,6 +335,10 @@ var DB_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetMerkleRoot", Handler: _DB_GetMerkleRoot_Handler, }, + { + MethodName: "Clear", + Handler: _DB_Clear_Handler, + }, { MethodName: "GetProof", Handler: _DB_GetProof_Handler, diff --git a/proto/pb/vm/vm.pb.go b/proto/pb/vm/vm.pb.go index ebc64f5c3a48..7f38e5bbf4a7 100644 --- a/proto/pb/vm/vm.pb.go +++ b/proto/pb/vm/vm.pb.go @@ -232,7 +232,7 @@ func (x StateSummaryAcceptResponse_Mode) Number() protoreflect.EnumNumber { // Deprecated: Use StateSummaryAcceptResponse_Mode.Descriptor instead. 
func (StateSummaryAcceptResponse_Mode) EnumDescriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{45, 0} + return file_vm_vm_proto_rawDescGZIP(), []int{44, 0} } type InitializeRequest struct { @@ -643,53 +643,6 @@ func (x *CreateHandlersResponse) GetHandlers() []*Handler { return nil } -type CreateStaticHandlersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Handlers []*Handler `protobuf:"bytes,1,rep,name=handlers,proto3" json:"handlers,omitempty"` -} - -func (x *CreateStaticHandlersResponse) Reset() { - *x = CreateStaticHandlersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateStaticHandlersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateStaticHandlersResponse) ProtoMessage() {} - -func (x *CreateStaticHandlersResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateStaticHandlersResponse.ProtoReflect.Descriptor instead. -func (*CreateStaticHandlersResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{5} -} - -func (x *CreateStaticHandlersResponse) GetHandlers() []*Handler { - if x != nil { - return x.Handlers - } - return nil -} - type Handler struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -704,7 +657,7 @@ type Handler struct { func (x *Handler) Reset() { *x = Handler{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[6] + mi := &file_vm_vm_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -717,7 +670,7 @@ func (x *Handler) String() string { func (*Handler) ProtoMessage() {} func (x *Handler) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[6] + mi := &file_vm_vm_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -730,7 +683,7 @@ func (x *Handler) ProtoReflect() protoreflect.Message { // Deprecated: Use Handler.ProtoReflect.Descriptor instead. func (*Handler) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{6} + return file_vm_vm_proto_rawDescGZIP(), []int{5} } func (x *Handler) GetPrefix() string { @@ -758,7 +711,7 @@ type BuildBlockRequest struct { func (x *BuildBlockRequest) Reset() { *x = BuildBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -771,7 +724,7 @@ func (x *BuildBlockRequest) String() string { func (*BuildBlockRequest) ProtoMessage() {} func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -784,7 +737,7 @@ func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockRequest.ProtoReflect.Descriptor instead. 
func (*BuildBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{7} + return file_vm_vm_proto_rawDescGZIP(), []int{6} } func (x *BuildBlockRequest) GetPChainHeight() uint64 { @@ -811,7 +764,7 @@ type BuildBlockResponse struct { func (x *BuildBlockResponse) Reset() { *x = BuildBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -824,7 +777,7 @@ func (x *BuildBlockResponse) String() string { func (*BuildBlockResponse) ProtoMessage() {} func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -837,7 +790,7 @@ func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockResponse.ProtoReflect.Descriptor instead. func (*BuildBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{8} + return file_vm_vm_proto_rawDescGZIP(), []int{7} } func (x *BuildBlockResponse) GetId() []byte { @@ -893,7 +846,7 @@ type ParseBlockRequest struct { func (x *ParseBlockRequest) Reset() { *x = ParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -906,7 +859,7 @@ func (x *ParseBlockRequest) String() string { func (*ParseBlockRequest) ProtoMessage() {} func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -919,7 +872,7 @@ func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockRequest.ProtoReflect.Descriptor instead. func (*ParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{9} + return file_vm_vm_proto_rawDescGZIP(), []int{8} } func (x *ParseBlockRequest) GetBytes() []byte { @@ -945,7 +898,7 @@ type ParseBlockResponse struct { func (x *ParseBlockResponse) Reset() { *x = ParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -958,7 +911,7 @@ func (x *ParseBlockResponse) String() string { func (*ParseBlockResponse) ProtoMessage() {} func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -971,7 +924,7 @@ func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*ParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{10} + return file_vm_vm_proto_rawDescGZIP(), []int{9} } func (x *ParseBlockResponse) GetId() []byte { @@ -1027,7 +980,7 @@ type GetBlockRequest struct { func (x *GetBlockRequest) Reset() { *x = GetBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1040,7 +993,7 @@ func (x *GetBlockRequest) String() string { func (*GetBlockRequest) ProtoMessage() {} func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1053,7 +1006,7 @@ func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockRequest.ProtoReflect.Descriptor instead. func (*GetBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{11} + return file_vm_vm_proto_rawDescGZIP(), []int{10} } func (x *GetBlockRequest) GetId() []byte { @@ -1081,7 +1034,7 @@ type GetBlockResponse struct { func (x *GetBlockResponse) Reset() { *x = GetBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1094,7 +1047,7 @@ func (x *GetBlockResponse) String() string { func (*GetBlockResponse) ProtoMessage() {} func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1107,7 +1060,7 @@ func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockResponse.ProtoReflect.Descriptor instead. func (*GetBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{12} + return file_vm_vm_proto_rawDescGZIP(), []int{11} } func (x *GetBlockResponse) GetParentId() []byte { @@ -1170,7 +1123,7 @@ type SetPreferenceRequest struct { func (x *SetPreferenceRequest) Reset() { *x = SetPreferenceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1183,7 +1136,7 @@ func (x *SetPreferenceRequest) String() string { func (*SetPreferenceRequest) ProtoMessage() {} func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1196,7 +1149,7 @@ func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetPreferenceRequest.ProtoReflect.Descriptor instead. 
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{13} + return file_vm_vm_proto_rawDescGZIP(), []int{12} } func (x *SetPreferenceRequest) GetId() []byte { @@ -1220,7 +1173,7 @@ type BlockVerifyRequest struct { func (x *BlockVerifyRequest) Reset() { *x = BlockVerifyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1233,7 +1186,7 @@ func (x *BlockVerifyRequest) String() string { func (*BlockVerifyRequest) ProtoMessage() {} func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1246,7 +1199,7 @@ func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyRequest.ProtoReflect.Descriptor instead. func (*BlockVerifyRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{14} + return file_vm_vm_proto_rawDescGZIP(), []int{13} } func (x *BlockVerifyRequest) GetBytes() []byte { @@ -1274,7 +1227,7 @@ type BlockVerifyResponse struct { func (x *BlockVerifyResponse) Reset() { *x = BlockVerifyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1287,7 +1240,7 @@ func (x *BlockVerifyResponse) String() string { func (*BlockVerifyResponse) ProtoMessage() {} func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1300,7 +1253,7 @@ func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyResponse.ProtoReflect.Descriptor instead. func (*BlockVerifyResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{15} + return file_vm_vm_proto_rawDescGZIP(), []int{14} } func (x *BlockVerifyResponse) GetTimestamp() *timestamppb.Timestamp { @@ -1321,7 +1274,7 @@ type BlockAcceptRequest struct { func (x *BlockAcceptRequest) Reset() { *x = BlockAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1334,7 +1287,7 @@ func (x *BlockAcceptRequest) String() string { func (*BlockAcceptRequest) ProtoMessage() {} func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1347,7 +1300,7 @@ func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockAcceptRequest.ProtoReflect.Descriptor instead. 
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{16} + return file_vm_vm_proto_rawDescGZIP(), []int{15} } func (x *BlockAcceptRequest) GetId() []byte { @@ -1368,7 +1321,7 @@ type BlockRejectRequest struct { func (x *BlockRejectRequest) Reset() { *x = BlockRejectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1381,7 +1334,7 @@ func (x *BlockRejectRequest) String() string { func (*BlockRejectRequest) ProtoMessage() {} func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1394,7 +1347,7 @@ func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockRejectRequest.ProtoReflect.Descriptor instead. func (*BlockRejectRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{17} + return file_vm_vm_proto_rawDescGZIP(), []int{16} } func (x *BlockRejectRequest) GetId() []byte { @@ -1415,7 +1368,7 @@ type HealthResponse struct { func (x *HealthResponse) Reset() { *x = HealthResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1428,7 +1381,7 @@ func (x *HealthResponse) String() string { func (*HealthResponse) ProtoMessage() {} func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1441,7 +1394,7 @@ func (x *HealthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{18} + return file_vm_vm_proto_rawDescGZIP(), []int{17} } func (x *HealthResponse) GetDetails() []byte { @@ -1462,7 +1415,7 @@ type VersionResponse struct { func (x *VersionResponse) Reset() { *x = VersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1475,7 +1428,7 @@ func (x *VersionResponse) String() string { func (*VersionResponse) ProtoMessage() {} func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1488,7 +1441,7 @@ func (x *VersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. 
func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{19} + return file_vm_vm_proto_rawDescGZIP(), []int{18} } func (x *VersionResponse) GetVersion() string { @@ -1516,7 +1469,7 @@ type AppRequestMsg struct { func (x *AppRequestMsg) Reset() { *x = AppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1529,7 +1482,7 @@ func (x *AppRequestMsg) String() string { func (*AppRequestMsg) ProtoMessage() {} func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1542,7 +1495,7 @@ func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestMsg.ProtoReflect.Descriptor instead. func (*AppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{20} + return file_vm_vm_proto_rawDescGZIP(), []int{19} } func (x *AppRequestMsg) GetNodeId() []byte { @@ -1582,12 +1535,16 @@ type AppRequestFailedMsg struct { NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // The ID of the request we sent and didn't get a response to RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *AppRequestFailedMsg) Reset() { *x = AppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1600,7 +1557,7 @@ func (x *AppRequestFailedMsg) String() string { func (*AppRequestFailedMsg) ProtoMessage() {} func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1613,7 +1570,7 @@ func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{21} + return file_vm_vm_proto_rawDescGZIP(), []int{20} } func (x *AppRequestFailedMsg) GetNodeId() []byte { @@ -1630,6 +1587,20 @@ func (x *AppRequestFailedMsg) GetRequestId() uint32 { return 0 } +func (x *AppRequestFailedMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *AppRequestFailedMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type AppResponseMsg struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1646,7 +1617,7 @@ type AppResponseMsg struct { func (x *AppResponseMsg) Reset() { *x = AppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1659,7 +1630,7 @@ func (x *AppResponseMsg) String() string { func (*AppResponseMsg) ProtoMessage() {} func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1672,7 +1643,7 @@ func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponseMsg.ProtoReflect.Descriptor instead. func (*AppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{22} + return file_vm_vm_proto_rawDescGZIP(), []int{21} } func (x *AppResponseMsg) GetNodeId() []byte { @@ -1710,7 +1681,7 @@ type AppGossipMsg struct { func (x *AppGossipMsg) Reset() { *x = AppGossipMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1723,7 +1694,7 @@ func (x *AppGossipMsg) String() string { func (*AppGossipMsg) ProtoMessage() {} func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1736,7 +1707,7 @@ func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossipMsg.ProtoReflect.Descriptor instead. func (*AppGossipMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{23} + return file_vm_vm_proto_rawDescGZIP(), []int{22} } func (x *AppGossipMsg) GetNodeId() []byte { @@ -1771,7 +1742,7 @@ type CrossChainAppRequestMsg struct { func (x *CrossChainAppRequestMsg) Reset() { *x = CrossChainAppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1784,7 +1755,7 @@ func (x *CrossChainAppRequestMsg) String() string { func (*CrossChainAppRequestMsg) ProtoMessage() {} func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1797,7 +1768,7 @@ func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{24} + return file_vm_vm_proto_rawDescGZIP(), []int{23} } func (x *CrossChainAppRequestMsg) GetChainId() []byte { @@ -1837,12 +1808,16 @@ type CrossChainAppRequestFailedMsg struct { ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` // The ID of the request we sent and didn't get a response to RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *CrossChainAppRequestFailedMsg) Reset() { *x = CrossChainAppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1855,7 +1830,7 @@ func (x *CrossChainAppRequestFailedMsg) String() string { func (*CrossChainAppRequestFailedMsg) ProtoMessage() {} func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1868,7 +1843,7 @@ func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestFailedMsg.ProtoReflect.Descriptor instead. func (*CrossChainAppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{25} + return file_vm_vm_proto_rawDescGZIP(), []int{24} } func (x *CrossChainAppRequestFailedMsg) GetChainId() []byte { @@ -1885,6 +1860,20 @@ func (x *CrossChainAppRequestFailedMsg) GetRequestId() uint32 { return 0 } +func (x *CrossChainAppRequestFailedMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *CrossChainAppRequestFailedMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type CrossChainAppResponseMsg struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1901,7 +1890,7 @@ type CrossChainAppResponseMsg struct { func (x *CrossChainAppResponseMsg) Reset() { *x = CrossChainAppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1914,7 +1903,7 @@ func (x *CrossChainAppResponseMsg) String() string { func (*CrossChainAppResponseMsg) ProtoMessage() {} func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1927,7 +1916,7 @@ func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{26} + return file_vm_vm_proto_rawDescGZIP(), []int{25} } func (x *CrossChainAppResponseMsg) GetChainId() []byte { @@ -1956,14 +1945,19 @@ type ConnectedRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Client name (e.g avalanchego) + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Client semantic version + Major uint32 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,5,opt,name=patch,proto3" json:"patch,omitempty"` } func (x *ConnectedRequest) Reset() { *x = ConnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1976,7 +1970,7 @@ func (x *ConnectedRequest) String() string { func (*ConnectedRequest) ProtoMessage() {} func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1989,7 +1983,7 @@ func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectedRequest.ProtoReflect.Descriptor instead. func (*ConnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{27} + return file_vm_vm_proto_rawDescGZIP(), []int{26} } func (x *ConnectedRequest) GetNodeId() []byte { @@ -1999,13 +1993,34 @@ func (x *ConnectedRequest) GetNodeId() []byte { return nil } -func (x *ConnectedRequest) GetVersion() string { +func (x *ConnectedRequest) GetName() string { if x != nil { - return x.Version + return x.Name } return "" } +func (x *ConnectedRequest) GetMajor() uint32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *ConnectedRequest) GetMinor() uint32 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *ConnectedRequest) GetPatch() uint32 { + if x != nil { + return x.Patch + } + return 0 +} + type DisconnectedRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2017,7 +2032,7 @@ type DisconnectedRequest struct { func (x *DisconnectedRequest) Reset() { *x = DisconnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2030,7 +2045,7 @@ func (x *DisconnectedRequest) String() string { func (*DisconnectedRequest) ProtoMessage() {} func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2043,7 +2058,7 @@ func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DisconnectedRequest.ProtoReflect.Descriptor instead. 
func (*DisconnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{28} + return file_vm_vm_proto_rawDescGZIP(), []int{27} } func (x *DisconnectedRequest) GetNodeId() []byte { @@ -2067,7 +2082,7 @@ type GetAncestorsRequest struct { func (x *GetAncestorsRequest) Reset() { *x = GetAncestorsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2080,7 +2095,7 @@ func (x *GetAncestorsRequest) String() string { func (*GetAncestorsRequest) ProtoMessage() {} func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2093,7 +2108,7 @@ func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsRequest.ProtoReflect.Descriptor instead. func (*GetAncestorsRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{29} + return file_vm_vm_proto_rawDescGZIP(), []int{28} } func (x *GetAncestorsRequest) GetBlkId() []byte { @@ -2135,7 +2150,7 @@ type GetAncestorsResponse struct { func (x *GetAncestorsResponse) Reset() { *x = GetAncestorsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2148,7 +2163,7 @@ func (x *GetAncestorsResponse) String() string { func (*GetAncestorsResponse) ProtoMessage() {} func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2161,7 +2176,7 @@ func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsResponse.ProtoReflect.Descriptor instead. func (*GetAncestorsResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{30} + return file_vm_vm_proto_rawDescGZIP(), []int{29} } func (x *GetAncestorsResponse) GetBlksBytes() [][]byte { @@ -2182,7 +2197,7 @@ type BatchedParseBlockRequest struct { func (x *BatchedParseBlockRequest) Reset() { *x = BatchedParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2195,7 +2210,7 @@ func (x *BatchedParseBlockRequest) String() string { func (*BatchedParseBlockRequest) ProtoMessage() {} func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2208,7 +2223,7 @@ func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{31} + return file_vm_vm_proto_rawDescGZIP(), []int{30} } func (x *BatchedParseBlockRequest) GetRequest() [][]byte { @@ -2229,7 +2244,7 @@ type BatchedParseBlockResponse struct { func (x *BatchedParseBlockResponse) Reset() { *x = BatchedParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2242,7 +2257,7 @@ func (x *BatchedParseBlockResponse) String() string { func (*BatchedParseBlockResponse) ProtoMessage() {} func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2255,7 +2270,7 @@ func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockResponse.ProtoReflect.Descriptor instead. func (*BatchedParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{32} + return file_vm_vm_proto_rawDescGZIP(), []int{31} } func (x *BatchedParseBlockResponse) GetResponse() []*ParseBlockResponse { @@ -2276,7 +2291,7 @@ type VerifyHeightIndexResponse struct { func (x *VerifyHeightIndexResponse) Reset() { *x = VerifyHeightIndexResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2289,7 +2304,7 @@ func (x *VerifyHeightIndexResponse) String() string { func (*VerifyHeightIndexResponse) ProtoMessage() {} func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2302,7 +2317,7 @@ func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyHeightIndexResponse.ProtoReflect.Descriptor instead. func (*VerifyHeightIndexResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{33} + return file_vm_vm_proto_rawDescGZIP(), []int{32} } func (x *VerifyHeightIndexResponse) GetErr() Error { @@ -2323,7 +2338,7 @@ type GetBlockIDAtHeightRequest struct { func (x *GetBlockIDAtHeightRequest) Reset() { *x = GetBlockIDAtHeightRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2336,7 +2351,7 @@ func (x *GetBlockIDAtHeightRequest) String() string { func (*GetBlockIDAtHeightRequest) ProtoMessage() {} func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2349,7 +2364,7 @@ func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{34} + return file_vm_vm_proto_rawDescGZIP(), []int{33} } func (x *GetBlockIDAtHeightRequest) GetHeight() uint64 { @@ -2371,7 +2386,7 @@ type GetBlockIDAtHeightResponse struct { func (x *GetBlockIDAtHeightResponse) Reset() { *x = GetBlockIDAtHeightResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2384,7 +2399,7 @@ func (x *GetBlockIDAtHeightResponse) String() string { func (*GetBlockIDAtHeightResponse) ProtoMessage() {} func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2397,7 +2412,7 @@ func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightResponse.ProtoReflect.Descriptor instead. func (*GetBlockIDAtHeightResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{35} + return file_vm_vm_proto_rawDescGZIP(), []int{34} } func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { @@ -2425,7 +2440,7 @@ type GatherResponse struct { func (x *GatherResponse) Reset() { *x = GatherResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2438,7 +2453,7 @@ func (x *GatherResponse) String() string { func (*GatherResponse) ProtoMessage() {} func (x *GatherResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2451,7 +2466,7 @@ func (x *GatherResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GatherResponse.ProtoReflect.Descriptor instead. func (*GatherResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{36} + return file_vm_vm_proto_rawDescGZIP(), []int{35} } func (x *GatherResponse) GetMetricFamilies() []*_go.MetricFamily { @@ -2473,7 +2488,7 @@ type StateSyncEnabledResponse struct { func (x *StateSyncEnabledResponse) Reset() { *x = StateSyncEnabledResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2486,7 +2501,7 @@ func (x *StateSyncEnabledResponse) String() string { func (*StateSyncEnabledResponse) ProtoMessage() {} func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2499,7 +2514,7 @@ func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSyncEnabledResponse.ProtoReflect.Descriptor instead. 
func (*StateSyncEnabledResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{37} + return file_vm_vm_proto_rawDescGZIP(), []int{36} } func (x *StateSyncEnabledResponse) GetEnabled() bool { @@ -2530,7 +2545,7 @@ type GetOngoingSyncStateSummaryResponse struct { func (x *GetOngoingSyncStateSummaryResponse) Reset() { *x = GetOngoingSyncStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2543,7 +2558,7 @@ func (x *GetOngoingSyncStateSummaryResponse) String() string { func (*GetOngoingSyncStateSummaryResponse) ProtoMessage() {} func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2556,7 +2571,7 @@ func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetOngoingSyncStateSummaryResponse.ProtoReflect.Descriptor instead. func (*GetOngoingSyncStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{38} + return file_vm_vm_proto_rawDescGZIP(), []int{37} } func (x *GetOngoingSyncStateSummaryResponse) GetId() []byte { @@ -2601,7 +2616,7 @@ type GetLastStateSummaryResponse struct { func (x *GetLastStateSummaryResponse) Reset() { *x = GetLastStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2614,7 +2629,7 @@ func (x *GetLastStateSummaryResponse) String() string { func (*GetLastStateSummaryResponse) ProtoMessage() {} func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2627,7 +2642,7 @@ func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLastStateSummaryResponse.ProtoReflect.Descriptor instead. func (*GetLastStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{39} + return file_vm_vm_proto_rawDescGZIP(), []int{38} } func (x *GetLastStateSummaryResponse) GetId() []byte { @@ -2669,7 +2684,7 @@ type ParseStateSummaryRequest struct { func (x *ParseStateSummaryRequest) Reset() { *x = ParseStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2682,7 +2697,7 @@ func (x *ParseStateSummaryRequest) String() string { func (*ParseStateSummaryRequest) ProtoMessage() {} func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2695,7 +2710,7 @@ func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{40} + return file_vm_vm_proto_rawDescGZIP(), []int{39} } func (x *ParseStateSummaryRequest) GetBytes() []byte { @@ -2718,7 +2733,7 @@ type ParseStateSummaryResponse struct { func (x *ParseStateSummaryResponse) Reset() { *x = ParseStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2731,7 +2746,7 @@ func (x *ParseStateSummaryResponse) String() string { func (*ParseStateSummaryResponse) ProtoMessage() {} func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2744,7 +2759,7 @@ func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryResponse.ProtoReflect.Descriptor instead. func (*ParseStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{41} + return file_vm_vm_proto_rawDescGZIP(), []int{40} } func (x *ParseStateSummaryResponse) GetId() []byte { @@ -2779,7 +2794,7 @@ type GetStateSummaryRequest struct { func (x *GetStateSummaryRequest) Reset() { *x = GetStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2792,7 +2807,7 @@ func (x *GetStateSummaryRequest) String() string { func (*GetStateSummaryRequest) ProtoMessage() {} func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2805,7 +2820,7 @@ func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryRequest.ProtoReflect.Descriptor instead. func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{42} + return file_vm_vm_proto_rawDescGZIP(), []int{41} } func (x *GetStateSummaryRequest) GetHeight() uint64 { @@ -2828,7 +2843,7 @@ type GetStateSummaryResponse struct { func (x *GetStateSummaryResponse) Reset() { *x = GetStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2841,7 +2856,7 @@ func (x *GetStateSummaryResponse) String() string { func (*GetStateSummaryResponse) ProtoMessage() {} func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2854,7 +2869,7 @@ func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{43} + return file_vm_vm_proto_rawDescGZIP(), []int{42} } func (x *GetStateSummaryResponse) GetId() []byte { @@ -2889,7 +2904,7 @@ type StateSummaryAcceptRequest struct { func (x *StateSummaryAcceptRequest) Reset() { *x = StateSummaryAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2902,7 +2917,7 @@ func (x *StateSummaryAcceptRequest) String() string { func (*StateSummaryAcceptRequest) ProtoMessage() {} func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2915,7 +2930,7 @@ func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptRequest.ProtoReflect.Descriptor instead. func (*StateSummaryAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{44} + return file_vm_vm_proto_rawDescGZIP(), []int{43} } func (x *StateSummaryAcceptRequest) GetBytes() []byte { @@ -2937,7 +2952,7 @@ type StateSummaryAcceptResponse struct { func (x *StateSummaryAcceptResponse) Reset() { *x = StateSummaryAcceptResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2950,7 +2965,7 @@ func (x *StateSummaryAcceptResponse) String() string { func (*StateSummaryAcceptResponse) ProtoMessage() {} func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2963,7 +2978,7 @@ func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptResponse.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{45} + return file_vm_vm_proto_rawDescGZIP(), []int{44} } func (x *StateSummaryAcceptResponse) GetMode() StateSummaryAcceptResponse_Mode { @@ -3055,418 +3070,421 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, - 0x22, 0x42, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, - 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, - 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x42, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, + 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, + 0x01, 0x0a, 0x12, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, - 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, - 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x68, - 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, - 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, + 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 
0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, + 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x21, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, + 0x69, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, + 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, + 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 
0x26, 0x0a, + 0x14, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x99, - 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, - 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, 0x13, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, - 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, + 0x5f, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, + 0x4f, 0x0a, 0x13, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x22, 0x24, 
0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x91, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x64, 0x0a, 0x0e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, + 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 
0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, - 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, - 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, 0x0a, 0x17, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x59, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x70, 0x0a, - 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x45, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, - 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, - 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, - 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, - 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, - 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, - 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x52, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, 0x73, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, 0x47, 0x65, 0x74, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 
0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x1b, + 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, + 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x70, + 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x81, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, + 0x72, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, + 0x61, 0x74, 0x63, 0x68, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, + 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x47, 0x65, + 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, + 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 
0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, + 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, - 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, 0x0a, 0x0e, 0x47, - 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, - 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x18, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, - 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x7f, 0x0a, - 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, - 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x78, - 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, - 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x73, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, 0x19, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, - 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x16, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x7f, 0x0a, 0x22, 0x47, + 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, + 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, + 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x78, 0x0a, 0x1b, + 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 
0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1b, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x5c, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, - 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x31, 0x0a, 0x19, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x31, 0x0a, 0x19, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xc5, 0x01, + 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, + 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, - 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, - 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x76, - 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, - 0x65, 0x52, 
0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x10, - 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x50, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x59, - 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, - 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x2a, 0x61, - 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, - 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x2a, 0x8e, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, - 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, - 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, - 0x10, 0x04, 0x32, 0xa4, 0x12, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, - 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x50, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, + 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x49, + 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, + 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, + 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x2a, 0x61, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, + 0x8e, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 
0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, + 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x04, + 0x32, 0xd2, 0x11, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, + 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x20, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, - 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, - 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, - 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, - 0x6d, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, - 0x0a, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, - 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, - 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 
0x65, 0x12, - 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x34, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x37, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, - 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, - 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, - 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, - 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, + 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, - 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x34, 0x0a, 0x06, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, - 0x61, 0x69, 
0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, - 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x57, 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x12, 0x21, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, + 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, + 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, + 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, + 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, + 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, - 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, - 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, - 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, - 0x11, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, - 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, - 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4a, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, - 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, - 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 
0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, - 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x48, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, - 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, - 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x26, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, - 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, - 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, + 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, + 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, + 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, + 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, + 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, - 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, - 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, - 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 
0x1a, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, + 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, + 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, + 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, + 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, + 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, + 0x53, 0x74, 
0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, - 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 
0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3482,7 +3500,7 @@ func file_vm_vm_proto_rawDescGZIP() []byte { } var file_vm_vm_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 46) +var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 45) var file_vm_vm_proto_goTypes = []interface{}{ (State)(0), // 0: vm.State (Status)(0), // 1: vm.Status @@ -3493,150 +3511,146 @@ var file_vm_vm_proto_goTypes = []interface{}{ (*SetStateRequest)(nil), // 6: vm.SetStateRequest (*SetStateResponse)(nil), // 7: vm.SetStateResponse (*CreateHandlersResponse)(nil), // 8: vm.CreateHandlersResponse - (*CreateStaticHandlersResponse)(nil), // 9: vm.CreateStaticHandlersResponse - (*Handler)(nil), // 10: vm.Handler - (*BuildBlockRequest)(nil), // 11: vm.BuildBlockRequest - (*BuildBlockResponse)(nil), // 12: vm.BuildBlockResponse - (*ParseBlockRequest)(nil), // 13: vm.ParseBlockRequest - (*ParseBlockResponse)(nil), // 14: vm.ParseBlockResponse - (*GetBlockRequest)(nil), // 15: vm.GetBlockRequest - (*GetBlockResponse)(nil), // 16: vm.GetBlockResponse - (*SetPreferenceRequest)(nil), // 17: vm.SetPreferenceRequest - (*BlockVerifyRequest)(nil), // 18: vm.BlockVerifyRequest - (*BlockVerifyResponse)(nil), // 19: vm.BlockVerifyResponse - (*BlockAcceptRequest)(nil), // 20: vm.BlockAcceptRequest - (*BlockRejectRequest)(nil), // 21: vm.BlockRejectRequest - (*HealthResponse)(nil), // 22: vm.HealthResponse - (*VersionResponse)(nil), // 23: 
vm.VersionResponse - (*AppRequestMsg)(nil), // 24: vm.AppRequestMsg - (*AppRequestFailedMsg)(nil), // 25: vm.AppRequestFailedMsg - (*AppResponseMsg)(nil), // 26: vm.AppResponseMsg - (*AppGossipMsg)(nil), // 27: vm.AppGossipMsg - (*CrossChainAppRequestMsg)(nil), // 28: vm.CrossChainAppRequestMsg - (*CrossChainAppRequestFailedMsg)(nil), // 29: vm.CrossChainAppRequestFailedMsg - (*CrossChainAppResponseMsg)(nil), // 30: vm.CrossChainAppResponseMsg - (*ConnectedRequest)(nil), // 31: vm.ConnectedRequest - (*DisconnectedRequest)(nil), // 32: vm.DisconnectedRequest - (*GetAncestorsRequest)(nil), // 33: vm.GetAncestorsRequest - (*GetAncestorsResponse)(nil), // 34: vm.GetAncestorsResponse - (*BatchedParseBlockRequest)(nil), // 35: vm.BatchedParseBlockRequest - (*BatchedParseBlockResponse)(nil), // 36: vm.BatchedParseBlockResponse - (*VerifyHeightIndexResponse)(nil), // 37: vm.VerifyHeightIndexResponse - (*GetBlockIDAtHeightRequest)(nil), // 38: vm.GetBlockIDAtHeightRequest - (*GetBlockIDAtHeightResponse)(nil), // 39: vm.GetBlockIDAtHeightResponse - (*GatherResponse)(nil), // 40: vm.GatherResponse - (*StateSyncEnabledResponse)(nil), // 41: vm.StateSyncEnabledResponse - (*GetOngoingSyncStateSummaryResponse)(nil), // 42: vm.GetOngoingSyncStateSummaryResponse - (*GetLastStateSummaryResponse)(nil), // 43: vm.GetLastStateSummaryResponse - (*ParseStateSummaryRequest)(nil), // 44: vm.ParseStateSummaryRequest - (*ParseStateSummaryResponse)(nil), // 45: vm.ParseStateSummaryResponse - (*GetStateSummaryRequest)(nil), // 46: vm.GetStateSummaryRequest - (*GetStateSummaryResponse)(nil), // 47: vm.GetStateSummaryResponse - (*StateSummaryAcceptRequest)(nil), // 48: vm.StateSummaryAcceptRequest - (*StateSummaryAcceptResponse)(nil), // 49: vm.StateSummaryAcceptResponse - (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp - (*_go.MetricFamily)(nil), // 51: io.prometheus.client.MetricFamily - (*emptypb.Empty)(nil), // 52: google.protobuf.Empty + (*Handler)(nil), // 9: vm.Handler + (*BuildBlockRequest)(nil), // 10: vm.BuildBlockRequest + (*BuildBlockResponse)(nil), // 11: vm.BuildBlockResponse + (*ParseBlockRequest)(nil), // 12: vm.ParseBlockRequest + (*ParseBlockResponse)(nil), // 13: vm.ParseBlockResponse + (*GetBlockRequest)(nil), // 14: vm.GetBlockRequest + (*GetBlockResponse)(nil), // 15: vm.GetBlockResponse + (*SetPreferenceRequest)(nil), // 16: vm.SetPreferenceRequest + (*BlockVerifyRequest)(nil), // 17: vm.BlockVerifyRequest + (*BlockVerifyResponse)(nil), // 18: vm.BlockVerifyResponse + (*BlockAcceptRequest)(nil), // 19: vm.BlockAcceptRequest + (*BlockRejectRequest)(nil), // 20: vm.BlockRejectRequest + (*HealthResponse)(nil), // 21: vm.HealthResponse + (*VersionResponse)(nil), // 22: vm.VersionResponse + (*AppRequestMsg)(nil), // 23: vm.AppRequestMsg + (*AppRequestFailedMsg)(nil), // 24: vm.AppRequestFailedMsg + (*AppResponseMsg)(nil), // 25: vm.AppResponseMsg + (*AppGossipMsg)(nil), // 26: vm.AppGossipMsg + (*CrossChainAppRequestMsg)(nil), // 27: vm.CrossChainAppRequestMsg + (*CrossChainAppRequestFailedMsg)(nil), // 28: vm.CrossChainAppRequestFailedMsg + (*CrossChainAppResponseMsg)(nil), // 29: vm.CrossChainAppResponseMsg + (*ConnectedRequest)(nil), // 30: vm.ConnectedRequest + (*DisconnectedRequest)(nil), // 31: vm.DisconnectedRequest + (*GetAncestorsRequest)(nil), // 32: vm.GetAncestorsRequest + (*GetAncestorsResponse)(nil), // 33: vm.GetAncestorsResponse + (*BatchedParseBlockRequest)(nil), // 34: vm.BatchedParseBlockRequest + (*BatchedParseBlockResponse)(nil), // 35: vm.BatchedParseBlockResponse 
+ (*VerifyHeightIndexResponse)(nil), // 36: vm.VerifyHeightIndexResponse + (*GetBlockIDAtHeightRequest)(nil), // 37: vm.GetBlockIDAtHeightRequest + (*GetBlockIDAtHeightResponse)(nil), // 38: vm.GetBlockIDAtHeightResponse + (*GatherResponse)(nil), // 39: vm.GatherResponse + (*StateSyncEnabledResponse)(nil), // 40: vm.StateSyncEnabledResponse + (*GetOngoingSyncStateSummaryResponse)(nil), // 41: vm.GetOngoingSyncStateSummaryResponse + (*GetLastStateSummaryResponse)(nil), // 42: vm.GetLastStateSummaryResponse + (*ParseStateSummaryRequest)(nil), // 43: vm.ParseStateSummaryRequest + (*ParseStateSummaryResponse)(nil), // 44: vm.ParseStateSummaryResponse + (*GetStateSummaryRequest)(nil), // 45: vm.GetStateSummaryRequest + (*GetStateSummaryResponse)(nil), // 46: vm.GetStateSummaryResponse + (*StateSummaryAcceptRequest)(nil), // 47: vm.StateSummaryAcceptRequest + (*StateSummaryAcceptResponse)(nil), // 48: vm.StateSummaryAcceptResponse + (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp + (*_go.MetricFamily)(nil), // 50: io.prometheus.client.MetricFamily + (*emptypb.Empty)(nil), // 51: google.protobuf.Empty } var file_vm_vm_proto_depIdxs = []int32{ - 50, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp + 49, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp 0, // 1: vm.SetStateRequest.state:type_name -> vm.State - 50, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp - 10, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler - 10, // 4: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler - 50, // 5: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 6: vm.ParseBlockResponse.status:type_name -> vm.Status - 50, // 7: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 8: vm.GetBlockResponse.status:type_name -> vm.Status - 50, // 9: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 2, // 10: vm.GetBlockResponse.err:type_name -> vm.Error - 50, // 11: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp - 50, // 12: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 50, // 13: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 14, // 14: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse - 2, // 15: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error - 2, // 16: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error - 51, // 17: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily - 2, // 18: vm.StateSyncEnabledResponse.err:type_name -> vm.Error - 2, // 19: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error - 2, // 20: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error - 2, // 21: vm.ParseStateSummaryResponse.err:type_name -> vm.Error - 2, // 22: vm.GetStateSummaryResponse.err:type_name -> vm.Error - 3, // 23: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode - 2, // 24: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error - 4, // 25: vm.VM.Initialize:input_type -> vm.InitializeRequest - 6, // 26: vm.VM.SetState:input_type -> vm.SetStateRequest - 52, // 27: vm.VM.Shutdown:input_type -> google.protobuf.Empty - 52, // 28: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty - 52, // 29: vm.VM.CreateStaticHandlers:input_type -> google.protobuf.Empty - 31, // 30: vm.VM.Connected:input_type -> vm.ConnectedRequest - 32, // 31: 
vm.VM.Disconnected:input_type -> vm.DisconnectedRequest - 11, // 32: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest - 13, // 33: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest - 15, // 34: vm.VM.GetBlock:input_type -> vm.GetBlockRequest - 17, // 35: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest - 52, // 36: vm.VM.Health:input_type -> google.protobuf.Empty - 52, // 37: vm.VM.Version:input_type -> google.protobuf.Empty - 24, // 38: vm.VM.AppRequest:input_type -> vm.AppRequestMsg - 25, // 39: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg - 26, // 40: vm.VM.AppResponse:input_type -> vm.AppResponseMsg - 27, // 41: vm.VM.AppGossip:input_type -> vm.AppGossipMsg - 52, // 42: vm.VM.Gather:input_type -> google.protobuf.Empty - 28, // 43: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg - 29, // 44: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg - 30, // 45: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg - 33, // 46: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest - 35, // 47: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest - 52, // 48: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty - 38, // 49: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest - 52, // 50: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty - 52, // 51: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty - 52, // 52: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty - 44, // 53: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest - 46, // 54: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest - 18, // 55: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest - 20, // 56: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest - 21, // 57: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest - 48, // 58: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest - 5, // 59: vm.VM.Initialize:output_type -> vm.InitializeResponse - 7, // 60: vm.VM.SetState:output_type -> vm.SetStateResponse - 52, // 61: vm.VM.Shutdown:output_type -> google.protobuf.Empty - 8, // 62: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse - 9, // 63: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse - 52, // 64: vm.VM.Connected:output_type -> google.protobuf.Empty - 52, // 65: vm.VM.Disconnected:output_type -> google.protobuf.Empty - 12, // 66: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse - 14, // 67: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse - 16, // 68: vm.VM.GetBlock:output_type -> vm.GetBlockResponse - 52, // 69: vm.VM.SetPreference:output_type -> google.protobuf.Empty - 22, // 70: vm.VM.Health:output_type -> vm.HealthResponse - 23, // 71: vm.VM.Version:output_type -> vm.VersionResponse - 52, // 72: vm.VM.AppRequest:output_type -> google.protobuf.Empty - 52, // 73: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty - 52, // 74: vm.VM.AppResponse:output_type -> google.protobuf.Empty - 52, // 75: vm.VM.AppGossip:output_type -> google.protobuf.Empty - 40, // 76: vm.VM.Gather:output_type -> vm.GatherResponse - 52, // 77: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty - 52, // 78: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty - 52, // 79: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty - 34, // 80: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse - 36, // 81: vm.VM.BatchedParseBlock:output_type -> 
vm.BatchedParseBlockResponse - 37, // 82: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse - 39, // 83: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse - 41, // 84: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse - 42, // 85: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse - 43, // 86: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse - 45, // 87: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse - 47, // 88: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse - 19, // 89: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse - 52, // 90: vm.VM.BlockAccept:output_type -> google.protobuf.Empty - 52, // 91: vm.VM.BlockReject:output_type -> google.protobuf.Empty - 49, // 92: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse - 59, // [59:93] is the sub-list for method output_type - 25, // [25:59] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 49, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp + 9, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler + 49, // 4: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 5: vm.ParseBlockResponse.status:type_name -> vm.Status + 49, // 6: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 7: vm.GetBlockResponse.status:type_name -> vm.Status + 49, // 8: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 2, // 9: vm.GetBlockResponse.err:type_name -> vm.Error + 49, // 10: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp + 49, // 11: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 49, // 12: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 13, // 13: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse + 2, // 14: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error + 2, // 15: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error + 50, // 16: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily + 2, // 17: vm.StateSyncEnabledResponse.err:type_name -> vm.Error + 2, // 18: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error + 2, // 19: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error + 2, // 20: vm.ParseStateSummaryResponse.err:type_name -> vm.Error + 2, // 21: vm.GetStateSummaryResponse.err:type_name -> vm.Error + 3, // 22: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode + 2, // 23: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error + 4, // 24: vm.VM.Initialize:input_type -> vm.InitializeRequest + 6, // 25: vm.VM.SetState:input_type -> vm.SetStateRequest + 51, // 26: vm.VM.Shutdown:input_type -> google.protobuf.Empty + 51, // 27: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty + 30, // 28: vm.VM.Connected:input_type -> vm.ConnectedRequest + 31, // 29: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest + 10, // 30: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest + 12, // 31: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest + 14, // 32: vm.VM.GetBlock:input_type -> vm.GetBlockRequest + 16, // 33: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest + 51, // 34: vm.VM.Health:input_type -> google.protobuf.Empty + 51, // 35: 
vm.VM.Version:input_type -> google.protobuf.Empty + 23, // 36: vm.VM.AppRequest:input_type -> vm.AppRequestMsg + 24, // 37: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg + 25, // 38: vm.VM.AppResponse:input_type -> vm.AppResponseMsg + 26, // 39: vm.VM.AppGossip:input_type -> vm.AppGossipMsg + 51, // 40: vm.VM.Gather:input_type -> google.protobuf.Empty + 27, // 41: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg + 28, // 42: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg + 29, // 43: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg + 32, // 44: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest + 34, // 45: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest + 51, // 46: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty + 37, // 47: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest + 51, // 48: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty + 51, // 49: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty + 51, // 50: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty + 43, // 51: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest + 45, // 52: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest + 17, // 53: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest + 19, // 54: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest + 20, // 55: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest + 47, // 56: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest + 5, // 57: vm.VM.Initialize:output_type -> vm.InitializeResponse + 7, // 58: vm.VM.SetState:output_type -> vm.SetStateResponse + 51, // 59: vm.VM.Shutdown:output_type -> google.protobuf.Empty + 8, // 60: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse + 51, // 61: vm.VM.Connected:output_type -> google.protobuf.Empty + 51, // 62: vm.VM.Disconnected:output_type -> google.protobuf.Empty + 11, // 63: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse + 13, // 64: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse + 15, // 65: vm.VM.GetBlock:output_type -> vm.GetBlockResponse + 51, // 66: vm.VM.SetPreference:output_type -> google.protobuf.Empty + 21, // 67: vm.VM.Health:output_type -> vm.HealthResponse + 22, // 68: vm.VM.Version:output_type -> vm.VersionResponse + 51, // 69: vm.VM.AppRequest:output_type -> google.protobuf.Empty + 51, // 70: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty + 51, // 71: vm.VM.AppResponse:output_type -> google.protobuf.Empty + 51, // 72: vm.VM.AppGossip:output_type -> google.protobuf.Empty + 39, // 73: vm.VM.Gather:output_type -> vm.GatherResponse + 51, // 74: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty + 51, // 75: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty + 51, // 76: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty + 33, // 77: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse + 35, // 78: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse + 36, // 79: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse + 38, // 80: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse + 40, // 81: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse + 41, // 82: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse + 42, // 83: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse + 44, // 84: 
vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse + 46, // 85: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse + 18, // 86: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse + 51, // 87: vm.VM.BlockAccept:output_type -> google.protobuf.Empty + 51, // 88: vm.VM.BlockReject:output_type -> google.protobuf.Empty + 48, // 89: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse + 57, // [57:90] is the sub-list for method output_type + 24, // [24:57] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_vm_vm_proto_init() } @@ -3706,18 +3720,6 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateStaticHandlersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Handler); i { case 0: return &v.state @@ -3729,7 +3731,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockRequest); i { case 0: return &v.state @@ -3741,7 +3743,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockResponse); i { case 0: return &v.state @@ -3753,7 +3755,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockRequest); i { case 0: return &v.state @@ -3765,7 +3767,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockResponse); i { case 0: return &v.state @@ -3777,7 +3779,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockRequest); i { case 0: return &v.state @@ -3789,7 +3791,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockResponse); i { case 0: return &v.state @@ -3801,7 +3803,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetPreferenceRequest); i { case 0: return &v.state @@ -3813,7 +3815,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*BlockVerifyRequest); i { case 0: return &v.state @@ -3825,7 +3827,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockVerifyResponse); i { case 0: return &v.state @@ -3837,7 +3839,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockAcceptRequest); i { case 0: return &v.state @@ -3849,7 +3851,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockRejectRequest); i { case 0: return &v.state @@ -3861,7 +3863,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HealthResponse); i { case 0: return &v.state @@ -3873,7 +3875,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VersionResponse); i { case 0: return &v.state @@ -3885,7 +3887,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestMsg); i { case 0: return &v.state @@ -3897,7 +3899,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestFailedMsg); i { case 0: return &v.state @@ -3909,7 +3911,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppResponseMsg); i { case 0: return &v.state @@ -3921,7 +3923,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossipMsg); i { case 0: return &v.state @@ -3933,7 +3935,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestMsg); i { case 0: return &v.state @@ -3945,7 +3947,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestFailedMsg); i { case 0: return &v.state @@ -3957,7 +3959,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[25].Exporter = 
func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppResponseMsg); i { case 0: return &v.state @@ -3969,7 +3971,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConnectedRequest); i { case 0: return &v.state @@ -3981,7 +3983,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DisconnectedRequest); i { case 0: return &v.state @@ -3993,7 +3995,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsRequest); i { case 0: return &v.state @@ -4005,7 +4007,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsResponse); i { case 0: return &v.state @@ -4017,7 +4019,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockRequest); i { case 0: return &v.state @@ -4029,7 +4031,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockResponse); i { case 0: return &v.state @@ -4041,7 +4043,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VerifyHeightIndexResponse); i { case 0: return &v.state @@ -4053,7 +4055,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightRequest); i { case 0: return &v.state @@ -4065,7 +4067,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightResponse); i { case 0: return &v.state @@ -4077,7 +4079,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GatherResponse); i { case 0: return &v.state @@ -4089,7 +4091,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSyncEnabledResponse); i { case 0: return &v.state @@ -4101,7 +4103,7 @@ func file_vm_vm_proto_init() { return nil } } - 
file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetOngoingSyncStateSummaryResponse); i { case 0: return &v.state @@ -4113,7 +4115,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetLastStateSummaryResponse); i { case 0: return &v.state @@ -4125,7 +4127,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryRequest); i { case 0: return &v.state @@ -4137,7 +4139,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryResponse); i { case 0: return &v.state @@ -4149,7 +4151,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryRequest); i { case 0: return &v.state @@ -4161,7 +4163,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryResponse); i { case 0: return &v.state @@ -4173,7 +4175,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptRequest); i { case 0: return &v.state @@ -4185,7 +4187,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptResponse); i { case 0: return &v.state @@ -4198,15 +4200,15 @@ func file_vm_vm_proto_init() { } } } - file_vm_vm_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_vm_vm_proto_msgTypes[14].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[13].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vm_vm_proto_rawDesc, NumEnums: 4, - NumMessages: 46, + NumMessages: 45, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/pb/vm/vm_grpc.pb.go b/proto/pb/vm/vm_grpc.pb.go index 5250af11f86f..6d7bb17f6c33 100644 --- a/proto/pb/vm/vm_grpc.pb.go +++ b/proto/pb/vm/vm_grpc.pb.go @@ -24,7 +24,6 @@ const ( VM_SetState_FullMethodName = "/vm.VM/SetState" VM_Shutdown_FullMethodName = "/vm.VM/Shutdown" VM_CreateHandlers_FullMethodName = "/vm.VM/CreateHandlers" - VM_CreateStaticHandlers_FullMethodName = "/vm.VM/CreateStaticHandlers" VM_Connected_FullMethodName = "/vm.VM/Connected" VM_Disconnected_FullMethodName = "/vm.VM/Disconnected" VM_BuildBlock_FullMethodName = "/vm.VM/BuildBlock" @@ -70,13 
+69,6 @@ type VMClient interface { Shutdown(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates the HTTP handlers for custom chain network calls. CreateHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateHandlersResponse, error) - // Creates the HTTP handlers for custom VM network calls. - // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again. - CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateStaticHandlersResponse, error) Connected(ctx context.Context, in *ConnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) Disconnected(ctx context.Context, in *DisconnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Attempt to create a new block from data contained in the VM. @@ -177,15 +169,6 @@ func (c *vMClient) CreateHandlers(ctx context.Context, in *emptypb.Empty, opts . return out, nil } -func (c *vMClient) CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateStaticHandlersResponse, error) { - out := new(CreateStaticHandlersResponse) - err := c.cc.Invoke(ctx, VM_CreateStaticHandlers_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *vMClient) Connected(ctx context.Context, in *ConnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) err := c.cc.Invoke(ctx, VM_Connected_FullMethodName, in, out, opts...) @@ -461,13 +444,6 @@ type VMServer interface { Shutdown(context.Context, *emptypb.Empty) (*emptypb.Empty, error) // Creates the HTTP handlers for custom chain network calls. CreateHandlers(context.Context, *emptypb.Empty) (*CreateHandlersResponse, error) - // Creates the HTTP handlers for custom VM network calls. - // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again. - CreateStaticHandlers(context.Context, *emptypb.Empty) (*CreateStaticHandlersResponse, error) Connected(context.Context, *ConnectedRequest) (*emptypb.Empty, error) Disconnected(context.Context, *DisconnectedRequest) (*emptypb.Empty, error) // Attempt to create a new block from data contained in the VM. 
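Editorial note: with CreateStaticHandlers dropped from both VMClient and VMServer, a plugin VM now advertises its HTTP endpoints through CreateHandlers alone. The sketch below is a hypothetical, minimal server-side implementation, not code from this change; the struct, its field, and the "/rpc" prefix are illustrative assumptions, and the generated types are assumed to live in the pb/vm package shown in this diff.

```go
package vmexample

import (
	"context"

	"google.golang.org/protobuf/types/known/emptypb"

	vmpb "github.com/ava-labs/avalanchego/proto/pb/vm"
)

// exampleVM is a stand-in plugin VM used only for illustration.
type exampleVM struct {
	vmpb.UnimplementedVMServer

	// Address of the gRPC server that actually serves this chain's HTTP API.
	httpServerAddr string
}

// CreateHandlers is now the single place a VM exposes HTTP handlers;
// the per-VM (static) variant no longer exists.
func (vm *exampleVM) CreateHandlers(context.Context, *emptypb.Empty) (*vmpb.CreateHandlersResponse, error) {
	return &vmpb.CreateHandlersResponse{
		Handlers: []*vmpb.Handler{{
			Prefix:     "/rpc",            // route prefix the node mounts for this chain (illustrative)
			ServerAddr: vm.httpServerAddr, // address of the gRPC server backing the handler
		}},
	}, nil
}
```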
@@ -541,9 +517,6 @@ func (UnimplementedVMServer) Shutdown(context.Context, *emptypb.Empty) (*emptypb func (UnimplementedVMServer) CreateHandlers(context.Context, *emptypb.Empty) (*CreateHandlersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateHandlers not implemented") } -func (UnimplementedVMServer) CreateStaticHandlers(context.Context, *emptypb.Empty) (*CreateStaticHandlersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateStaticHandlers not implemented") -} func (UnimplementedVMServer) Connected(context.Context, *ConnectedRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Connected not implemented") } @@ -716,24 +689,6 @@ func _VM_CreateHandlers_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } -func _VM_CreateStaticHandlers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VMServer).CreateStaticHandlers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: VM_CreateStaticHandlers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VMServer).CreateStaticHandlers(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _VM_Connected_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ConnectedRequest) if err := dec(in); err != nil { @@ -1279,10 +1234,6 @@ var VM_ServiceDesc = grpc.ServiceDesc{ MethodName: "CreateHandlers", Handler: _VM_CreateHandlers_Handler, }, - { - MethodName: "CreateStaticHandlers", - Handler: _VM_CreateStaticHandlers_Handler, - }, { MethodName: "Connected", Handler: _VM_Connected_Handler, diff --git a/proto/sdk/sdk.proto b/proto/sdk/sdk.proto index 20bfca081856..f42912391fe7 100644 --- a/proto/sdk/sdk.proto +++ b/proto/sdk/sdk.proto @@ -5,10 +5,16 @@ package sdk; option go_package = "github.com/ava-labs/avalanchego/proto/pb/sdk"; message PullGossipRequest { - bytes filter = 1; + // TODO: Remove reservation after v1.11.x activates. + reserved 1; bytes salt = 2; + bytes filter = 3; } message PullGossipResponse { repeated bytes gossip = 1; } + +message PushGossip { + repeated bytes gossip = 1; +} diff --git a/proto/sync/sync.proto b/proto/sync/sync.proto index 4c4c6f434722..1a799433d7e7 100644 --- a/proto/sync/sync.proto +++ b/proto/sync/sync.proto @@ -21,6 +21,8 @@ message Request { service DB { rpc GetMerkleRoot(google.protobuf.Empty) returns (GetMerkleRootResponse); + rpc Clear(google.protobuf.Empty) returns (google.protobuf.Empty); + rpc GetProof(GetProofRequest) returns (GetProofResponse); rpc GetChangeProof(GetChangeProofRequest) returns (GetChangeProofResponse); diff --git a/proto/vm/vm.proto b/proto/vm/vm.proto index 0eca74b46041..4a0557ba4e67 100644 --- a/proto/vm/vm.proto +++ b/proto/vm/vm.proto @@ -21,13 +21,6 @@ service VM { rpc Shutdown(google.protobuf.Empty) returns (google.protobuf.Empty); // Creates the HTTP handlers for custom chain network calls. rpc CreateHandlers(google.protobuf.Empty) returns (CreateHandlersResponse); - // Creates the HTTP handlers for custom VM network calls. 
- // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again. - rpc CreateStaticHandlers(google.protobuf.Empty) returns (CreateStaticHandlersResponse); rpc Connected(ConnectedRequest) returns (google.protobuf.Empty); rpc Disconnected(DisconnectedRequest) returns (google.protobuf.Empty); // Attempt to create a new block from data contained in the VM. @@ -158,10 +151,6 @@ message CreateHandlersResponse { repeated Handler handlers = 1; } -message CreateStaticHandlersResponse { - repeated Handler handlers = 1; -} - message Handler { string prefix = 1; // server_addr is the address of the gRPC server which serves the @@ -259,6 +248,10 @@ message AppRequestFailedMsg { bytes node_id = 1; // The ID of the request we sent and didn't get a response to uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; } message AppResponseMsg { @@ -293,6 +286,10 @@ message CrossChainAppRequestFailedMsg { bytes chain_id = 1; // The ID of the request we sent and didn't get a response to uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; } message CrossChainAppResponseMsg { @@ -306,7 +303,12 @@ message CrossChainAppResponseMsg { message ConnectedRequest { bytes node_id = 1; - string version = 2; + // Client name (e.g avalanchego) + string name = 2; + // Client semantic version + uint32 major = 3; + uint32 minor = 4; + uint32 patch = 5; } message DisconnectedRequest { diff --git a/pubsub/bloom/filter.go b/pubsub/bloom/filter.go new file mode 100644 index 000000000000..b0d023b51f19 --- /dev/null +++ b/pubsub/bloom/filter.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "errors" + + "github.com/ava-labs/avalanchego/utils/bloom" +) + +const bytesPerHash = 8 + +var ( + _ Filter = (*filter)(nil) + + errMaxBytes = errors.New("too large") +) + +type Filter interface { + // Add adds to filter, assumed thread safe + Add(...[]byte) + + // Check checks filter, assumed thread safe + Check([]byte) bool +} + +func New(maxN int, p float64, maxBytes int) (Filter, error) { + numHashes, numEntries := bloom.OptimalParameters(maxN, p) + if neededBytes := 1 + numHashes*bytesPerHash + numEntries; neededBytes > maxBytes { + return nil, errMaxBytes + } + f, err := bloom.New(numHashes, numEntries) + return &filter{ + filter: f, + }, err +} + +type filter struct { + filter *bloom.Filter +} + +func (f *filter) Add(bl ...[]byte) { + for _, b := range bl { + bloom.Add(f.filter, b, nil) + } +} + +func (f *filter) Check(b []byte) bool { + return bloom.Contains(f.filter, b, nil) +} diff --git a/utils/bloom/bloom_filter_test.go b/pubsub/bloom/filter_test.go similarity index 72% rename from utils/bloom/bloom_filter_test.go rename to pubsub/bloom/filter_test.go index 7e810add0f3e..3b2c4b71a59d 100644 --- a/utils/bloom/bloom_filter_test.go +++ b/pubsub/bloom/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
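Editorial note: the new pubsub/bloom package above is a thin wrapper that keeps the old two-method Filter interface while delegating to utils/bloom. A minimal usage sketch follows, with parameters mirroring the adjacent test (10,000 expected elements, 10% false-positive target, 1 MiB size cap); the byte keys are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/pubsub/bloom"
)

func main() {
	// New returns an error if the derived filter would exceed the byte cap.
	f, err := bloom.New(10_000, 0.1, 1*1024*1024)
	if err != nil {
		panic(err)
	}

	// Add and Check are the only operations the pubsub server needs.
	f.Add([]byte("addr-1"), []byte("addr-2"))

	fmt.Println(f.Check([]byte("addr-1"))) // true
	fmt.Println(f.Check([]byte("addr-3"))) // false, up to the configured false-positive rate
}
```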
package bloom @@ -13,10 +13,10 @@ import ( func TestNew(t *testing.T) { var ( - require = require.New(t) - maxN uint64 = 10000 - p = 0.1 - maxBytes uint64 = 1 * units.MiB // 1 MiB + require = require.New(t) + maxN = 10000 + p = 0.1 + maxBytes = 1 * units.MiB // 1 MiB ) f, err := New(maxN, p, maxBytes) require.NoError(err) diff --git a/utils/bloom/map_filter.go b/pubsub/bloom/map_filter.go similarity index 88% rename from utils/bloom/map_filter.go rename to pubsub/bloom/map_filter.go index 19046bea4c11..d0edcbe88fd0 100644 --- a/utils/bloom/map_filter.go +++ b/pubsub/bloom/map_filter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bloom diff --git a/pubsub/connection.go b/pubsub/connection.go index 2dae38acd1e6..901a33a25da3 100644 --- a/pubsub/connection.go +++ b/pubsub/connection.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -14,7 +14,7 @@ import ( "go.uber.org/zap" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" ) var ( @@ -190,7 +190,7 @@ func (c *connection) handleNewBloom(cmd *NewBloom) error { if !cmd.IsParamsValid() { return ErrInvalidFilterParam } - filter, err := bloom.New(uint64(cmd.MaxElements), float64(cmd.CollisionProb), MaxBytes) + filter, err := bloom.New(int(cmd.MaxElements), float64(cmd.CollisionProb), MaxBytes) if err != nil { return fmt.Errorf("bloom filter creation failed %w", err) } diff --git a/pubsub/connections.go b/pubsub/connections.go index 417e1aa8f365..25d35ac8cd82 100644 --- a/pubsub/connections.go +++ b/pubsub/connections.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/pubsub/filter_param.go b/pubsub/filter_param.go index e7e2453c3e95..5fd80a2ad706 100644 --- a/pubsub/filter_param.go +++ b/pubsub/filter_param.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -6,7 +6,7 @@ package pubsub import ( "sync" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" "github.com/ava-labs/avalanchego/utils/set" ) diff --git a/pubsub/filter_test.go b/pubsub/filter_test.go index edc88794fa34..3b47a38e0237 100644 --- a/pubsub/filter_test.go +++ b/pubsub/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" ) diff --git a/pubsub/filterer.go b/pubsub/filterer.go index 389448ea7af2..3ec2910a9c4c 100644 --- a/pubsub/filterer.go +++ b/pubsub/filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/pubsub/messages.go b/pubsub/messages.go index 525ae035f15a..ec41af813cdb 100644 --- a/pubsub/messages.go +++ b/pubsub/messages.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/pubsub/server.go b/pubsub/server.go index b7e4eaf74377..6cc8b649296c 100644 --- a/pubsub/server.go +++ b/pubsub/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/scripts/build_camino.sh b/scripts/build_camino.sh index 46aab6ea679c..99bc63327f33 100755 --- a/scripts/build_camino.sh +++ b/scripts/build_camino.sh @@ -27,7 +27,7 @@ done # Dockerfile # README.md # go.mod -go_version_minimum="1.20.10" +go_version_minimum="1.20.12" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' diff --git a/scripts/build_image.sh b/scripts/build_image.sh index ed9a288de67b..f87991adb3f9 100755 --- a/scripts/build_image.sh +++ b/scripts/build_image.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# shellcheck disable=all +# TODO: re-assess shellcheck disable=all after workflow cleanup set -o errexit set -o nounset diff --git a/scripts/build_testnetctl.sh b/scripts/build_tmpnetctl.sh similarity index 87% rename from scripts/build_testnetctl.sh rename to scripts/build_tmpnetctl.sh index 5cbdb13a61e7..974e83efc467 100755 --- a/scripts/build_testnetctl.sh +++ b/scripts/build_tmpnetctl.sh @@ -15,5 +15,5 @@ LDFLAGS="$LDFLAGS $static_ld_flags" echo "Building tmpnetctl..." go build -ldflags "$LDFLAGS"\ - -o "$CAMINOGO_PATH/build/testnetctl"\ - "$CAMINOGO_PATH/tests/fixture/testnet/cmd/"*.go + -o "$CAMINOGO_PATH/build/tmpnetctl"\ + "$CAMINOGO_PATH/tests/fixture/tmpnet/cmd/"*.go diff --git a/scripts/build_tools.sh b/scripts/build_tools.sh index 669850f79f3d..ab469c480c6e 100755 --- a/scripts/build_tools.sh +++ b/scripts/build_tools.sh @@ -20,14 +20,4 @@ tools_dir=$build_dir/tools/ mkdir -p "$tools_dir" echo "Building cert tool..." -go build -ldflags="-s -w" -o "$tools_dir/cert" "$CAMINOGO_PATH/tools/cert/"*.go - -echo "Building camino-network-runner tool..." -CAMINO_NETWORK_RUNNER_PATH="$CAMINOGO_PATH"/tools/camino-network-runner - -if [ ! -f "$CAMINO_NETWORK_RUNNER_PATH/.git" ]; then - echo "Initializing git submodules..." 
- git --git-dir "$CAMINOGO_PATH/.git" submodule update --init --recursive -fi - -"$CAMINO_NETWORK_RUNNER_PATH/scripts/build.sh" "$tools_dir" \ No newline at end of file +go build -ldflags="-s -w" -o "$tools_dir/cert" "$CAMINOGO_PATH/tools/cert/"*.go \ No newline at end of file diff --git a/scripts/camino_mocks.mockgen.txt b/scripts/camino_mocks.mockgen.txt deleted file mode 100644 index 287a9722c852..000000000000 --- a/scripts/camino_mocks.mockgen.txt +++ /dev/null @@ -1,22 +0,0 @@ -// TODO @evlekht - -// add this to 'scripts/mocks.mockgen.txt' when mockgen will -// be able to process this files correctly (some generics issues) - -github.com/ava-labs/avalanchego/cache=Cacher=cache/mock_cacher.go -github.com/ava-labs/avalanchego/vms/components/avax=AtomicUTXOManager=vms/components/avax/mock_atomic_utxos.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Chain=vms/platformvm/state/mock_chain.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Diff=vms/platformvm/state/mock_diff.go -github.com/ava-labs/avalanchego/vms/platformvm/state=State=vms/platformvm/state/mock_state.go - - -// avax also have their own mocks excluded from list, -// though there is no comment about not forgetting -// to add them back or why they were excluded: - -github.com/ava-labs/avalanchego/snow/networking/router=Router=snow/networking/router/mock_router.go -github.com/ava-labs/avalanchego/snow/networking/sender=ExternalSender=snow/networking/sender/mock_external_sender.go -github.com/ava-labs/avalanchego/snow/validators=Set=snow/validators/mock_set.go -github.com/ava-labs/avalanchego/snow/validators=Manager=snow/validators/mock_manager.go -github.com/ava-labs/avalanchego/vms/platformvm/txs=Staker=vms/platformvm/txs/mock_staker.go -github.com/ava-labs/avalanchego/vms/platformvm/txs=UnsignedTx=vms/platformvm/txs/mock_unsigned_tx.go \ No newline at end of file diff --git a/scripts/constants.sh b/scripts/constants.sh index 8429fdb0bf18..782b378c71e3 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -30,8 +30,8 @@ current_branch=${current_branch_temp////-} # caminogo and caminoethvm git tag and sha git_commit=${CAMINO_NODE_COMMIT:-$(git rev-parse --short HEAD)} git_tag=${CAMINO_NODE_TAG:-$(git describe --tags --abbrev=0 --always || echo unknown)} -caminoethvm_tag=${CAMINO_ETHVM_VERSION:-'v1.1.15-rc0'} -caminoethvm_commit=${CAMINOETHVM_COMMIT:-'ffc063541f3f645420b524fb367375adbf07ed7e'} +caminoethvm_tag=${CAMINO_ETHVM_VERSION:-'v1.1.16-rc0'} +caminoethvm_commit=${CAMINOETHVM_COMMIT:-'d258907cfd8a448ccb111e9e0f232980f85bb0da'} # Static compilation static_ld_flags='' @@ -50,3 +50,6 @@ export CGO_CFLAGS="-O2 -D__BLST_PORTABLE__" # While CGO_ENABLED doesn't need to be explicitly set, it produces a much more # clear error due to the default value change in go1.20. 
export CGO_ENABLED=1 + +# Disable version control fallbacks +export GOPROXY="https://proxy.golang.org" diff --git a/scripts/lint.sh b/scripts/lint.sh index fb487274c791..2785e5bc6da6 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -52,14 +52,14 @@ function test_license_header { } function test_single_import { - if grep -R -zo -P --exclude-dir='camino-network-runner' 'import \(\n\t".*"\n\)' .; then + if grep -R -zo -P 'import \(\n\t".*"\n\)' .; then echo "" return 1 fi } function test_require_error_is_no_funcs_as_params { - if grep -R -zo -P --exclude-dir='camino-network-runner' 'require.ErrorIs\(.+?\)[^\n]*\)\n' .; then + if grep -R -zo -P 'require.ErrorIs\(.+?\)[^\n]*\)\n' .; then echo "" return 1 fi @@ -67,7 +67,7 @@ function test_require_error_is_no_funcs_as_params { function test_require_equal_zero { # check if the first arg, other than t, is 0 - if grep -R -o -P --exclude-dir='camino-network-runner' 'require\.Equal\((t, )?(u?int\d*\(0\)|0)' .; then + if grep -R -o -P 'require\.Equal\((t, )?(u?int\d*\(0\)|0)' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." echo "" @@ -75,7 +75,7 @@ function test_require_equal_zero { fi # check if the last arg is 0 - if grep -R -zo -P --exclude-dir='camino-network-runner' 'require\.Equal\(.+?, (u?int\d*\(0\)|0)\)\n' .; then + if grep -R -zo -P 'require\.Equal\(.+?, (u?int\d*\(0\)|0)\)\n' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." echo "" @@ -84,7 +84,7 @@ function test_require_equal_zero { } function test_require_len_zero { - if grep -R -o -P --exclude-dir='camino-network-runner' 'require\.Len\((t, )?.+, 0(,|\))' .; then + if grep -R -o -P 'require\.Len\((t, )?.+, 0(,|\))' .; then echo "" echo "Use require.Empty instead of require.Len when testing for 0 length." echo "" @@ -102,7 +102,7 @@ function test_require_equal_len { # These should match: # - require.Equal(2, len(foo)) # - require.Equal(t, 2, len(foo)) - if grep -R -o -P --exclude-dir='scripts' --exclude-dir='camino-network-runner' 'require\.Equal\((t, )?.*, len\([^,]*$' .; then + if grep -R -o -P --exclude-dir='scripts' 'require\.Equal\((t, )?.*, len\([^,]*$' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." echo "" @@ -111,21 +111,21 @@ function test_require_equal_len { } function test_require_nil { - if grep -R -o -P --exclude-dir='camino-network-runner' 'require\..+?!= nil' .; then + if grep -R -o -P 'require\..+?!= nil' .; then echo "" echo "Use require.NotNil when testing for nil inequality." echo "" return 1 fi - if grep -R -o -P --exclude-dir='camino-network-runner' 'require\..+?== nil' .; then + if grep -R -o -P 'require\..+?== nil' .; then echo "" echo "Use require.Nil when testing for nil equality." echo "" return 1 fi - if grep -R -o -P --exclude-dir='camino-network-runner' 'require\.ErrorIs.+?nil\)' .; then + if grep -R -o -P 'require\.ErrorIs.+?nil\)' .; then echo "" echo "Use require.NoError instead of require.ErrorIs when testing for nil error." echo "" @@ -134,7 +134,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P --exclude-dir='camino-network-runner' '\t+err :?= ((?!require|if).|\n)*require\.NoError\((t, )?err\)' .; then + if grep -R -zo -P '\t+err :?= ((?!require|if).|\n)*require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." 
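Editorial note: the lint.sh greps above now run repo-wide since the camino-network-runner submodule is gone. The hypothetical test fragment below (the counter type is invented purely for illustration) shows the forms those checks flag and the forms they expect.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// counter is a hypothetical type used only to demonstrate the lint checks.
type counter struct{ items []int }

func (c *counter) Value() int   { return len(c.items) }
func (c *counter) Items() []int { return c.items }
func (c *counter) Close() error { return nil }

func TestLintPreferredForms(t *testing.T) {
	require := require.New(t)
	c := &counter{}

	// test_require_equal_zero flags: require.Equal(0, c.Value())
	require.Zero(c.Value())

	// test_require_len_zero flags: require.Len(c.Items(), 0)
	require.Empty(c.Items())

	// test_require_no_error_inline_func flags:
	//   err := c.Close()
	//   require.NoError(err)
	require.NoError(c.Close())
}
```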
echo "" @@ -144,7 +144,7 @@ function test_require_no_error_inline_func { # Ref: https://go.dev/doc/effective_go#blank_implements function test_interface_compliance_nil { - if grep -R -o -P --exclude-dir='camino-network-runner' '_ .+? = &.+?\{\}' .; then + if grep -R -o -P '_ .+? = &.+?\{\}' .; then echo "" echo "Interface compliance checks need to be of the form:" echo " var _ json.Marshaler = (*RawMessage)(nil)" diff --git a/scripts/mock.gen.sh b/scripts/mock.gen.sh index 4caa3b87bde4..39eac0d9196d 100755 --- a/scripts/mock.gen.sh +++ b/scripts/mock.gen.sh @@ -8,18 +8,56 @@ if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then fi # https://github.com/uber-go/mock -go install -v go.uber.org/mock/mockgen@v0.2.0 +go install -v go.uber.org/mock/mockgen@v0.4.0 source ./scripts/constants.sh +outputted_files=() + # tuples of (source interface import path, comma-separated interface names, output file path) input="scripts/mocks.mockgen.txt" while IFS= read -r line do IFS='=' read -r src_import_path interface_name output_path <<< "${line}" + package_name="$(basename "$(dirname "$output_path")")" + echo "Generating ${output_path}..." + outputted_files+=("${output_path}") + mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" + +done < "$input" + +# tuples of (source import path, comma-separated interface names to exclude, output file path) +input="scripts/mocks.mockgen.source.txt" +while IFS= read -r line +do + IFS='=' read -r source_path exclude_interfaces output_path <<< "${line}" package_name=$(basename "$(dirname "$output_path")") + outputted_files+=("${output_path}") echo "Generating ${output_path}..." - mockgen -copyright_file=./LICENSE.header -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" + + mockgen \ + -source="${source_path}" \ + -destination="${output_path}" \ + -package="${package_name}" \ + -exclude_interfaces="${exclude_interfaces}" + done < "$input" +mapfile -t all_generated_files < <(grep -Rl 'Code generated by MockGen. 
DO NOT EDIT.') + +# Exclude certain files +outputted_files+=('scripts/mock.gen.sh') # This file +outputted_files+=('vms/components/avax/mock_transferable_out.go') # Embedded verify.IsState +outputted_files+=('vms/platformvm/fx/mock_fx.go') # Embedded verify.IsNotState +outputted_files+=('vms/platformvm/state/mock_state.go') # Can't use 2 (or 3) different files to generate one mock file in source mode + +mapfile -t diff_files < <(echo "${all_generated_files[@]}" "${outputted_files[@]}" | tr ' ' '\n' | sort | uniq -u) + +if (( ${#diff_files[@]} )); then + printf "\nFAILURE\n" + echo "Detected MockGen generated files that are not in scripts/mocks.mockgen.source.txt or scripts/mocks.mockgen.txt:" + printf "%s\n" "${diff_files[@]}" + exit 255 +fi + echo "SUCCESS" diff --git a/scripts/mocks.mockgen.source.txt b/scripts/mocks.mockgen.source.txt new file mode 100644 index 000000000000..0f9499dbd2e9 --- /dev/null +++ b/scripts/mocks.mockgen.source.txt @@ -0,0 +1,12 @@ +snow/engine/common/sender.go=StateSummarySender,AcceptedStateSummarySender,FrontierSender,AcceptedSender,FetchSender,AppSender,QuerySender,CrossChainAppSender,NetworkAppSender,Gossiper=snow/engine/common/mock_sender.go +snow/networking/router/router.go=InternalHandler=snow/networking/router/mock_router.go +snow/networking/sender/external_sender.go==snow/networking/sender/mock_external_sender.go +snow/validators/manager.go=SetCallbackListener=snow/validators/mock_manager.go +vms/avm/block/executor/manager.go==vms/avm/block/executor/mock_manager.go +vms/avm/txs/tx.go==vms/avm/txs/mock_unsigned_tx.go +vms/platformvm/block/executor/manager.go==vms/platformvm/block/executor/mock_manager.go +vms/platformvm/txs/staker_tx.go=ValidatorTx,DelegatorTx,StakerTx,PermissionlessStaker=vms/platformvm/txs/mock_staker_tx.go +vms/platformvm/txs/unsigned_tx.go==vms/platformvm/txs/mock_unsigned_tx.go +x/merkledb/db.go=ChangeProofer,RangeProofer,Clearer,Prefetcher=x/merkledb/mock_db.go +cache/cache.go==cache/mock_cacher.go +vms/components/avax/atomic_utxos.go==vms/components/avax/mock_atomic_utxos.go diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index bc0a18495971..f0add8536f17 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -5,14 +5,12 @@ github.com/ava-labs/avalanchego/database=Batch=database/mock_batch.go github.com/ava-labs/avalanchego/database=Iterator=database/mock_iterator.go github.com/ava-labs/avalanchego/message=OutboundMessage=message/mock_message.go github.com/ava-labs/avalanchego/message=OutboundMsgBuilder=message/mock_outbound_message_builder.go -github.com/ava-labs/avalanchego/network/peer=GossipTracker=network/peer/mock_gossip_tracker.go -github.com/ava-labs/avalanchego/network/p2p=Handler=network/p2p/mocks/mock_handler.go github.com/ava-labs/avalanchego/snow/consensus/snowman=Block=snow/consensus/snowman/mock_block.go github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex=LinearizableVM=snow/engine/avalanche/vertex/mock_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mocks/build_block_with_context_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mocks/chain_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=StateSyncableVM=snow/engine/snowman/block/mocks/state_syncable_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=WithVerifyContext=snow/engine/snowman/block/mocks/with_verify_context.go 
+github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mock_build_block_with_context_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mock_chain_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=StateSyncableVM=snow/engine/snowman/block/mock_state_syncable_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=WithVerifyContext=snow/engine/snowman/block/mock_with_verify_context.go github.com/ava-labs/avalanchego/snow/networking/handler=Handler=snow/networking/handler/mock_handler.go github.com/ava-labs/avalanchego/snow/networking/timeout=Manager=snow/networking/timeout/mock_manager.go github.com/ava-labs/avalanchego/snow/networking/tracker=Targeter=snow/networking/tracker/mock_targeter.go @@ -23,30 +21,26 @@ github.com/ava-labs/avalanchego/snow/validators=SubnetConnector=snow/validators/ github.com/ava-labs/avalanchego/utils/crypto/keychain=Ledger=utils/crypto/keychain/mock_ledger.go github.com/ava-labs/avalanchego/utils/filesystem=Reader=utils/filesystem/mock_io.go github.com/ava-labs/avalanchego/utils/hashing=Hasher=utils/hashing/mock_hasher.go -github.com/ava-labs/avalanchego/utils/logging=Logger=utils/logging/mock_logger.go github.com/ava-labs/avalanchego/utils/resource=User=utils/resource/mock_user.go github.com/ava-labs/avalanchego/vms/avm/block=Block=vms/avm/block/mock_block.go github.com/ava-labs/avalanchego/vms/avm/metrics=Metrics=vms/avm/metrics/mock_metrics.go -github.com/ava-labs/avalanchego/vms/avm/states=Chain,State,Diff=vms/avm/states/mock_states.go +github.com/ava-labs/avalanchego/vms/avm/state=Chain,State,Diff=vms/avm/state/mock_state.go github.com/ava-labs/avalanchego/vms/avm/txs/mempool=Mempool=vms/avm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/components/avax=TransferableIn=vms/components/avax/mock_transferable_in.go github.com/ava-labs/avalanchego/vms/components/verify=Verifiable=vms/components/verify/mock_verifiable.go -github.com/ava-labs/avalanchego/vms/platformvm/block/executor=Manager=vms/platformvm/block/executor/mock_manager.go github.com/ava-labs/avalanchego/vms/platformvm/block=Block=vms/platformvm/block/mock_block.go github.com/ava-labs/avalanchego/vms/platformvm/state=StakerIterator=vms/platformvm/state/mock_staker_iterator.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Versions=vms/platformvm/state/mock_versions.go -github.com/ava-labs/avalanchego/vms/platformvm/txs/builder=Builder=vms/platformvm/txs/builder/mock_builder.go github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool=Mempool=vms/platformvm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/platformvm/utxo=Verifier=vms/platformvm/utxo/mock_verifier.go github.com/ava-labs/avalanchego/vms/proposervm/proposer=Windower=vms/proposervm/proposer/mock_windower.go +github.com/ava-labs/avalanchego/vms/proposervm/scheduler=Scheduler=vms/proposervm/scheduler/mock_scheduler.go github.com/ava-labs/avalanchego/vms/proposervm/state=State=vms/proposervm/state/mock_state.go github.com/ava-labs/avalanchego/vms/proposervm=PostForkBlock=vms/proposervm/mock_post_fork_block.go github.com/ava-labs/avalanchego/vms/registry=VMGetter=vms/registry/mock_vm_getter.go -github.com/ava-labs/avalanchego/vms/registry=VMRegisterer=vms/registry/mock_vm_registerer.go github.com/ava-labs/avalanchego/vms/registry=VMRegistry=vms/registry/mock_vm_registry.go github.com/ava-labs/avalanchego/vms=Factory,Manager=vms/mock_manager.go 
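Editorial note: whether an entry lives in mocks.mockgen.txt (reflect mode) or the new mocks.mockgen.source.txt (source mode with -exclude_interfaces), mockgen emits the same consumer-facing API: a NewMock<Interface> constructor and an EXPECT() builder. A hypothetical test fragment using the database.Iterator mock listed above (the expectations are illustrative):

```go
package example

import (
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/ava-labs/avalanchego/database"
)

func TestUsesMockIterator(t *testing.T) {
	ctrl := gomock.NewController(t)

	// Generated into database/mock_iterator.go per mocks.mockgen.txt.
	it := database.NewMockIterator(ctrl)
	it.EXPECT().Next().Return(false) // stub an already-exhausted iterator
	it.EXPECT().Release()

	if it.Next() {
		t.Fatal("expected exhausted iterator")
	}
	it.Release()
}
```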
-github.com/ava-labs/avalanchego/x/merkledb=MerkleDB=x/merkledb/mock_db.go github.com/ava-labs/avalanchego/x/sync=Client=x/sync/mock_client.go +github.com/ava-labs/avalanchego/x/sync=NetworkClient=x/sync/mock_network_client.go github.com/ava-labs/avalanchego/database=Database=database/mock_database.go github.com/ava-labs/avalanchego/vms/platformvm/state=ProposalsIterator=vms/platformvm/state/mock_proposals_iterator.go github.com/ava-labs/avalanchego/vms/platformvm/dac=BondTxIDsGetter=vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh new file mode 100755 index 000000000000..98f5e3f79482 --- /dev/null +++ b/scripts/tests.e2e.existing.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +set -euo pipefail + +################################################################ +# This script deploys a temporary network and configures +# tests.e2e.sh to execute the e2e suite against it. This +# validates that tmpnetctl is capable of starting a network and +# that the e2e suite is capable of executing against a network +# that it did not create. +################################################################ + +# e.g., +# ./scripts/build.sh +# ./scripts/tests.e2e.existing.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo +# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially +# CAMINOGO_BIN_PATH=./build/caminogo ./scripts/tests.e2e.existing.sh # Customization of caminogo path +if ! [[ "$0" =~ scripts/tests.e2e.existing.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +# Ensure an absolute path to avoid dependency on the working directory +# of script execution. +CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" +export CAMINOGO_BIN_PATH + +# Provide visual separation between testing and setup/teardown +function print_separator { + printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ +} + +# Ensure network cleanup on teardown +function cleanup { + print_separator + echo "cleaning up temporary network" + if [[ -n "${TMPNET_NETWORK_DIR:-}" ]]; then + ./build/tmpnetctl stop-network + fi +} +trap cleanup EXIT + +# Start a temporary network +./scripts/build_tmpnetctl.sh +print_separator +./build/tmpnetctl start-network + +# Determine the network configuration path from the latest symlink +LATEST_SYMLINK_PATH="${HOME}/.tmpnet/networks/latest" +if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then + TMPNET_NETWORK_DIR="$(realpath "${LATEST_SYMLINK_PATH}")" + export TMPNET_NETWORK_DIR +else + echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" + exit 255 +fi + +print_separator +# - Setting E2E_USE_EXISTING_NETWORK configures tests.e2e.sh to use +# the temporary network identified by TMPNET_NETWORK_DIR. +# - Only a single test (selected with --ginkgo.focus-file) is required +# to validate that an existing network can be used by an e2e test +# suite run. Executing more tests would be duplicative of the testing +# performed against a network created by the test suite. 
+E2E_USE_EXISTING_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go diff --git a/scripts/tests.e2e.persistent.sh b/scripts/tests.e2e.persistent.sh deleted file mode 100755 index 35bdd74db98b..000000000000 --- a/scripts/tests.e2e.persistent.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -################################################################ -# This script deploys a persistent local network and configures -# tests.e2e.sh to execute the e2e suite against it. -################################################################ - -# e.g., -# ./scripts/build.sh -# ./scripts/tests.e2e.persistent.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo -# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially -# CAMINOGO_BIN_PATH=./build/caminogo ./scripts/tests.e2e.persistent.sh # Customization of caminogo path -if ! [[ "$0" =~ scripts/tests.e2e.persistent.sh ]]; then - echo "must be run from repository root" - exit 255 -fi - -# Ensure an absolute path to avoid dependency on the working directory -# of script execution. -CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" -export CAMINOGO_BIN_PATH - -# Provide visual separation between testing and setup/teardown -function print_separator { - printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ -} - -# Ensure network cleanup on teardown -function cleanup { - print_separator - echo "cleaning up persistent network" - if [[ -n "${TESTNETCTL_NETWORK_DIR:-}" ]]; then - ./build/testnetctl stop-network - fi -} -trap cleanup EXIT - -# Start a persistent network -./scripts/build_testnetctl.sh -print_separator -./build/testnetctl start-network - -# Determine the network configuration path from the latest symlink -LATEST_SYMLINK_PATH="${HOME}/.testnetctl/networks/latest" -if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then - TESTNETCTL_NETWORK_DIR="$(realpath "${LATEST_SYMLINK_PATH}")" - export TESTNETCTL_NETWORK_DIR -else - echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" - exit 255 -fi - -print_separator -# - Setting E2E_USE_PERSISTENT_NETWORK configures tests.e2e.sh to use -# the persistent network identified by TESTNETCTL_NETWORK_DIR. -# - Only a single test (selected with --ginkgo.focus-file) is required -# to validate that a persistent network can be used by an e2e test -# suite run. Executing more tests would be duplicative of the testing -# performed against an ephemeral test network. -E2E_USE_PERSISTENT_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index df438124323f..67fbfeb4c660 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -5,9 +5,9 @@ set -euo pipefail # e.g., # ./scripts/tests.e2e.sh # ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo -# E2E_SERIAL=1 ./scripts/tests.e2e.sh ./build/caminogo # Run tests serially +# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially # CAMINOGO_BIN_PATH=./build/caminogo ./scripts/tests.e2e.sh # Customization of caminogo path -# E2E_USE_PERSISTENT_NETWORK=1 TESTNETCTL_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against a persistent network +# E2E_USE_EXISTING_NETWORK=1 TMPNET_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against an existing network if ! 
[[ "$0" =~ scripts/tests.e2e.sh ]]; then echo "must be run from repository root" exit 255 @@ -28,11 +28,11 @@ ACK_GINKGO_RC=true ginkgo build ./tests/e2e ./tests/e2e/e2e.test --help ################################# -# Since TESTNETCTL_NETWORK_DIR may be persistently set in the environment (e.g. to configure -# ginkgo or testnetctl), configuring the use of a persistent network with this script -# requires the extra step of setting E2E_USE_PERSISTENT_NETWORK=1. -if [[ -n "${E2E_USE_PERSISTENT_NETWORK:-}" && -n "${TESTNETCTL_NETWORK_DIR:-}" ]]; then - E2E_ARGS="--use-persistent-network" +# Since TMPNET_NETWORK_DIR may be set in the environment (e.g. to configure ginkgo +# or tmpnetctl), configuring the use of an existing network with this script +# requires the extra step of setting E2E_USE_EXISTING_NETWORK=1. +if [[ -n "${E2E_USE_EXISTING_NETWORK:-}" && -n "${TMPNET_NETWORK_DIR:-}" ]]; then + E2E_ARGS="--use-existing-network" else CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" E2E_ARGS="--avalanchego-path=${CAMINOGO_BIN_PATH}" diff --git a/snow/acceptor.go b/snow/acceptor.go index f1a92e2f0303..83575e5cf3e2 100644 --- a/snow/acceptor.go +++ b/snow/acceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow @@ -14,8 +14,6 @@ import ( ) var ( - _ Acceptor = noOpAcceptor{} - _ Acceptor = (*AcceptorTracker)(nil) _ Acceptor = acceptorWrapper{} _ AcceptorGroup = (*acceptorGroup)(nil) @@ -32,39 +30,6 @@ type Acceptor interface { Accept(ctx *ConsensusContext, containerID ids.ID, container []byte) error } -type noOpAcceptor struct{} - -func (noOpAcceptor) Accept(*ConsensusContext, ids.ID, []byte) error { - return nil -} - -// AcceptorTracker tracks the dispatched accept events by its ID and counts. -// Useful for testing. -type AcceptorTracker struct { - lock sync.RWMutex - accepted map[ids.ID]int -} - -func NewAcceptorTracker() *AcceptorTracker { - return &AcceptorTracker{ - accepted: make(map[ids.ID]int), - } -} - -func (a *AcceptorTracker) Accept(_ *ConsensusContext, containerID ids.ID, _ []byte) error { - a.lock.Lock() - a.accepted[containerID]++ - a.lock.Unlock() - return nil -} - -func (a *AcceptorTracker) IsAccepted(containerID ids.ID) (int, bool) { - a.lock.RLock() - count, ok := a.accepted[containerID] - a.lock.RUnlock() - return count, ok -} - type acceptorWrapper struct { Acceptor diff --git a/snow/choices/decidable.go b/snow/choices/decidable.go index b49cd75d3a1d..4c9ba886b105 100644 --- a/snow/choices/decidable.go +++ b/snow/choices/decidable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices diff --git a/snow/choices/status.go b/snow/choices/status.go index 255356b73960..ff530e9b7547 100644 --- a/snow/choices/status.go +++ b/snow/choices/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package choices @@ -29,22 +29,19 @@ func (s Status) MarshalJSON() ([]byte, error) { if err := s.Valid(); err != nil { return nil, err } - return []byte("\"" + s.String() + "\""), nil + return []byte(`"` + s.String() + `"`), nil } func (s *Status) UnmarshalJSON(b []byte) error { - str := string(b) - if str == "null" { - return nil - } - switch str { - case "\"Unknown\"": + switch string(b) { + case "null": + case `"Unknown"`: *s = Unknown - case "\"Processing\"": + case `"Processing"`: *s = Processing - case "\"Rejected\"": + case `"Rejected"`: *s = Rejected - case "\"Accepted\"": + case `"Accepted"`: *s = Accepted default: return errUnknownStatus diff --git a/snow/choices/status_test.go b/snow/choices/status_test.go index 59d2c4071fc5..5134ca2b752f 100644 --- a/snow/choices/status_test.go +++ b/snow/choices/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices diff --git a/snow/choices/test_decidable.go b/snow/choices/test_decidable.go index 055a54050d32..39e8ed67b7c1 100644 --- a/snow/choices/test_decidable.go +++ b/snow/choices/test_decidable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices diff --git a/snow/consensus/avalanche/test_vertex.go b/snow/consensus/avalanche/test_vertex.go index 60bfdc10c1b0..a3bc2fb06723 100644 --- a/snow/consensus/avalanche/test_vertex.go +++ b/snow/consensus/avalanche/test_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/snow/consensus/avalanche/vertex.go b/snow/consensus/avalanche/vertex.go index 9f8af73264fe..0356dc1902da 100644 --- a/snow/consensus/avalanche/vertex.go +++ b/snow/consensus/avalanche/vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/snow/consensus/metrics/height.go b/snow/consensus/metrics/height.go deleted file mode 100644 index d491f316d70b..000000000000 --- a/snow/consensus/metrics/height.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
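Editorial note: the backquote rewrite of Status.MarshalJSON/UnmarshalJSON above only changes source-level quoting; the wire format is unchanged, and "null" is still accepted and leaves the value untouched. A small round-trip sketch (output comments are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ava-labs/avalanchego/snow/choices"
)

func main() {
	b, _ := json.Marshal(choices.Accepted)
	fmt.Println(string(b)) // "Accepted"

	var s choices.Status
	_ = json.Unmarshal([]byte(`"Processing"`), &s)
	fmt.Println(s == choices.Processing) // true

	// "null" is a no-op: the status keeps its previous value.
	_ = json.Unmarshal([]byte(`null`), &s)
	fmt.Println(s == choices.Processing) // still true
}
```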
- -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -var _ Height = (*height)(nil) - -// Height reports the last accepted height -type Height interface { - Accepted(height uint64) -} - -type height struct { - // lastAcceptedHeight keeps track of the last accepted height - lastAcceptedHeight prometheus.Gauge -} - -func NewHeight(namespace string, reg prometheus.Registerer) (Height, error) { - h := &height{ - lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_height", - Help: "Last height accepted", - }), - } - return h, reg.Register(h.lastAcceptedHeight) -} - -func (h *height) Accepted(height uint64) { - h.lastAcceptedHeight.Set(float64(height)) -} diff --git a/snow/consensus/metrics/latency.go b/snow/consensus/metrics/latency.go deleted file mode 100644 index 4f5d413a6542..000000000000 --- a/snow/consensus/metrics/latency.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "fmt" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ Latency = (*latency)(nil) - -type Latency interface { - // Issued marks the item as having been issued. - Issued(id ids.ID, pollNumber uint64) - - // Accepted marks the item as having been accepted. - // Pass the container size in bytes for metrics tracking. - Accepted(id ids.ID, pollNumber uint64, containerSize int) - - // Rejected marks the item as having been rejected. - // Pass the container size in bytes for metrics tracking. - Rejected(id ids.ID, pollNumber uint64, containerSize int) - - // MeasureAndGetOldestDuration returns the amount of time the oldest item - // has been processing. - MeasureAndGetOldestDuration() time.Duration - - // NumProcessing returns the number of currently processing items. - NumProcessing() int -} - -type opStart struct { - time time.Time - pollNumber uint64 -} - -// Latency reports commonly used consensus latency metrics. -type latency struct { - // ProcessingEntries keeps track of the [opStart] that each item was issued - // into the consensus instance. This is used to calculate the amount of time - // to accept or reject the item. - processingEntries linkedhashmap.LinkedHashmap[ids.ID, opStart] - - // log reports anomalous events. - log logging.Logger - - // numProcessing keeps track of the number of items processing - numProcessing prometheus.Gauge - - // pollsAccepted tracks the number of polls that an item was in processing - // for before being accepted - pollsAccepted metric.Averager - - // pollsRejected tracks the number of polls that an item was in processing - // for before being rejected - pollsRejected metric.Averager - - // latAccepted tracks the number of nanoseconds that an item was processing - // before being accepted - latAccepted metric.Averager - containerSizeAcceptedSum prometheus.Gauge - - // rejected tracks the number of nanoseconds that an item was processing - // before being rejected - latRejected metric.Averager - containerSizeRejectedSum prometheus.Gauge -} - -// Initialize the metrics with the provided names. 
-func NewLatency(metricName, descriptionName string, log logging.Logger, namespace string, reg prometheus.Registerer) (Latency, error) { - errs := wrappers.Errs{} - l := &latency{ - processingEntries: linkedhashmap.New[ids.ID, opStart](), - log: log, - - // e.g., - // "avalanche_7y7zwo7XatqnX4dtTakLo32o7jkMX4XuDa26WaxbCXoCT1qKK_blks_processing" to count how blocks are currently processing - numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_processing", metricName), - Help: fmt.Sprintf("Number of currently processing %s", metricName), - }), - - pollsAccepted: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_polls_accepted", metricName), - fmt.Sprintf("number of polls from issuance of a %s to its acceptance", descriptionName), - reg, - &errs, - ), - pollsRejected: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_polls_rejected", metricName), - fmt.Sprintf("number of polls from issuance of a %s to its rejection", descriptionName), - reg, - &errs, - ), - - // e.g., - // "avalanche_C_blks_accepted_count" to count how many "Observe" gets called -- count all "Accept" - // "avalanche_C_blks_accepted_sum" to count how many ns have elapsed since its issuance on acceptance - // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in ns - // "avalanche_C_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes - // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average block size - latAccepted: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_accepted", metricName), - fmt.Sprintf("time (in ns) from issuance of a %s to its acceptance", descriptionName), - reg, - &errs, - ), - containerSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_accepted_container_size_sum", metricName), - Help: fmt.Sprintf("Cumulative sum of container size of all accepted %s", metricName), - }), - - // e.g., - // "avalanche_P_blks_rejected_count" to count how many "Observe" gets called -- count all "Reject" - // "avalanche_P_blks_rejected_sum" to count how many ns have elapsed since its issuance on rejection - // "avalanche_P_blks_accepted_sum / avalanche_P_blks_accepted_count" is the average block acceptance latency in ns - // "avalanche_P_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes - // "avalanche_P_blks_accepted_container_size_sum / avalanche_P_blks_accepted_count" is the average block size - latRejected: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_rejected", metricName), - fmt.Sprintf("time (in ns) from issuance of a %s to its rejection", descriptionName), - reg, - &errs, - ), - containerSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_rejected_container_size_sum", metricName), - Help: fmt.Sprintf("Cumulative sum of container size of all rejected %s", metricName), - }), - } - errs.Add( - reg.Register(l.numProcessing), - reg.Register(l.containerSizeAcceptedSum), - reg.Register(l.containerSizeRejectedSum), - ) - return l, errs.Err -} - -func (l *latency) Issued(id ids.ID, pollNumber uint64) { - l.processingEntries.Put(id, opStart{ - time: time.Now(), - pollNumber: pollNumber, - }) - l.numProcessing.Inc() -} - -func (l *latency) Accepted(id ids.ID, pollNumber uint64, containerSize int) { - start, ok := l.processingEntries.Get(id) - if !ok { - 
l.log.Debug("unable to measure tx latency", - zap.Stringer("status", choices.Accepted), - zap.Stringer("txID", id), - ) - return - } - l.processingEntries.Delete(id) - - l.pollsAccepted.Observe(float64(pollNumber - start.pollNumber)) - - duration := time.Since(start.time) - l.latAccepted.Observe(float64(duration)) - l.numProcessing.Dec() - - l.containerSizeAcceptedSum.Add(float64(containerSize)) -} - -func (l *latency) Rejected(id ids.ID, pollNumber uint64, containerSize int) { - start, ok := l.processingEntries.Get(id) - if !ok { - l.log.Debug("unable to measure tx latency", - zap.Stringer("status", choices.Rejected), - zap.Stringer("txID", id), - ) - return - } - l.processingEntries.Delete(id) - - l.pollsRejected.Observe(float64(pollNumber - start.pollNumber)) - - duration := time.Since(start.time) - l.latRejected.Observe(float64(duration)) - l.numProcessing.Dec() - - l.containerSizeRejectedSum.Add(float64(containerSize)) -} - -func (l *latency) MeasureAndGetOldestDuration() time.Duration { - _, oldestOp, exists := l.processingEntries.Oldest() - if !exists { - return 0 - } - return time.Since(oldestOp.time) -} - -func (l *latency) NumProcessing() int { - return l.processingEntries.Len() -} diff --git a/snow/consensus/metrics/polls.go b/snow/consensus/metrics/polls.go deleted file mode 100644 index 589833954f6b..000000000000 --- a/snow/consensus/metrics/polls.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" -) - -var _ Polls = (*polls)(nil) - -// Polls reports commonly used consensus poll metrics. -type Polls interface { - Successful() - Failed() -} - -type polls struct { - // numFailedPolls keeps track of the number of polls that failed - numFailedPolls prometheus.Counter - - // numSuccessfulPolls keeps track of the number of polls that succeeded - numSuccessfulPolls prometheus.Counter -} - -func NewPolls(namespace string, reg prometheus.Registerer) (Polls, error) { - p := &polls{ - numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_successful", - Help: "Number of successful polls", - }), - numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_failed", - Help: "Number of failed polls", - }), - } - err := utils.Err( - reg.Register(p.numFailedPolls), - reg.Register(p.numSuccessfulPolls), - ) - return p, err -} - -func (p *polls) Failed() { - p.numFailedPolls.Inc() -} - -func (p *polls) Successful() { - p.numSuccessfulPolls.Inc() -} diff --git a/snow/consensus/metrics/timestamp.go b/snow/consensus/metrics/timestamp.go deleted file mode 100644 index 0e784fa53454..000000000000 --- a/snow/consensus/metrics/timestamp.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var _ Timestamp = (*timestamp)(nil) - -// Timestamp reports the last accepted block time, -// to track it in unix seconds. 
-type Timestamp interface { - Accepted(ts time.Time) -} - -type timestamp struct { - // lastAcceptedTimestamp keeps track of the last accepted timestamp - lastAcceptedTimestamp prometheus.Gauge -} - -func NewTimestamp(namespace string, reg prometheus.Registerer) (Timestamp, error) { - t := &timestamp{ - lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_timestamp", - Help: "Last accepted block timestamp in unix seconds", - }), - } - return t, reg.Register(t.lastAcceptedTimestamp) -} - -func (t *timestamp) Accepted(ts time.Time) { - t.lastAcceptedTimestamp.Set(float64(ts.Unix())) -} diff --git a/snow/consensus/snowball/binary_slush.go b/snow/consensus/snowball/binary_slush.go index b4e1bc2ace08..a440fce0e1ec 100644 --- a/snow/consensus/snowball/binary_slush.go +++ b/snow/consensus/snowball/binary_slush.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go index aa1dc37bbe34..f1b213ad98fc 100644 --- a/snow/consensus/snowball/binary_snowball.go +++ b/snow/consensus/snowball/binary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index 42b6b404caa0..2c2a8421e043 100644 --- a/snow/consensus/snowball/binary_snowball_test.go +++ b/snow/consensus/snowball/binary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index d95ef9709ec6..139bd40361f7 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go index 2f14396da959..085b94c5f450 100644 --- a/snow/consensus/snowball/binary_snowflake_test.go +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/consensus.go b/snow/consensus/snowball/consensus.go index 3f3c508af053..e28f4cad4d36 100644 --- a/snow/consensus/snowball/consensus.go +++ b/snow/consensus/snowball/consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball diff --git a/snow/consensus/snowball/consensus_performance_test.go b/snow/consensus/snowball/consensus_performance_test.go index a84e1e60a9c7..9ebb3362720d 100644 --- a/snow/consensus/snowball/consensus_performance_test.go +++ b/snow/consensus/snowball/consensus_performance_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/sampler" + "gonum.org/v1/gonum/mathext/prng" ) // Test that a network running the lower AlphaPreference converges faster than a @@ -16,37 +16,38 @@ import ( func TestDualAlphaOptimization(t *testing.T) { require := require.New(t) - numColors := 10 - numNodes := 100 - params := Parameters{ - K: 20, - AlphaPreference: 15, - AlphaConfidence: 15, - BetaVirtuous: 15, - BetaRogue: 20, - } - seed := int64(0) - - singleAlphaNetwork := Network{} - singleAlphaNetwork.Initialize(params, numColors) + var ( + numColors = 10 + numNodes = 100 + params = Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 15, + BetaRogue: 20, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) + + singleAlphaNetwork := NewNetwork(params, numColors, source) params.AlphaPreference = params.K/2 + 1 - dualAlphaNetwork := Network{} - dualAlphaNetwork.Initialize(params, numColors) + dualAlphaNetwork := NewNetwork(params, numColors, source) - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { dualAlphaNetwork.AddNode(NewTree) } - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { singleAlphaNetwork.AddNode(NewTree) } // Although this can theoretically fail with a correct implementation, it // shouldn't in practice - runNetworksInLockstep(require, seed, &dualAlphaNetwork, &singleAlphaNetwork) + runNetworksInLockstep(require, seed, source, dualAlphaNetwork, singleAlphaNetwork) } // Test that a network running the snowball tree converges faster than a network @@ -54,38 +55,39 @@ func TestDualAlphaOptimization(t *testing.T) { func TestTreeConvergenceOptimization(t *testing.T) { require := require.New(t) - numColors := 10 - numNodes := 100 - params := DefaultParameters - seed := int64(0) - - treeNetwork := Network{} - treeNetwork.Initialize(params, numColors) + var ( + numColors = 10 + numNodes = 100 + params = DefaultParameters + seed uint64 = 0 + source = prng.NewMT19937() + ) - flatNetwork := treeNetwork + treeNetwork := NewNetwork(params, numColors, source) + flatNetwork := NewNetwork(params, numColors, source) - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { treeNetwork.AddNode(NewTree) } - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { flatNetwork.AddNode(NewFlat) } // Although this can theoretically fail with a correct implementation, it // shouldn't in practice - runNetworksInLockstep(require, seed, &treeNetwork, &flatNetwork) + runNetworksInLockstep(require, seed, source, treeNetwork, flatNetwork) } -func runNetworksInLockstep(require *require.Assertions, seed int64, fast *Network, slow *Network) { +func runNetworksInLockstep(require *require.Assertions, seed uint64, source *prng.MT19937, fast *Network, slow *Network) { numRounds := 0 for !fast.Finalized() && !fast.Disagreement() && !slow.Finalized() && !slow.Disagreement() { - sampler.Seed(int64(numRounds) + seed) + source.Seed(uint64(numRounds) + seed) 
fast.Round() - sampler.Seed(int64(numRounds) + seed) + source.Seed(uint64(numRounds) + seed) slow.Round() numRounds++ } diff --git a/snow/consensus/snowball/consensus_reversibility_test.go b/snow/consensus/snowball/consensus_reversibility_test.go index fd03a0411581..d0f3065a9db9 100644 --- a/snow/consensus/snowball/consensus_reversibility_test.go +++ b/snow/consensus/snowball/consensus_reversibility_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -8,23 +8,25 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/sampler" + "gonum.org/v1/gonum/mathext/prng" ) func TestSnowballGovernance(t *testing.T) { require := require.New(t) - numColors := 2 - numNodes := 100 - numByzantine := 10 - numRed := 55 - params := DefaultParameters - seed := int64(0) + var ( + numColors = 2 + numNodes = 100 + numByzantine = 10 + numRed = 55 + params = DefaultParameters + seed uint64 = 0 + source = prng.NewMT19937() + ) - nBitwise := Network{} - nBitwise.Initialize(params, numColors) + nBitwise := NewNetwork(params, numColors, source) - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numRed; i++ { nBitwise.AddNodeSpecificColor(NewTree, 0, []int{1}) } diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 484c5c9fbc87..264edaa733d9 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/flat.go b/snow/consensus/snowball/flat.go index 7f633efb8006..97c549816be0 100644 --- a/snow/consensus/snowball/flat.go +++ b/snow/consensus/snowball/flat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go index dac78fc0cede..38ca57d83b0e 100644 --- a/snow/consensus/snowball/flat_test.go +++ b/snow/consensus/snowball/flat_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/network_test.go b/snow/consensus/snowball/network_test.go index 711bbf89010e..56e7d0ca7dbc 100644 --- a/snow/consensus/snowball/network_test.go +++ b/snow/consensus/snowball/network_test.go @@ -1,13 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball import ( - "math/rand" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -16,20 +15,24 @@ type newConsensusFunc func(params Parameters, choice ids.ID) Consensus type Network struct { params Parameters colors []ids.ID + rngSource sampler.Source nodes, running []Consensus } -// Initialize sets the parameters for the network and adds [numColors] different -// possible colors to the network configuration. -func (n *Network) Initialize(params Parameters, numColors int) { - n.params = params +// Create a new network with [numColors] different possible colors to finalize. +func NewNetwork(params Parameters, numColors int, rngSource sampler.Source) *Network { + n := &Network{ + params: params, + rngSource: rngSource, + } for i := 0; i < numColors; i++ { n.colors = append(n.colors, ids.Empty.Prefix(uint64(i))) } + return n } func (n *Network) AddNode(newConsensusFunc newConsensusFunc) Consensus { - s := sampler.NewUniform() + s := sampler.NewDeterministicUniform(n.rngSource) s.Initialize(uint64(len(n.colors))) indices, _ := s.Sample(len(n.colors)) @@ -78,15 +81,14 @@ func (n *Network) Finalized() bool { // performing an unbiased poll of the nodes in the network for that node. func (n *Network) Round() { if len(n.running) > 0 { - runningInd := rand.Intn(len(n.running)) // #nosec G404 + s := sampler.NewDeterministicUniform(n.rngSource) + + s.Initialize(uint64(len(n.running))) + runningInd, _ := s.Next() running := n.running[runningInd] - s := sampler.NewUniform() s.Initialize(uint64(len(n.nodes))) - count := len(n.nodes) - if count > n.params.K { - count = n.params.K - } + count := math.Min(n.params.K, len(n.nodes)) indices, _ := s.Sample(count) sampledColors := bag.Bag[ids.ID]{} for _, index := range indices { diff --git a/snow/consensus/snowball/nnary_slush.go b/snow/consensus/snowball/nnary_slush.go index 2987861f7943..dad85252906f 100644 --- a/snow/consensus/snowball/nnary_slush.go +++ b/snow/consensus/snowball/nnary_slush.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go index 0fe8c25f8617..2a968c0ba91c 100644 --- a/snow/consensus/snowball/nnary_snowball.go +++ b/snow/consensus/snowball/nnary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index 10b63ce647cb..18bea5eef65e 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index 503fcd614c7b..de898f155f38 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go index 07601a6065eb..5df8c2966335 100644 --- a/snow/consensus/snowball/nnary_snowflake_test.go +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index 29bb0ba9e215..bf458fbf9f40 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -17,21 +17,21 @@ const ( // 1 means MinPercentConnected = 1 (fully connected). MinPercentConnectedBuffer = .2 - errMsg = "" + - `__________ .___` + "\n" + - `\______ \____________ __| _/__.__.` + "\n" + - ` | | _/\_ __ \__ \ / __ < | |` + "\n" + - ` | | \ | | \// __ \_/ /_/ |\___ |` + "\n" + - ` |______ / |__| (____ /\____ |/ ____|` + "\n" + - ` \/ \/ \/\/` + "\n" + - "\n" + - ` 🏆 🏆 🏆 🏆 🏆 🏆 🏆` + "\n" + - ` ________ ________ ________________` + "\n" + - ` / _____/ \_____ \ / _ \__ ___/` + "\n" + - `/ \ ___ / | \ / /_\ \| |` + "\n" + - `\ \_\ \/ | \/ | \ |` + "\n" + - ` \______ /\_______ /\____|__ /____|` + "\n" + - ` \/ \/ \/` + "\n" + errMsg = `__________ .___ +\______ \____________ __| _/__.__. + | | _/\_ __ \__ \ / __ < | | + | | \ | | \// __ \_/ /_/ |\___ | + |______ / |__| (____ /\____ |/ ____| + \/ \/ \/\/ + + 🏆 🏆 🏆 🏆 🏆 🏆 🏆 + ________ ________ ________________ + / _____/ \_____ \ / _ \__ ___/ +/ \ ___ / | \ / /_\ \| | +\ \_\ \/ | \/ | \ | + \______ /\_______ /\____|__ /____| + \/ \/ \/ +` ) var ( @@ -39,7 +39,7 @@ var ( K: 20, AlphaPreference: 15, AlphaConfidence: 15, - BetaVirtuous: 15, + BetaVirtuous: 20, BetaRogue: 20, ConcurrentRepolls: 4, OptimalProcessing: 10, diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index 26ffab8763c7..525001fd535f 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go index 052e75ec8d35..2278975f8843 100644 --- a/snow/consensus/snowball/tree.go +++ b/snow/consensus/snowball/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go index 8b0f6159df72..99bf25769f98 100644 --- a/snow/consensus/snowball/tree_test.go +++ b/snow/consensus/snowball/tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//nolint:goconst @@ -10,9 +10,10 @@ import ( "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" ) const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" @@ -147,8 +148,8 @@ func TestSnowballLastBinary(t *testing.T) { // Should do nothing tree.Add(one) - expected := "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255" + expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -158,8 +159,8 @@ func TestSnowballLastBinary(t *testing.T) { require.Equal(one, tree.Preference()) require.False(tree.Finalized()) - expected = "SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255)\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255" + expected = `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255` require.Equal(expected, tree.String()) require.True(tree.RecordPoll(oneBag)) @@ -190,12 +191,12 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { tree.Add(four) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -205,11 +206,11 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - 
" SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -218,11 +219,11 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { tree.Add(two) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -246,9 +247,9 @@ func TestSnowballNewUnary(t *testing.T) { tree.Add(one) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -258,9 +259,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " SB(PreferenceStrength = 0, 
SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -269,9 +270,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -297,13 +298,13 @@ func TestSnowballTransitiveReset(t *testing.T) { tree.Add(eight) { - expected := "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -313,13 +314,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) 
Bits = [2, 3)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -329,13 +330,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := "SB(PreferenceStrength = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -344,13 +345,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + 
- " SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -490,11 +491,11 @@ func TestSnowballAddRejected(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.False(tree.Finalized()) @@ -503,11 +504,11 @@ func TestSnowballAddRejected(t *testing.T) { tree.Add(c0101) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits 
= [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.False(tree.Finalized()) @@ -539,11 +540,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -553,11 +554,11 @@ func TestSnowballResetChild(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -566,11 +567,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, 
Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -602,11 +603,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -616,11 +617,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c1000Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -629,11 +630,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] 
= 2, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -700,9 +701,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -711,11 +712,11 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -724,14 +725,14 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, 
Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -741,13 +742,13 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) 
require.False(tree.Finalized()) @@ -757,9 +758,9 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -796,22 +797,23 @@ func TestSnowballDoubleAdd(t *testing.T) { func TestSnowballConsistent(t *testing.T) { require := require.New(t) - numColors := 50 - numNodes := 100 - params := Parameters{ - K: 20, - AlphaPreference: 15, - AlphaConfidence: 15, - BetaVirtuous: 20, - BetaRogue: 30, - } - seed := int64(0) - - sampler.Seed(seed) - - n := Network{} - n.Initialize(params, numColors) - + var ( + numColors = 50 + numNodes = 100 + params = Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 20, + BetaRogue: 30, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) + + n := NewNetwork(params, numColors, source) + + source.Seed(seed) for i := 0; i < numNodes; i++ { n.AddNode(NewTree) } @@ -847,9 +849,9 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -858,12 +860,12 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, 
PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -873,11 +875,11 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -886,11 +888,11 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0100) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -900,9 +902,9 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(PreferenceStrength = 0, SF(Confidence 
= 0, Finalized = false)) Bits = [3, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 6223d6f3a0cc..3e4477b4b82a 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 6fd6cd9b40c7..d94d2b61d63d 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go index 68def8663724..6bcfebe23fe8 100644 --- a/snow/consensus/snowball/unary_snowflake.go +++ b/snow/consensus/snowball/unary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go index 162f4a56e200..0791b688065e 100644 --- a/snow/consensus/snowball/unary_snowflake_test.go +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/snow/consensus/snowman/block.go b/snow/consensus/snowman/block.go index b5d79983ef6a..c950ac3c29ee 100644 --- a/snow/consensus/snowman/block.go +++ b/snow/consensus/snowman/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/consensus/snowman/bootstrapper/majority.go b/snow/consensus/snowman/bootstrapper/majority.go new file mode 100644 index 000000000000..7fe028288656 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/majority.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Majority)(nil) + +// Majority implements the bootstrapping poll to filter the initial set of +// potentially acceptable blocks into a set of accepted blocks to sync to.
+// +// Once the last accepted blocks have been fetched from the initial set of +// peers, the set of blocks are sent to all peers. Each peer is expected to +// filter the provided blocks and report which of them they consider accepted. +// If a majority of the peers report that a block is accepted, then the node +// will consider that block to be accepted by the network. This assumes that a +// majority of the network is correct. If a majority of the network is +// malicious, the node may accept an incorrect block. +type Majority struct { + requests + + log logging.Logger + nodeWeights map[ids.NodeID]uint64 + + // received maps the blockID to the total sum of weight that has reported + // that block as accepted. + received map[ids.ID]uint64 + accepted []ids.ID +} + +func NewMajority( + log logging.Logger, + nodeWeights map[ids.NodeID]uint64, + maxOutstanding int, +) *Majority { + return &Majority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: set.Of(maps.Keys(nodeWeights)...), + }, + log: log, + nodeWeights: nodeWeights, + received: make(map[ids.ID]uint64), + } +} + +func (m *Majority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. + m.log.Error("received unexpected opinion", + zap.String("pollType", "majority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + weight := m.nodeWeights[nodeID] + for blkID := range blkIDs { + newWeight, err := math.Add64(m.received[blkID], weight) + if err != nil { + return err + } + m.received[blkID] = newWeight + } + + if !m.finished() { + return nil + } + + var ( + totalWeight uint64 + err error + ) + for _, weight := range m.nodeWeights { + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return err + } + } + + requiredWeight := totalWeight/2 + 1 + for blkID, weight := range m.received { + if weight >= requiredWeight { + m.accepted = append(m.accepted, blkID) + } + } + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "majority"), + zap.Stringers("accepted", m.accepted), + ) + return nil +} + +func (m *Majority) Result(context.Context) ([]ids.ID, bool) { + return m.accepted, m.finished() +} diff --git a/snow/consensus/snowman/bootstrapper/majority_test.go b/snow/consensus/snowman/bootstrapper/majority_test.go new file mode 100644 index 000000000000..819840f28311 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/majority_test.go @@ -0,0 +1,396 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
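The Majority type above is only half of the protocol its doc comment describes; a caller still has to drive the GetPeers / RecordOpinion / Result loop itself. Below is a minimal, illustrative sketch of that loop against the Poll interface defined in poll.go later in this diff. The askForAccepted callback is hypothetical and stands in for the engine's real message layer, and the sketch assumes every polled peer eventually answers.

package bootstrapexample

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper"
	"github.com/ava-labs/avalanchego/utils/set"
)

// runPoll repeatedly requests peers, records their opinions, and returns the
// result once the poll reports that it has finalized. askForAccepted is a
// hypothetical helper that asks a single peer which blocks it has accepted.
func runPoll(
	ctx context.Context,
	poll bootstrapper.Poll,
	askForAccepted func(ids.NodeID) set.Set[ids.ID],
) ([]ids.ID, error) {
	for {
		// GetPeers hands out at most maxOutstanding peers at a time.
		for nodeID := range poll.GetPeers(ctx) {
			if err := poll.RecordOpinion(ctx, nodeID, askForAccepted(nodeID)); err != nil {
				return nil, err
			}
		}
		if blkIDs, finalized := poll.Result(ctx); finalized {
			return blkIDs, nil
		}
	}
}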
+ +package bootstrapper + +import ( + "context" + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestNewMajority(t *testing.T) { + majority := NewMajority( + logging.NoLog{}, // log + map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, // nodeWeights + 2, // maxOutstanding + ) + + expectedMajority := &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + } + require.Equal(t, expectedMajority, majority) +} + +func TestMajorityGetPeers(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: nil, + }, + { + name: "send until max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + peers := test.majority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.majority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMajorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + majority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + 
nodeID: nodeID0, + blkIDs: nil, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: map[ids.ID]uint64{ + blkID0: 3, + }, + }, + expectedErr: nil, + }, + { + name: "overflow during response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "overflow during final response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: math.MaxUint64, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "finished after response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 1, + }, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 2, + }, + accepted: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.majority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.majority) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMajorityResult(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + majority: 
&Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + accepted: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 2, + }, + accepted: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.majority.Result(context.Background()) + require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/snow/consensus/snowman/bootstrapper/minority.go b/snow/consensus/snowman/bootstrapper/minority.go new file mode 100644 index 000000000000..4674921aaf6b --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/minority.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Minority)(nil) + +// Minority implements the bootstrapping poll to determine the initial set of +// potentially acceptable blocks. +// +// This poll fetches the last accepted block from an initial set of peers. In +// order for the protocol to find a recently accepted block, there must be at +// least one correct node in this set of peers. If there is not a correct node +// in the set of peers, the node will not accept an incorrect block. However, +// the node may be unable to find an acceptable block. +type Minority struct { + requests + + log logging.Logger + + receivedSet set.Set[ids.ID] + received []ids.ID +} + +func NewMinority( + log logging.Logger, + frontierNodes set.Set[ids.NodeID], + maxOutstanding int, +) *Minority { + return &Minority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: frontierNodes, + }, + log: log, + } +} + +func (m *Minority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. + m.log.Error("received unexpected opinion", + zap.String("pollType", "minority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + m.receivedSet.Union(blkIDs) + + if !m.finished() { + return nil + } + + m.received = m.receivedSet.List() + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "minority"), + zap.Stringers("frontier", m.received), + ) + return nil +} + +func (m *Minority) Result(context.Context) ([]ids.ID, bool) { + return m.received, m.finished() +} diff --git a/snow/consensus/snowman/bootstrapper/minority_test.go b/snow/consensus/snowman/bootstrapper/minority_test.go new file mode 100644 index 000000000000..c44b314f3443 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/minority_test.go @@ -0,0 +1,242 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
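For illustration, a small sketch of the union behaviour described in the Minority doc comment above: each sampled frontier peer reports its last accepted blocks, and the poll's result is simply the union of those reports once everyone has answered. The peer and block IDs are placeholders, and the synchronous call pattern is only for exposition.

package bootstrapexample

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper"
	"github.com/ava-labs/avalanchego/utils/logging"
	"github.com/ava-labs/avalanchego/utils/set"
)

// collectFrontier polls two placeholder peers for their last accepted blocks
// and returns the union of their answers once both have responded.
func collectFrontier(ctx context.Context, peerA, peerB ids.NodeID, blkA, blkB ids.ID) ([]ids.ID, bool, error) {
	poll := bootstrapper.NewMinority(logging.NoLog{}, set.Of(peerA, peerB), 2)

	// Mark both peers as outstanding; with maxOutstanding = 2 they are both
	// handed out in a single call.
	_ = poll.GetPeers(ctx)

	// Each peer reports a (possibly different) last accepted block.
	if err := poll.RecordOpinion(ctx, peerA, set.Of(blkA)); err != nil {
		return nil, false, err
	}
	if err := poll.RecordOpinion(ctx, peerB, set.Of(blkB)); err != nil {
		return nil, false, err
	}

	// The frontier is {blkA, blkB}: a set of candidates, not a decision. The
	// Majority poll is what later narrows it down to accepted blocks.
	frontier, finalized := poll.Result(ctx)
	return frontier, finalized, nil
}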
+ +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestNewMinority(t *testing.T) { + minority := NewMinority( + logging.NoLog{}, // log + set.Of(nodeID0), // frontierNodes + 2, // maxOutstanding + ) + + expectedMinority := &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + } + require.Equal(t, expectedMinority, minority) +} + +func TestMinorityGetPeers(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: nil, + }, + { + name: "send until max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + peers := test.minority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.minority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMinorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + minority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID0, + blkIDs: nil, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + }, + expectedErr: nil, + }, + { + name: "finished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: 
&Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID1), + received: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.minority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.minority) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMinorityResult(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + received: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + received: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.minority.Result(context.Background()) + require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/snow/consensus/snowman/bootstrapper/noop.go b/snow/consensus/snowman/bootstrapper/noop.go new file mode 100644 index 000000000000..6d97eed069a8 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/noop.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var Noop Poll = noop{} + +type noop struct{} + +func (noop) GetPeers(context.Context) set.Set[ids.NodeID] { + return nil +} + +func (noop) RecordOpinion(context.Context, ids.NodeID, set.Set[ids.ID]) error { + return nil +} + +func (noop) Result(context.Context) ([]ids.ID, bool) { + return nil, false +} diff --git a/snow/consensus/snowman/bootstrapper/noop_test.go b/snow/consensus/snowman/bootstrapper/noop_test.go new file mode 100644 index 000000000000..e0bccb8aad7f --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/noop_test.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNoop(t *testing.T) { + require := require.New(t) + + require.Empty(Noop.GetPeers(context.Background())) + + require.NoError(Noop.RecordOpinion(context.Background(), nodeID0, nil)) + + blkIDs, finalized := Noop.Result(context.Background()) + require.Empty(blkIDs) + require.False(finalized) +} diff --git a/snow/consensus/snowman/bootstrapper/poll.go b/snow/consensus/snowman/bootstrapper/poll.go new file mode 100644 index 000000000000..0d3eb7143167 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/poll.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type Poll interface { + // GetPeers returns the set of peers whose opinion should be requested. It + // is expected to repeatedly call this function along with [RecordOpinion] + // until [Result] returns finalized. + GetPeers(ctx context.Context) (peers set.Set[ids.NodeID]) + // RecordOpinion of a node whose opinion was requested. + RecordOpinion(ctx context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error + // Result returns the evaluation of all the peer's opinions along with a + // flag to identify that the result has finished being calculated. + Result(ctx context.Context) (blkIDs []ids.ID, finalized bool) +} diff --git a/snow/consensus/snowman/bootstrapper/poll_test.go b/snow/consensus/snowman/bootstrapper/poll_test.go new file mode 100644 index 000000000000..bbdcc0db51a4 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/poll_test.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import "github.com/ava-labs/avalanchego/ids" + +var ( + nodeID0 = ids.GenerateTestNodeID() + nodeID1 = ids.GenerateTestNodeID() + nodeID2 = ids.GenerateTestNodeID() + + blkID0 = ids.GenerateTestID() + blkID1 = ids.GenerateTestID() +) diff --git a/snow/consensus/snowman/bootstrapper/requests.go b/snow/consensus/snowman/bootstrapper/requests.go new file mode 100644 index 000000000000..ebeaf57ac70f --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/requests.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +type requests struct { + maxOutstanding int + + pendingSend set.Set[ids.NodeID] + outstanding set.Set[ids.NodeID] +} + +func (r *requests) GetPeers(context.Context) set.Set[ids.NodeID] { + numPending := r.outstanding.Len() + if numPending >= r.maxOutstanding { + return nil + } + + numToSend := math.Min( + r.maxOutstanding-numPending, + r.pendingSend.Len(), + ) + nodeIDs := set.NewSet[ids.NodeID](numToSend) + for i := 0; i < numToSend; i++ { + nodeID, _ := r.pendingSend.Pop() + nodeIDs.Add(nodeID) + } + r.outstanding.Union(nodeIDs) + return nodeIDs +} + +func (r *requests) recordResponse(nodeID ids.NodeID) bool { + wasOutstanding := r.outstanding.Contains(nodeID) + r.outstanding.Remove(nodeID) + return wasOutstanding +} + +func (r *requests) finished() bool { + return r.pendingSend.Len() == 0 && r.outstanding.Len() == 0 +} diff --git a/snow/consensus/snowman/bootstrapper/sampler.go b/snow/consensus/snowman/bootstrapper/sampler.go new file mode 100644 index 000000000000..e23253864669 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/sampler.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" +) + +// Sample keys from [elements] uniformly by weight without replacement. The +// returned set will have size less than or equal to [maxSize]. This function +// will error if the sum of all weights overflows. 
+func Sample[T comparable](elements map[T]uint64, maxSize int) (set.Set[T], error) { + var ( + keys = make([]T, len(elements)) + weights = make([]uint64, len(elements)) + totalWeight uint64 + err error + ) + i := 0 + for key, weight := range elements { + keys[i] = key + weights[i] = weight + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return nil, err + } + i++ + } + + sampler := sampler.NewWeightedWithoutReplacement() + if err := sampler.Initialize(weights); err != nil { + return nil, err + } + + maxSize = int(math.Min(uint64(maxSize), totalWeight)) + indices, err := sampler.Sample(maxSize) + if err != nil { + return nil, err + } + + sampledElements := set.NewSet[T](maxSize) + for _, index := range indices { + sampledElements.Add(keys[index]) + } + return sampledElements, nil +} diff --git a/snow/consensus/snowman/bootstrapper/sampler_test.go b/snow/consensus/snowman/bootstrapper/sampler_test.go new file mode 100644 index 000000000000..b438a5fb2629 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/sampler_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestSample(t *testing.T) { + tests := []struct { + name string + elements map[ids.NodeID]uint64 + maxSize int + expectedSampled set.Set[ids.NodeID] + expectedErr error + }{ + { + name: "sample everything", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0, nodeID1), + expectedErr: nil, + }, + { + name: "limit sample due to too few elements", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "limit sample", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64 - 1, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "overflow", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: nil, + expectedErr: safemath.ErrOverflow, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + sampled, err := Sample(test.elements, test.maxSize) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedSampled, sampled) + }) + } +} diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 25b2c7242ec1..3f1006416366 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 401435738b18..15e56709dd28 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
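As a rough sketch of how the Sample helper above could tie this package together, the function below draws a small, stake-weighted set of frontier peers for a Minority poll and builds a Majority poll over the full weight map. The nodeWeights map, the maxOutstanding bound, and the helper itself are illustrative placeholders rather than the engine's actual wiring.

package bootstrapexample

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper"
	"github.com/ava-labs/avalanchego/utils/logging"
)

// newBootstrapPolls samples frontier peers proportionally to their weight and
// builds the two polls: Minority collects candidate blocks, Majority confirms
// which of them are backed by more than half of the total weight.
func newBootstrapPolls(
	log logging.Logger,
	nodeWeights map[ids.NodeID]uint64, // placeholder: validator stake weights
	maxOutstanding int, // placeholder: bound on in-flight requests
) (bootstrapper.Poll, bootstrapper.Poll, error) {
	// Sample draws without replacement and errors if the summed weights
	// overflow a uint64; the result has at most maxOutstanding peers.
	frontierNodes, err := bootstrapper.Sample(nodeWeights, maxOutstanding)
	if err != nil {
		return nil, nil, err
	}
	frontierPoll := bootstrapper.NewMinority(log, frontierNodes, maxOutstanding)
	acceptancePoll := bootstrapper.NewMajority(log, nodeWeights, maxOutstanding)
	return frontierPoll, acceptancePoll, nil
}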
package snowman @@ -17,12 +17,13 @@ import ( "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" ) type testFunc func(*testing.T, Factory) @@ -93,7 +94,8 @@ func InitializeTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -118,7 +120,8 @@ func NumProcessingTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -160,7 +163,8 @@ func AddToTailTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -199,7 +203,8 @@ func AddToNonTailTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -247,7 +252,8 @@ func AddToUnknownTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -287,7 +293,8 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -316,7 +323,8 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -353,7 +361,8 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -390,7 +399,8 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -429,7 +439,8 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -471,7 +482,8 @@ func RecordPollAcceptAndRejectTest(t 
*testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -524,7 +536,8 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { require := require.New(t) sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) registerer := prometheus.NewRegistry() ctx.Registerer = registerer @@ -587,7 +600,8 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -612,7 +626,8 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -682,7 +697,8 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -774,7 +790,8 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -815,7 +832,8 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 3, AlphaPreference: 3, @@ -925,7 +943,8 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1028,7 +1047,8 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1132,7 +1152,8 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1239,7 +1260,8 @@ func LastAcceptedTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1323,7 +1345,8 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := 
snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1352,7 +1375,8 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1381,7 +1405,8 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1410,7 +1435,8 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1449,7 +1475,8 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1486,7 +1513,8 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1532,7 +1560,8 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1585,28 +1614,30 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { func RandomizedConsistencyTest(t *testing.T, factory Factory) { require := require.New(t) - numColors := 50 - numNodes := 100 - params := snowball.Parameters{ - K: 20, - AlphaPreference: 15, - AlphaConfidence: 15, - BetaVirtuous: 20, - BetaRogue: 30, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - seed := int64(0) + var ( + numColors = 50 + numNodes = 100 + params = snowball.Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 20, + BetaRogue: 30, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) - sampler.Seed(seed) + source.Seed(seed) - n := Network{} - n.Initialize(params, numColors) + n := NewNetwork(params, numColors, source) for i := 0; i < numNodes; i++ { - require.NoError(n.AddNode(factory.New())) + require.NoError(n.AddNode(t, factory.New())) } for !n.Finalized() { @@ -1620,7 +1651,8 @@ func ErrorOnAddDecidedBlockTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1650,7 +1682,8 @@ func ErrorOnAddDuplicateBlockIDTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := 
snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -1714,7 +1747,8 @@ func RecordPollWithDefaultParameters(t *testing.T, factory Factory) { sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.DefaultParameters require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) diff --git a/snow/consensus/snowman/factory.go b/snow/consensus/snowman/factory.go index 06341981aef4..c2fc76e83ef9 100644 --- a/snow/consensus/snowman/factory.go +++ b/snow/consensus/snowman/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go new file mode 100644 index 000000000000..a052db5144d4 --- /dev/null +++ b/snow/consensus/snowman/metrics.go @@ -0,0 +1,272 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +type processingStart struct { + time time.Time + pollNumber uint64 +} + +type metrics struct { + log logging.Logger + + currentMaxVerifiedHeight uint64 + maxVerifiedHeight prometheus.Gauge + + lastAcceptedHeight prometheus.Gauge + lastAcceptedTimestamp prometheus.Gauge + + // processingBlocks keeps track of the [processingStart] that each block was + // issued into the consensus instance. This is used to calculate the amount + // of time to accept or reject the block. 
+ processingBlocks linkedhashmap.LinkedHashmap[ids.ID, processingStart] + + // numProcessing keeps track of the number of processing blocks + numProcessing prometheus.Gauge + + blockSizeAcceptedSum prometheus.Gauge + // pollsAccepted tracks the number of polls that a block was in processing + // for before being accepted + pollsAccepted metric.Averager + // latAccepted tracks the number of nanoseconds that a block was processing + // before being accepted + latAccepted metric.Averager + buildLatencyAccepted prometheus.Gauge + + blockSizeRejectedSum prometheus.Gauge + // pollsRejected tracks the number of polls that a block was in processing + // for before being rejected + pollsRejected metric.Averager + // latRejected tracks the number of nanoseconds that a block was processing + // before being rejected + latRejected metric.Averager + + // numFailedPolls keeps track of the number of polls that failed + numFailedPolls prometheus.Counter + + // numSuccessfulPolls keeps track of the number of polls that succeeded + numSuccessfulPolls prometheus.Counter +} + +func newMetrics( + log logging.Logger, + namespace string, + reg prometheus.Registerer, + lastAcceptedHeight uint64, + lastAcceptedTime time.Time, +) (*metrics, error) { + errs := wrappers.Errs{} + m := &metrics{ + log: log, + currentMaxVerifiedHeight: lastAcceptedHeight, + maxVerifiedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "max_verified_height", + Help: "highest verified height", + }), + lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "last_accepted_height", + Help: "last height accepted", + }), + lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "last_accepted_timestamp", + Help: "timestamp of the last accepted block in unix seconds", + }), + + processingBlocks: linkedhashmap.New[ids.ID, processingStart](), + + // e.g., + // "avalanche_X_blks_processing" reports how many blocks are currently processing + numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_processing", + Help: "number of currently processing blocks", + }), + + blockSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_accepted_container_size_sum", + Help: "cumulative size of all accepted blocks", + }), + pollsAccepted: metric.NewAveragerWithErrs( + namespace, + "blks_polls_accepted", + "number of polls from the issuance of a block to its acceptance", + reg, + &errs, + ), + // e.g., + // "avalanche_C_blks_accepted_count" reports how many times "Observe" has been called which is the total number of blocks accepted + // "avalanche_C_blks_accepted_sum" reports the cumulative sum of all block acceptance latencies in nanoseconds + // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in nanoseconds + // "avalanche_C_blks_accepted_container_size_sum" reports the cumulative sum of all accepted blocks' sizes in bytes + // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average accepted block size in bytes + latAccepted: metric.NewAveragerWithErrs( + namespace, + "blks_accepted", + "time (in ns) from the issuance of a block to its acceptance", + reg, + &errs, + ), + buildLatencyAccepted: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_build_accept_latency", + Help: "time (in ns) from the timestamp of a block to the time it was accepted", 
+ }), + + blockSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_rejected_container_size_sum", + Help: "cumulative size of all rejected blocks", + }), + pollsRejected: metric.NewAveragerWithErrs( + namespace, + "blks_polls_rejected", + "number of polls from the issuance of a block to its rejection", + reg, + &errs, + ), + // e.g., + // "avalanche_P_blks_rejected_count" reports how many times "Observe" has been called which is the total number of blocks rejected + // "avalanche_P_blks_rejected_sum" reports the cumulative sum of all block rejection latencies in nanoseconds + // "avalanche_P_blks_rejected_sum / avalanche_P_blks_rejected_count" is the average block rejection latency in nanoseconds + // "avalanche_P_blks_rejected_container_size_sum" reports the cumulative sum of all rejected blocks' sizes in bytes + // "avalanche_P_blks_rejected_container_size_sum / avalanche_P_blks_rejected_count" is the average rejected block size in bytes + latRejected: metric.NewAveragerWithErrs( + namespace, + "blks_rejected", + "time (in ns) from the issuance of a block to its rejection", + reg, + &errs, + ), + + numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "polls_successful", + Help: "number of successful polls", + }), + numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "polls_failed", + Help: "number of failed polls", + }), + } + + // Initially set the metrics for the last accepted block. + m.maxVerifiedHeight.Set(float64(lastAcceptedHeight)) + m.lastAcceptedHeight.Set(float64(lastAcceptedHeight)) + m.lastAcceptedTimestamp.Set(float64(lastAcceptedTime.Unix())) + + errs.Add( + reg.Register(m.maxVerifiedHeight), + reg.Register(m.lastAcceptedHeight), + reg.Register(m.lastAcceptedTimestamp), + reg.Register(m.numProcessing), + reg.Register(m.blockSizeAcceptedSum), + reg.Register(m.buildLatencyAccepted), + reg.Register(m.blockSizeRejectedSum), + reg.Register(m.numSuccessfulPolls), + reg.Register(m.numFailedPolls), + ) + return m, errs.Err +} + +func (m *metrics) Issued(blkID ids.ID, pollNumber uint64) { + m.processingBlocks.Put(blkID, processingStart{ + time: time.Now(), + pollNumber: pollNumber, + }) + m.numProcessing.Inc() +} + +func (m *metrics) Verified(height uint64) { + m.currentMaxVerifiedHeight = math.Max(m.currentMaxVerifiedHeight, height) + m.maxVerifiedHeight.Set(float64(m.currentMaxVerifiedHeight)) +} + +func (m *metrics) Accepted( + blkID ids.ID, + height uint64, + timestamp time.Time, + pollNumber uint64, + blockSize int, +) { + start, ok := m.processingBlocks.Get(blkID) + if !ok { + m.log.Error("unable to measure latency", + zap.Stringer("blkID", blkID), + zap.Stringer("status", choices.Accepted), + ) + return + } + m.lastAcceptedHeight.Set(float64(height)) + m.lastAcceptedTimestamp.Set(float64(timestamp.Unix())) + m.processingBlocks.Delete(blkID) + m.numProcessing.Dec() + + m.blockSizeAcceptedSum.Add(float64(blockSize)) + + m.pollsAccepted.Observe(float64(pollNumber - start.pollNumber)) + + now := time.Now() + processingDuration := now.Sub(start.time) + m.latAccepted.Observe(float64(processingDuration)) + + builtDuration := now.Sub(timestamp) + m.buildLatencyAccepted.Add(float64(builtDuration)) +} + +func (m *metrics) Rejected(blkID ids.ID, pollNumber uint64, blockSize int) { + start, ok := m.processingBlocks.Get(blkID) + if !ok { + m.log.Error("unable to measure latency", + zap.Stringer("blkID", blkID), + zap.Stringer("status", choices.Rejected), + ) 
+ return + } + m.processingBlocks.Delete(blkID) + m.numProcessing.Dec() + + m.blockSizeRejectedSum.Add(float64(blockSize)) + + m.pollsRejected.Observe(float64(pollNumber - start.pollNumber)) + + duration := time.Since(start.time) + m.latRejected.Observe(float64(duration)) +} + +func (m *metrics) MeasureAndGetOldestDuration() time.Duration { + _, oldestOp, exists := m.processingBlocks.Oldest() + if !exists { + return 0 + } + return time.Since(oldestOp.time) +} + +func (m *metrics) SuccessfulPoll() { + m.numSuccessfulPolls.Inc() +} + +func (m *metrics) FailedPoll() { + m.numFailedPolls.Inc() +} diff --git a/snow/consensus/snowman/mock_block.go b/snow/consensus/snowman/mock_block.go index f5b7422190e5..45393bfe7bdb 100644 --- a/snow/consensus/snowman/mock_block.go +++ b/snow/consensus/snowman/mock_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/consensus/snowman (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=snowman -destination=snow/consensus/snowman/mock_block.go github.com/ava-labs/avalanchego/snow/consensus/snowman Block +// // Package snowman is a generated GoMock package. package snowman @@ -49,7 +51,7 @@ func (m *MockBlock) Accept(arg0 context.Context) error { } // Accept indicates an expected call of Accept. -func (mr *MockBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Accept(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockBlock)(nil).Accept), arg0) } @@ -119,7 +121,7 @@ func (m *MockBlock) Reject(arg0 context.Context) error { } // Reject indicates an expected call of Reject. -func (mr *MockBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Reject(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockBlock)(nil).Reject), arg0) } @@ -161,7 +163,7 @@ func (m *MockBlock) Verify(arg0 context.Context) error { } // Verify indicates an expected call of Verify. -func (mr *MockBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Verify(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockBlock)(nil).Verify), arg0) } diff --git a/snow/consensus/snowman/network_test.go b/snow/consensus/snowman/network_test.go index ae855ab84ac6..aead346fb5e4 100644 --- a/snow/consensus/snowman/network_test.go +++ b/snow/consensus/snowman/network_test.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( "context" - "math/rand" + "testing" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" @@ -19,49 +19,57 @@ import ( type Network struct { params snowball.Parameters colors []*TestBlock + rngSource sampler.Source nodes, running []Consensus } -func (n *Network) shuffleColors() { - s := sampler.NewUniform() - s.Initialize(uint64(len(n.colors))) - indices, _ := s.Sample(len(n.colors)) - colors := []*TestBlock(nil) - for _, index := range indices { - colors = append(colors, n.colors[int(index)]) +func NewNetwork(params snowball.Parameters, numColors int, rngSource sampler.Source) *Network { + n := &Network{ + params: params, + colors: []*TestBlock{{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(rngSource.Uint64()), + StatusV: choices.Processing, + }, + ParentV: Genesis.IDV, + HeightV: 1, + }}, + rngSource: rngSource, } - n.colors = colors - utils.Sort(n.colors) -} - -func (n *Network) Initialize(params snowball.Parameters, numColors int) { - n.params = params - // #nosec G404 - n.colors = append(n.colors, &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(uint64(rand.Int63())), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: 1, - }) + s := sampler.NewDeterministicUniform(n.rngSource) for i := 1; i < numColors; i++ { - dependency := n.colors[rand.Intn(len(n.colors))] // #nosec G404 - // #nosec G404 + s.Initialize(uint64(len(n.colors))) + dependencyInd, _ := s.Next() + dependency := n.colors[dependencyInd] n.colors = append(n.colors, &TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(uint64(rand.Int63())), + IDV: ids.Empty.Prefix(rngSource.Uint64()), StatusV: choices.Processing, }, ParentV: dependency.IDV, HeightV: dependency.HeightV + 1, }) } + return n } -func (n *Network) AddNode(sm Consensus) error { - if err := sm.Initialize(snow.DefaultConsensusContextTest(), n.params, Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { +func (n *Network) shuffleColors() { + s := sampler.NewDeterministicUniform(n.rngSource) + s.Initialize(uint64(len(n.colors))) + indices, _ := s.Sample(len(n.colors)) + colors := []*TestBlock(nil) + for _, index := range indices { + colors = append(colors, n.colors[int(index)]) + } + n.colors = colors + utils.Sort(n.colors) +} + +func (n *Network) AddNode(t testing.TB, sm Consensus) error { + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + if err := sm.Initialize(ctx, n.params, Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { return err } @@ -101,10 +109,12 @@ func (n *Network) Round() error { return nil } - runningInd := rand.Intn(len(n.running)) // #nosec G404 + s := sampler.NewDeterministicUniform(n.rngSource) + s.Initialize(uint64(len(n.running))) + + runningInd, _ := s.Next() running := n.running[runningInd] - s := sampler.NewUniform() s.Initialize(uint64(len(n.nodes))) indices, _ := s.Sample(n.params.K) sampledColors := bag.Bag[ids.ID]{} diff --git a/snow/consensus/snowman/oracle_block.go b/snow/consensus/snowman/oracle_block.go index 0d8bd2be94dc..2ca81680ae78 100644 --- a/snow/consensus/snowman/oracle_block.go +++ b/snow/consensus/snowman/oracle_block.go @@ -1,4 +1,4 
@@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -16,10 +16,7 @@ var ErrNotOracle = errors.New("block isn't an oracle") // This ordering does not need to be deterministically created from the chain // state. type OracleBlock interface { - Block - // Options returns the possible children of this block in the order this // validator prefers the blocks. - // Options is guaranteed to only be called on a verified block. Options(context.Context) ([2]Block, error) } diff --git a/snow/consensus/snowman/poll/early_term_no_traversal.go b/snow/consensus/snowman/poll/early_term_no_traversal.go index fcad5b71932b..460805ab7820 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll diff --git a/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/snow/consensus/snowman/poll/early_term_no_traversal_test.go index 8255818abdbb..9d215c246eec 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll diff --git a/snow/consensus/snowman/poll/interfaces.go b/snow/consensus/snowman/poll/interfaces.go index cab31cfc54ce..c1a776b4dc5f 100644 --- a/snow/consensus/snowman/poll/interfaces.go +++ b/snow/consensus/snowman/poll/interfaces.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index e31821476bc8..4c085b7aa4bc 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll import ( + "errors" "fmt" "strings" "time" @@ -19,6 +20,11 @@ import ( "github.com/ava-labs/avalanchego/utils/metric" ) +var ( + errFailedPollsMetric = errors.New("failed to register polls metric") + errFailedPollDurationMetrics = errors.New("failed to register poll_duration metrics") +) + type pollHolder interface { GetPoll() Poll StartTime() time.Time @@ -52,16 +58,14 @@ func NewSet( log logging.Logger, namespace string, reg prometheus.Registerer, -) Set { +) (Set, error) { numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "polls", Help: "Number of pending network polls", }) if err := reg.Register(numPolls); err != nil { - log.Error("failed to register polls statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollsMetric, err) } durPolls, err := metric.NewAverager( @@ -71,9 +75,7 @@ func NewSet( reg, ) if err != nil { - log.Error("failed to register poll_duration statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollDurationMetrics, err) } return &set{ @@ -82,7 +84,7 @@ func NewSet( durPolls: durPolls, factory: factory, polls: linkedhashmap.New[uint32, pollHolder](), - } + }, nil } // Add to the current set of polls diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 8200f25dc5f0..cdcf0e7d8903 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -21,14 +21,14 @@ var ( blkID3 = ids.ID{3} blkID4 = ids.ID{4} - vdr1 = ids.NodeID{1} - vdr2 = ids.NodeID{2} - vdr3 = ids.NodeID{3} - vdr4 = ids.NodeID{4} - vdr5 = ids.NodeID{5} + vdr1 = ids.BuildTestNodeID([]byte{0x01}) + vdr2 = ids.BuildTestNodeID([]byte{0x02}) + vdr3 = ids.BuildTestNodeID([]byte{0x03}) + vdr4 = ids.BuildTestNodeID([]byte{0x04}) + vdr5 = ids.BuildTestNodeID([]byte{0x05}) // k = 5 ) -func TestNewSetErrorOnMetrics(t *testing.T) { +func TestNewSetErrorOnPollsMetrics(t *testing.T) { require := require.New(t) factory := NewEarlyTermNoTraversalFactory(1, 1) @@ -37,13 +37,29 @@ func TestNewSetErrorOnMetrics(t *testing.T) { registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "polls", + Namespace: namespace, + Name: "polls", }))) + + _, err := NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollsMetric) +} + +func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { + require := require.New(t) + + factory := NewEarlyTermNoTraversalFactory(1, 1) + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "poll_duration", + Namespace: namespace, + Name: "poll_duration_count", }))) - require.NotNil(NewSet(factory, log, namespace, registerer)) + _, err := NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollDurationMetrics) } func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { @@ -56,7 +72,8 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create 
two polls for the two blocks vdrBag := bag.Of(vdrs...) @@ -92,7 +109,8 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create two polls for the two blocks vdrBag := bag.Of(vdrs...) @@ -128,7 +146,8 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create three polls for the two blocks vdrBag := bag.Of(vdrs...) @@ -172,7 +191,8 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) require.Zero(s.Len()) @@ -204,7 +224,8 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) require.Zero(s.Len()) @@ -233,7 +254,8 @@ func TestSetString(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) expected := `current polls: (Size = 1) RequestID 0: diff --git a/snow/consensus/snowman/snowman_block.go b/snow/consensus/snowman/snowman_block.go index 782c77d8e415..a25099b4519f 100644 --- a/snow/consensus/snowman/snowman_block.go +++ b/snow/consensus/snowman/snowman_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/consensus/snowman/test_block.go b/snow/consensus/snowman/test_block.go index a02bf31787c1..b59eb2ed5f89 100644 --- a/snow/consensus/snowman/test_block.go +++ b/snow/consensus/snowman/test_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -48,6 +48,6 @@ func (b *TestBlock) Bytes() []byte { return b.BytesV } -func (b *TestBlock) Less(other *TestBlock) bool { - return b.HeightV < other.HeightV +func (b *TestBlock) Compare(other *TestBlock) int { + return utils.Compare(b.HeightV, other.HeightV) } diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 7cf6cde8139e..5b12ce3f2e6b 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
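The `NewSet` change above (returning `(Set, error)` instead of only logging registration failures) wraps a package-level sentinel together with the underlying registration error, which the updated tests then assert via `require.ErrorIs`. A minimal sketch of that pattern, assuming Go 1.20+ for `fmt.Errorf` with two `%w` verbs; the underlying error text below is made up for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errFailedPollsMetric = errors.New("failed to register polls metric")

func register() error {
	// Stand-in for the error returned by prometheus.Registerer.Register.
	underlying := errors.New("duplicate metrics collector registration attempted")
	// Both errors are wrapped, so callers can match either one with errors.Is.
	return fmt.Errorf("%w: %w", errFailedPollsMetric, underlying)
}

func main() {
	err := register()
	fmt.Println(errors.Is(err, errFailedPollsMetric)) // true: the sentinel is matchable
	fmt.Println(err)                                  // both messages are preserved in the chain
}
```

Matching on the sentinel keeps the tests independent of the exact wording of the Prometheus registration error.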
package snowman @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" "time" "go.uber.org/zap" @@ -17,14 +16,15 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/metrics" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/set" ) var ( - errDuplicateAdd = errors.New("duplicate block add") + errDuplicateAdd = errors.New("duplicate block add") + errTooManyProcessingBlocks = errors.New("too many processing blocks") + errBlockProcessingTooLong = errors.New("block processing too long") _ Factory = (*TopologicalFactory)(nil) _ Consensus = (*Topological)(nil) @@ -41,10 +41,7 @@ func (TopologicalFactory) New() Consensus { // strongly preferred branch. This tree structure amortizes network polls to // vote on more than just the next block. type Topological struct { - metrics.Latency - metrics.Polls - metrics.Height - metrics.Timestamp + metrics *metrics // pollNumber is the number of times RecordPolls has been called pollNumber uint64 @@ -56,11 +53,8 @@ type Topological struct { // instances params snowball.Parameters - // head is the last accepted block - head ids.ID - - // height is the height of the last accepted block - height uint64 + lastAcceptedID ids.ID + lastAcceptedHeight uint64 // blocks stores the last accepted block and all the pending blocks blocks map[ids.ID]*snowmanBlock // blockID -> snowmanBlock @@ -72,8 +66,8 @@ type Topological struct { // that height. preferredHeights map[uint64]ids.ID // height -> blockID - // tail is the preferred block with no children - tail ids.ID + // preference is the preferred block with highest height + preference ids.ID // Used in [calculateInDegree] and. // Should only be accessed in that method. @@ -108,54 +102,37 @@ type votes struct { func (ts *Topological) Initialize( ctx *snow.ConsensusContext, params snowball.Parameters, - rootID ids.ID, - rootHeight uint64, - rootTime time.Time, + lastAcceptedID ids.ID, + lastAcceptedHeight uint64, + lastAcceptedTime time.Time, ) error { - if err := params.Verify(); err != nil { - return err - } - - latencyMetrics, err := metrics.NewLatency("blks", "block(s)", ctx.Log, "", ctx.Registerer) - if err != nil { - return err - } - ts.Latency = latencyMetrics - - pollsMetrics, err := metrics.NewPolls("", ctx.Registerer) - if err != nil { - return err - } - ts.Polls = pollsMetrics - - heightMetrics, err := metrics.NewHeight("", ctx.Registerer) + err := params.Verify() if err != nil { return err } - ts.Height = heightMetrics - timestampMetrics, err := metrics.NewTimestamp("", ctx.Registerer) + ts.metrics, err = newMetrics( + ctx.Log, + "", + ctx.Registerer, + lastAcceptedHeight, + lastAcceptedTime, + ) if err != nil { return err } - ts.Timestamp = timestampMetrics ts.leaves = set.Set[ids.ID]{} ts.kahnNodes = make(map[ids.ID]kahnNode) ts.ctx = ctx ts.params = params - ts.head = rootID - ts.height = rootHeight + ts.lastAcceptedID = lastAcceptedID + ts.lastAcceptedHeight = lastAcceptedHeight ts.blocks = map[ids.ID]*snowmanBlock{ - rootID: {params: ts.params}, + lastAcceptedID: {params: ts.params}, } ts.preferredHeights = make(map[uint64]ids.ID) - ts.tail = rootID - - // Initially set the metrics for the last accepted block. 
- ts.Height.Accepted(ts.height) - ts.Timestamp.Accepted(rootTime) - + ts.preference = lastAcceptedID return nil } @@ -165,8 +142,10 @@ func (ts *Topological) NumProcessing() int { func (ts *Topological) Add(ctx context.Context, blk Block) error { blkID := blk.ID() + height := blk.Height() ts.ctx.Log.Verbo("adding block", zap.Stringer("blkID", blkID), + zap.Uint64("height", height), ) // Make sure a block is not inserted twice. This enforces the invariant that @@ -178,13 +157,15 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { return errDuplicateAdd } - ts.Latency.Issued(blkID, ts.pollNumber) + ts.metrics.Verified(height) + ts.metrics.Issued(blkID, ts.pollNumber) parentID := blk.Parent() parentNode, ok := ts.blocks[parentID] if !ok { ts.ctx.Log.Verbo("block ancestor is missing, being rejected", zap.Stringer("blkID", blkID), + zap.Uint64("height", height), zap.Stringer("parentID", parentID), ) @@ -194,7 +175,7 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { if err := blk.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) + ts.metrics.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) return nil } @@ -205,15 +186,16 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { blk: blk, } - // If we are extending the tail, this is the new tail - if ts.tail == parentID { - ts.tail = blkID + // If we are extending the preference, this is the new preference + if ts.preference == parentID { + ts.preference = blkID ts.preferredIDs.Add(blkID) - ts.preferredHeights[blk.Height()] = blkID + ts.preferredHeights[height] = blkID } ts.ctx.Log.Verbo("added block", zap.Stringer("blkID", blkID), + zap.Uint64("height", height), zap.Stringer("parentID", parentID), ) return nil @@ -226,17 +208,17 @@ func (ts *Topological) Decided(blk Block) bool { } // If the block is marked as fetched, we can check if it has been // transitively rejected. - return blk.Status() == choices.Processing && blk.Height() <= ts.height + return blk.Status() == choices.Processing && blk.Height() <= ts.lastAcceptedHeight } func (ts *Topological) Processing(blkID ids.ID) bool { // The last accepted block is in the blocks map, so we first must ensure the // requested block isn't the last accepted block. - if blkID == ts.head { + if blkID == ts.lastAcceptedID { return false } - // If the block is in the map of current blocks and not the head, then the - // block is currently processing. + // If the block is in the map of current blocks and not the last accepted + // block, then it is currently processing. _, ok := ts.blocks[blkID] return ok } @@ -250,16 +232,16 @@ func (ts *Topological) IsPreferred(blk Block) bool { } func (ts *Topological) LastAccepted() (ids.ID, uint64) { - return ts.head, ts.height + return ts.lastAcceptedID, ts.lastAcceptedHeight } func (ts *Topological) Preference() ids.ID { - return ts.tail + return ts.preference } func (ts *Topological) PreferenceAtHeight(height uint64) (ids.ID, bool) { - if height == ts.height { - return ts.head, true + if height == ts.lastAcceptedHeight { + return ts.lastAcceptedID, true } blkID, ok := ts.preferredHeights[height] return blkID, ok @@ -273,8 +255,8 @@ func (ts *Topological) PreferenceAtHeight(height uint64) (ids.ID, bool) { // Every other block will have an unsuccessful poll registered. // // After collecting which blocks should be voted on, the polls are registered -// and blocks are accepted/rejected as needed. The tail is then updated to equal -// the leaf on the preferred branch. 
+// and blocks are accepted/rejected as needed. The preference is then updated to +// equal the leaf on the preferred branch. // // To optimize the theoretical complexity of the vote propagation, a topological // sort is done over the blocks that are reachable from the provided votes. @@ -312,11 +294,11 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) } // If the set of preferred IDs already contains the preference, then the - // tail is guaranteed to already be set correctly. This is because the value - // returned from vote reports the next preferred block after the last + // preference is guaranteed to already be set correctly. This is because the + // value returned from vote reports the next preferred block after the last // preferred block that was voted for. If this block was previously // preferred, then we know that following the preferences down the chain - // will return the current tail. + // will return the current preference. if ts.preferredIDs.Contains(preferred) { return nil } @@ -325,8 +307,8 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) ts.preferredIDs.Clear() maps.Clear(ts.preferredHeights) - ts.tail = preferred - startBlock := ts.blocks[ts.tail] + ts.preference = preferred + startBlock := ts.blocks[ts.preference] // Runtime = |live set| ; Space = Constant // Traverse from the preferred ID to the last accepted ancestor. @@ -339,43 +321,47 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) // Traverse from the preferred ID to the preferred child until there are no // children. for block := startBlock; block.sb != nil; { - ts.tail = block.sb.Preference() - ts.preferredIDs.Add(ts.tail) - block = ts.blocks[ts.tail] + ts.preference = block.sb.Preference() + ts.preferredIDs.Add(ts.preference) + block = ts.blocks[ts.preference] // Invariant: Because the prior block had an initialized snowball // instance, it must have a processing child. This guarantees that // block.blk is non-nil here. - ts.preferredHeights[block.blk.Height()] = ts.tail + ts.preferredHeights[block.blk.Height()] = ts.preference } return nil } // HealthCheck returns information about the consensus health. 
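The rewritten `HealthCheck` below swaps the hand-built reason string for wrapped sentinel errors combined with `errors.Join`. A small sketch of that pattern with simplified, assumed names (only the processing-blocks check is shown):

```go
package main

import (
	"errors"
	"fmt"
)

var errTooManyProcessingBlocks = errors.New("too many processing blocks")

func healthCheck(numProcessing, maxOutstanding int) (map[string]interface{}, error) {
	var errs []error
	if numProcessing > maxOutstanding {
		// Wrap the sentinel so callers can match it with errors.Is.
		errs = append(errs, fmt.Errorf("%w: %d > %d",
			errTooManyProcessingBlocks, numProcessing, maxOutstanding))
	}
	details := map[string]interface{}{"processingBlocks": numProcessing}
	// errors.Join returns nil when no checks failed.
	return details, errors.Join(errs...)
}

func main() {
	_, err := healthCheck(5, 3)
	fmt.Println(errors.Is(err, errTooManyProcessingBlocks)) // true: unhealthy
	_, err = healthCheck(1, 3)
	fmt.Println(err == nil) // true: healthy
}
```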
func (ts *Topological) HealthCheck(context.Context) (interface{}, error) { - numOutstandingBlks := ts.Latency.NumProcessing() - isOutstandingBlks := numOutstandingBlks <= ts.params.MaxOutstandingItems - healthy := isOutstandingBlks - details := map[string]interface{}{ - "outstandingBlocks": numOutstandingBlks, + var errs []error + + numProcessingBlks := ts.NumProcessing() + if numProcessingBlks > ts.params.MaxOutstandingItems { + err := fmt.Errorf("%w: %d > %d", + errTooManyProcessingBlocks, + numProcessingBlks, + ts.params.MaxOutstandingItems, + ) + errs = append(errs, err) } - // check for long running blocks - timeReqRunning := ts.Latency.MeasureAndGetOldestDuration() - isProcessingTime := timeReqRunning <= ts.params.MaxItemProcessingTime - healthy = healthy && isProcessingTime - details["longestRunningBlock"] = timeReqRunning.String() - - if !healthy { - var errorReasons []string - if !isOutstandingBlks { - errorReasons = append(errorReasons, fmt.Sprintf("number of outstanding blocks %d > %d", numOutstandingBlks, ts.params.MaxOutstandingItems)) - } - if !isProcessingTime { - errorReasons = append(errorReasons, fmt.Sprintf("block processing time %s > %s", timeReqRunning, ts.params.MaxItemProcessingTime)) - } - return details, fmt.Errorf("snowman consensus is not healthy reason: %s", strings.Join(errorReasons, ", ")) + maxTimeProcessing := ts.metrics.MeasureAndGetOldestDuration() + if maxTimeProcessing > ts.params.MaxItemProcessingTime { + err := fmt.Errorf("%w: %s > %s", + errBlockProcessingTooLong, + maxTimeProcessing, + ts.params.MaxItemProcessingTime, + ) + errs = append(errs, err) } - return details, nil + + return map[string]interface{}{ + "processingBlocks": numProcessingBlks, + "longestProcessingBlock": maxTimeProcessing.String(), // .String() is needed here to ensure a human readable format + "lastAcceptedID": ts.lastAcceptedID, + "lastAcceptedHeight": ts.lastAcceptedHeight, + }, errors.Join(errs...) } // takes in a list of votes and sets up the topological ordering. Returns the @@ -495,20 +481,20 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // If the voteStack is empty, then the full tree should falter. This won't // change the preferred branch. if len(voteStack) == 0 { - headBlock := ts.blocks[ts.head] - headBlock.shouldFalter = true + lastAcceptedBlock := ts.blocks[ts.lastAcceptedID] + lastAcceptedBlock.shouldFalter = true if numProcessing := len(ts.blocks) - 1; numProcessing > 0 { ts.ctx.Log.Verbo("no progress was made after processing pending blocks", zap.Int("numProcessing", numProcessing), ) - ts.Polls.Failed() + ts.metrics.FailedPoll() } - return ts.tail, nil + return ts.preference, nil } // keep track of the new preferred block - newPreferred := ts.head + newPreferred := ts.lastAcceptedID onPreferredBranch := true pollSuccessful := false for len(voteStack) > 0 { @@ -544,8 +530,9 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // apply the votes for this snowball instance pollSuccessful = parentBlock.sb.RecordPoll(vote.votes) || pollSuccessful - // Only accept when you are finalized and the head. - if parentBlock.sb.Finalized() && ts.head == vote.parentID { + // Only accept when you are finalized and a child of the last accepted + // block. 
+ if parentBlock.sb.Finalized() && ts.lastAcceptedID == vote.parentID { if err := ts.acceptPreferredChild(ctx, parentBlock); err != nil { return ids.ID{}, err } @@ -607,9 +594,9 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err } if pollSuccessful { - ts.Polls.Successful() + ts.metrics.SuccessfulPoll() } else { - ts.Polls.Failed() + ts.metrics.FailedPoll() } return newPreferred, nil } @@ -634,24 +621,32 @@ func (ts *Topological) acceptPreferredChild(ctx context.Context, n *snowmanBlock return err } + height := child.Height() + timestamp := child.Timestamp() ts.ctx.Log.Trace("accepting block", zap.Stringer("blkID", pref), + zap.Uint64("height", height), + zap.Time("timestamp", timestamp), ) if err := child.Accept(ctx); err != nil { return err } - // Because this is the newest accepted block, this is the new head. - ts.head = pref - ts.height = child.Height() + // Update the last accepted values to the newly accepted block. + ts.lastAcceptedID = pref + ts.lastAcceptedHeight = height // Remove the decided block from the set of processing IDs, as its status // now implies its preferredness. ts.preferredIDs.Remove(pref) - delete(ts.preferredHeights, ts.height) - - ts.Latency.Accepted(pref, ts.pollNumber, len(bytes)) - ts.Height.Accepted(ts.height) - ts.Timestamp.Accepted(child.Timestamp()) + delete(ts.preferredHeights, height) + + ts.metrics.Accepted( + pref, + height, + timestamp, + ts.pollNumber, + len(bytes), + ) // Because ts.blocks contains the last accepted block, we don't delete the // block from the blocks map here. @@ -665,13 +660,14 @@ func (ts *Topological) acceptPreferredChild(ctx context.Context, n *snowmanBlock ts.ctx.Log.Trace("rejecting block", zap.String("reason", "conflict with accepted block"), - zap.Stringer("rejectedID", childID), - zap.Stringer("conflictedID", pref), + zap.Stringer("blkID", childID), + zap.Uint64("height", child.Height()), + zap.Stringer("conflictID", pref), ) if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) + ts.metrics.Rejected(childID, ts.pollNumber, len(child.Bytes())) // Track which blocks have been directly rejected rejects = append(rejects, childID) @@ -696,10 +692,16 @@ func (ts *Topological) rejectTransitively(ctx context.Context, rejected []ids.ID delete(ts.blocks, rejectedID) for childID, child := range rejectedNode.children { + ts.ctx.Log.Trace("rejecting block", + zap.String("reason", "rejected ancestor"), + zap.Stringer("blkID", childID), + zap.Uint64("height", child.Height()), + zap.Stringer("parentID", rejectedID), + ) if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) + ts.metrics.Rejected(childID, ts.pollNumber, len(child.Bytes())) // add the newly rejected block to the end of the stack rejected = append(rejected, childID) diff --git a/snow/consensus/snowman/topological_test.go b/snow/consensus/snowman/topological_test.go index 53a1b4416d9a..540b5a8f2eb1 100644 --- a/snow/consensus/snowman/topological_test.go +++ b/snow/consensus/snowman/topological_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman diff --git a/snow/consensus/snowman/traced_consensus.go b/snow/consensus/snowman/traced_consensus.go index 67a8797b294a..363fa15334b0 100644 --- a/snow/consensus/snowman/traced_consensus.go +++ b/snow/consensus/snowman/traced_consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/consensus/snowstorm/test_tx.go b/snow/consensus/snowstorm/test_tx.go index 8c5aa9aa3544..a8b514c8164d 100644 --- a/snow/consensus/snowstorm/test_tx.go +++ b/snow/consensus/snowstorm/test_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm diff --git a/snow/consensus/snowstorm/tx.go b/snow/consensus/snowstorm/tx.go index 54a56a42f5b4..cc1cf649e9a8 100644 --- a/snow/consensus/snowstorm/tx.go +++ b/snow/consensus/snowstorm/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm diff --git a/snow/context.go b/snow/context.go index c89c2dd07a11..2cbbedb38b47 100644 --- a/snow/context.go +++ b/snow/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow @@ -96,33 +96,3 @@ type ConsensusContext struct { // True iff this chain is currently state-syncing StateSyncing utils.Atomic[bool] } - -func DefaultContextTest() *Context { - sk, err := bls.NewSecretKey() - if err != nil { - panic(err) - } - pk := bls.PublicFromSecretKey(sk) - return &Context{ - NetworkID: 0, - SubnetID: ids.Empty, - ChainID: ids.Empty, - NodeID: ids.EmptyNodeID, - PublicKey: pk, - Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), - Metrics: metrics.NewOptionalGatherer(), - ChainDataDir: "", - } -} - -func DefaultConsensusContextTest() *ConsensusContext { - return &ConsensusContext{ - Context: DefaultContextTest(), - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - BlockAcceptor: noOpAcceptor{}, - TxAcceptor: noOpAcceptor{}, - VertexAcceptor: noOpAcceptor{}, - } -} diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 0f8a2484a4e2..cd530d1cb1f8 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -29,6 +30,14 @@ const ( stripeDistance = 2000 stripeWidth = 5 cacheSize = 100000 + + // statusUpdateFrequency is how many containers should be processed between + // logs + statusUpdateFrequency = 5000 + + // maxOutstandingGetAncestorsRequests is the maximum number of GetAncestors + // sent but not yet responded to/failed + maxOutstandingGetAncestorsRequests = 10 ) var _ common.BootstrapableEngine = (*bootstrapper)(nil) @@ -42,44 +51,41 @@ func New( StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(config.Ctx.Log), + AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), + AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), PutHandler: common.NewNoOpPutHandler(config.Ctx.Log), QueryHandler: common.NewNoOpQueryHandler(config.Ctx.Log), ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), AppHandler: config.VM, - processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, - Fetcher: common.Fetcher{ - OnFinished: onFinished, - }, - } + outstandingRequests: bimap.New[common.Request, ids.ID](), - if err := b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer); err != nil { - return nil, err + processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, + onFinished: onFinished, } - - config.Config.Bootstrapable = b - b.Bootstrapper = common.NewCommonBootstrapper(config.Config) - return b, nil + return b, b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer) } // Note: To align with the Snowman invariant, it should be guaranteed the VM is // not used until after the bootstrapper has been Started. 
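The bootstrapper below replaces the old `OutstandingRequests` tracking with a `bimap.BiMap` keyed by `(nodeID, requestID)` and valued by the requested vertex ID. The sketch below exercises only the calls that appear in this diff (`Put`, `HasValue`, `DeleteKey`, `Len`); the request and vertex types are simplified stand-ins rather than the real `common.Request`/`ids.ID`:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils/bimap"
)

type request struct {
	nodeID    string
	requestID uint32
}

func main() {
	outstanding := bimap.New[request, string]() // (node, requestID) -> vertexID

	// Issuing a GetAncestors request: remember which vertex was asked for.
	outstanding.Put(request{nodeID: "node-1", requestID: 7}, "vtx-A")

	// Before re-requesting a vertex, check whether it is already in flight.
	fmt.Println(outstanding.HasValue("vtx-A")) // true
	fmt.Println(outstanding.Len())             // 1

	// On Ancestors / GetAncestorsFailed, resolve the request back to its vertex.
	vtxID, ok := outstanding.DeleteKey(request{nodeID: "node-1", requestID: 7})
	fmt.Println(vtxID, ok) // vtx-A true
}
```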
type bootstrapper struct { Config + common.Halter // list of NoOpsHandler for messages dropped by bootstrapper common.StateSummaryFrontierHandler common.AcceptedStateSummaryHandler + common.AcceptedFrontierHandler + common.AcceptedHandler common.PutHandler common.QueryHandler common.ChitsHandler common.AppHandler - common.Bootstrapper - common.Fetcher metrics - started bool + // tracks which validators were asked for which containers in which requests + outstandingRequests *bimap.BiMap[common.Request, ids.ID] // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests @@ -87,6 +93,16 @@ type bootstrapper struct { // Contains IDs of vertices that have recently been processed processedCache *cache.LRU[ids.ID, struct{}] + + // Tracks the last requestID that was used in a request + requestID uint32 + + // Called when bootstrapping is done on a specific chain + onFinished func(ctx context.Context, lastReqID uint32) error +} + +func (b *bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx } func (b *bootstrapper) Clear(context.Context) error { @@ -127,7 +143,10 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request vtxs = vtxs[:b.Config.AncestorsMaxContainersReceived] } - requestedVtxID, requested := b.OutstandingRequests.Remove(nodeID, requestID) + requestedVtxID, requested := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) // first vertex should be the one we requested in GetAncestors request if err != nil { if !requested { @@ -167,7 +186,7 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request ) return b.fetch(ctx, requestedVtxID) } - if !requested && !b.OutstandingRequests.Contains(vtxID) && !b.needToFetch.Contains(vtxID) { + if !requested && !b.outstandingRequests.HasValue(vtxID) && !b.needToFetch.Contains(vtxID) { b.Ctx.Log.Debug("received un-needed vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -234,7 +253,10 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - vtxID, ok := b.OutstandingRequests.Remove(nodeID, requestID) + vtxID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) if !ok { b.Ctx.Log.Debug("skipping GetAncestorsFailed call", zap.String("reason", "no matching outstanding request"), @@ -256,16 +278,7 @@ func (b *bootstrapper) Connected( return err } - if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if b.started || !b.StartupTracker.ShouldStart() { - return nil - } - - b.started = true - return b.Startup(ctx) + return b.StartupTracker.Connected(ctx, nodeID, nodeVersion) } func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { @@ -327,7 +340,7 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return err } - b.Config.SharedCfg.RequestID = startReqID + b.requestID = startReqID // If the network was already linearized, don't attempt to linearize it // again. 
@@ -336,38 +349,38 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return fmt.Errorf("failed to get linearization status: %w", err) } if linearized { - edge := b.Manager.Edge(ctx) - return b.ForceAccepted(ctx, edge) + return b.startSyncing(ctx, nil) } - // If requested, assume the currently accepted state is what was linearized. - // - // Note: This is used to linearize networks that were created after the - // linearization occurred. - if b.Config.LinearizeOnStartup { - edge := b.Manager.Edge(ctx) - stopVertex, err := b.Manager.BuildStopVtx(ctx, edge) - if err != nil { - return fmt.Errorf("failed to create stop vertex: %w", err) - } - if err := stopVertex.Accept(ctx); err != nil { - return fmt.Errorf("failed to accept stop vertex: %w", err) - } - - stopVertexID := stopVertex.ID() - b.Ctx.Log.Info("accepted stop vertex", - zap.Stringer("vtxID", stopVertexID), + // If a stop vertex is well known, accept that. + if b.Config.StopVertexID != ids.Empty { + b.Ctx.Log.Info("using well known stop vertex", + zap.Stringer("vtxID", b.Config.StopVertexID), ) - return b.ForceAccepted(ctx, []ids.ID{stopVertexID}) + return b.startSyncing(ctx, []ids.ID{b.Config.StopVertexID}) } - if !b.StartupTracker.ShouldStart() { - return nil + // If a stop vertex isn't well known, treat the current state as the final + // DAG state. + // + // Note: This is used to linearize networks that were created after the + // linearization occurred. + edge := b.Manager.Edge(ctx) + stopVertex, err := b.Manager.BuildStopVtx(ctx, edge) + if err != nil { + return fmt.Errorf("failed to create stop vertex: %w", err) } + if err := stopVertex.Accept(ctx); err != nil { + return fmt.Errorf("failed to accept stop vertex: %w", err) + } + + stopVertexID := stopVertex.ID() + b.Ctx.Log.Info("generated stop vertex", + zap.Stringer("vtxID", stopVertexID), + ) - b.started = true - return b.Startup(ctx) + return b.startSyncing(ctx, nil) } func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { @@ -382,21 +395,16 @@ func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { return intf, vmErr } -func (b *bootstrapper) GetVM() common.VM { - return b.VM -} - // Add the vertices in [vtxIDs] to the set of vertices that we need to fetch, // and then fetch vertices (and their ancestors) until either there are no more // to fetch or we are at the maximum number of outstanding requests. func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) 
- for b.needToFetch.Len() > 0 && b.OutstandingRequests.Len() < common.MaxOutstandingGetAncestorsRequests { - vtxID := b.needToFetch.CappedList(1)[0] - b.needToFetch.Remove(vtxID) + for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < maxOutstandingGetAncestorsRequests { + vtxID, _ := b.needToFetch.Pop() // Length checked in predicate above // Make sure we haven't already requested this vertex - if b.OutstandingRequests.Contains(vtxID) { + if b.outstandingRequests.HasValue(vtxID) { continue } @@ -410,10 +418,16 @@ func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } validatorID := validatorIDs[0] - b.Config.SharedCfg.RequestID++ - - b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, vtxID) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, vtxID) // request vertex and ancestors + b.requestID++ + + b.outstandingRequests.Put( + common.Request{ + NodeID: validatorID, + RequestID: b.requestID, + }, + vtxID, + ) + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, vtxID) // request vertex and ancestors } return b.checkFinish(ctx) } @@ -497,16 +511,10 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er b.numFetchedVts.Inc() verticesFetchedSoFar := b.VtxBlocked.Jobs.PendingJobs() - if verticesFetchedSoFar%common.StatusUpdateFrequency == 0 { // Periodically print progress - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } else { - b.Ctx.Log.Debug("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } + if verticesFetchedSoFar%statusUpdateFrequency == 0 { // Periodically print progress + b.Ctx.Log.Info("fetched vertices", + zap.Uint64("numVerticesFetched", verticesFetchedSoFar), + ) } parents, err := vtx.Parents() @@ -549,8 +557,8 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er return b.fetch(ctx) } -// ForceAccepted starts bootstrapping. Process the vertices in [accepterContainerIDs]. -func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { +// startSyncing starts bootstrapping. Process the vertices in [accepterContainerIDs]. +func (b *bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs []ids.ID) error { pendingContainerIDs := b.VtxBlocked.MissingIDs() // Append the list of accepted container IDs to pendingContainerIDs to ensure // we iterate over every container that must be traversed. 
@@ -578,58 +586,36 @@ func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs [ // checkFinish repeatedly executes pending transactions and requests new frontier blocks until there aren't any new ones // after which it finishes the bootstrap process func (b *bootstrapper) checkFinish(ctx context.Context) error { - // If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish - pendingJobs := b.VtxBlocked.MissingIDs() - if b.IsBootstrapped() || len(pendingJobs) > 0 { + // If we still need to fetch vertices, we can't finish + if len(b.VtxBlocked.MissingIDs()) > 0 { return nil } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing transactions") - } else { - b.Ctx.Log.Debug("executing transactions") - } - + b.Ctx.Log.Info("executing transactions") _, err := b.TxBlocked.ExecuteAll( ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.TxAcceptor, ) if err != nil || b.Halted() { return err } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing vertices") - } else { - b.Ctx.Log.Debug("executing vertices") - } - + b.Ctx.Log.Info("executing vertices") _, err = b.VtxBlocked.ExecuteAll( ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.VertexAcceptor, ) if err != nil || b.Halted() { return err } - // If the chain is linearized, we should immediately move on to start - // bootstrapping snowman. - linearized, err := b.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if !linearized { - b.Ctx.Log.Debug("checking for stop vertex before finishing bootstrapping") - return b.Restart(ctx, true) - } - - // Invariant: edge will only be the stop vertex after its acceptance. + // Invariant: edge will only be the stop vertex edge := b.Manager.Edge(ctx) stopVertexID := edge[0] if err := b.VM.Linearize(ctx, stopVertexID); err != nil { @@ -637,7 +623,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { } b.processedCache.Flush() - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.onFinished(ctx, b.requestID) } // A vertex is less than another vertex if it is unknown. Ties are broken by diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index d5bd51a9f78c..7da0b99a8736 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
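The test changes below drop the deleted `snow.DefaultConsensusContextTest` helper in favour of `snowtest.Context` / `snowtest.ConsensusContext`, as used in `newConfig`. A minimal sketch of that setup; the test name is hypothetical and the comments describe assumed behaviour of the helpers:

```go
package bootstrap

import (
	"testing"

	"github.com/ava-labs/avalanchego/snow/snowtest"
)

func TestConsensusContextSetupSketch(t *testing.T) {
	// Build a chain context pre-wired for tests (assumed to carry test logging,
	// chain IDs, and metrics registries), then wrap it into a consensus context.
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)

	if ctx == nil {
		t.Fatal("expected a consensus context")
	}
}
```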
package bootstrap @@ -8,9 +8,12 @@ import ( "context" "errors" "testing" + "time" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -24,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" @@ -52,7 +56,8 @@ func (t *testTx) Accept(ctx context.Context) error { func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.TestManager, *vertex.TestVM) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() db := memdb.New() @@ -61,23 +66,10 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te vm := &vertex.TestVM{} vm.T = t - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ - T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - sender.Default(true) manager.Default(true) vm.Default(true) - sender.CantSendGetAcceptedFrontier = false - peer := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) @@ -93,30 +85,20 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, startupTracker) - commonConfig := common.Config{ + avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, ctx.AvalancheRegisterer) + require.NoError(err) + + return Config{ + AllGetsServer: avaGetHandler, Ctx: ctx, Beacons: vdrs, - SampleK: vdrs.Count(constants.PrimaryNetworkID), - Alpha: totalWeight/2 + 1, StartupTracker: startupTracker, Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := getter.New(manager, commonConfig) - require.NoError(err) - - return Config{ - Config: commonConfig, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: manager, - VM: vm, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: manager, + VM: vm, }, peer, sender, manager, vm } @@ -148,7 +130,10 @@ func TestBootstrapperSingleFrontier(t *testing.T) { IDV: vtxID1, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx0, + }, + HeightV: 1, BytesV: vtxBytes1, } vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex @@ -156,10 +141,14 @@ func TestBootstrapperSingleFrontier(t *testing.T) { IDV: vtxID2, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx1, + }, + HeightV: 2, BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := New( config, func(context.Context, uint32) error { @@ -172,11 +161,6 @@ func TestBootstrapperSingleFrontier(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs 
:= []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -219,7 +203,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) @@ -269,6 +254,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -281,10 +267,6 @@ func TestBootstrapperByzantineResponses(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID1} manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: @@ -324,7 +306,8 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx0 require.Equal(vtxID0, reqVtxID) oldReqID := *requestID @@ -437,6 +420,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { BytesV: vtxBytes1, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -449,11 +433,6 @@ func TestBootstrapperTxDependencies(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID1} - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): @@ -485,7 +464,8 @@ func TestBootstrapperTxDependencies(t *testing.T) { *reqIDPtr = reqID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { @@ -563,6 +543,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := New( config, func(context.Context, uint32) error { @@ -575,10 +556,6 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID2} manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch { case vtxID == vtxID0: @@ -617,7 +594,8 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { requested = vtxID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx1 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 require.Equal(vtxID1, requested) require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes1})) // Provide vtx1; should request vtx0 @@ -645,7 +623,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { require.Equal(choices.Accepted, vtx2.Status()) } -func TestBootstrapperFinalized(t *testing.T) { +func TestBootstrapperUnexpectedVertex(t *testing.T) { require := require.New(t) config, peerID, sender, manager, vm 
:= newConfig(t) @@ -674,6 +652,7 @@ func TestBootstrapperFinalized(t *testing.T) { BytesV: vtxBytes1, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -686,10 +665,6 @@ func TestBootstrapperFinalized(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID0, vtxID1} parsedVtx0 := false parsedVtx1 := false manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { @@ -728,20 +703,17 @@ func TestBootstrapperFinalized(t *testing.T) { requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { require.Equal(peerID, vdr) - requestIDs[vtxID] = reqID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 and vtx1 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 require.Contains(requestIDs, vtxID1) reqID := requestIDs[vtxID1] - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) - require.Contains(requestIDs, vtxID0) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx1.Status() == choices.Accepted, nil - } + maps.Clear(requestIDs) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes0})) + require.Contains(requestIDs, vtxID1) manager.EdgeF = func(context.Context) []ids.ID { require.Equal(choices.Accepted, vtx1.Status()) @@ -753,356 +725,8 @@ func TestBootstrapperFinalized(t *testing.T) { return nil } - reqID = requestIDs[vtxID0] - require.NoError(bs.GetAncestorsFailed(context.Background(), peerID, reqID)) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, vtx0.Status()) - require.Equal(choices.Accepted, vtx1.Status()) -} - -// Test that Ancestors accepts the parents of the first vertex returned -func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { - require := require.New(t) - - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.Empty.Prefix(0) - vtxID1 := ids.Empty.Prefix(1) - vtxID2 := ids.Empty.Prefix(2) - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } - - bs, err := New( - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - ) - require.NoError(err) - - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID2} - parsedVtx0 := false - parsedVtx1 := false - parsedVtx2 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - case vtxID2: - 
if parsedVtx2 { - return vtx2, nil - } - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - return nil, errUnknownVertex - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = choices.Processing - parsedVtx2 = true - return vtx2, nil - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - require.Equal(peerID, vdr) - - requestIDs[vtxID] = reqID - } - - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx2 - require.Contains(requestIDs, vtxID2) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx2.Status() == choices.Accepted, nil - } - - manager.EdgeF = func(context.Context) []ids.ID { - require.Equal(choices.Accepted, vtx2.Status()) - return []ids.ID{vtxID2} - } - - vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { - require.Equal(vtxID2, stopVertexID) - return nil - } - - reqID := requestIDs[vtxID2] - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes2, vtxBytes1, vtxBytes0})) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) - require.Equal(choices.Accepted, vtx2.Status()) -} - -func TestRestartBootstrapping(t *testing.T) { - require := require.New(t) - - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - vtxID2 := ids.GenerateTestID() - vtxID3 := ids.GenerateTestID() - vtxID4 := ids.GenerateTestID() - vtxID5 := ids.GenerateTestID() - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} - vtxBytes3 := []byte{3} - vtxBytes4 := []byte{4} - vtxBytes5 := []byte{5} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } - vtx3 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID3, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes3, - } - vtx4 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID4, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes4, - } - vtx5 := &avalanche.TestVertex{ // vtx5 is the stop vertex - TestDecidable: choices.TestDecidable{ - IDV: vtxID5, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx3, vtx4}, - HeightV: 4, - BytesV: vtxBytes5, - } - - 
bsIntf, err := New( - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - ) - require.NoError(err) - - bs := bsIntf.(*bootstrapper) - - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - parsedVtx0 := false - parsedVtx1 := false - parsedVtx2 := false - parsedVtx3 := false - parsedVtx4 := false - parsedVtx5 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - case vtxID2: - if parsedVtx2 { - return vtx2, nil - } - case vtxID3: - if parsedVtx3 { - return vtx3, nil - } - case vtxID4: - if parsedVtx4 { - return vtx4, nil - } - case vtxID5: - if parsedVtx5 { - return vtx5, nil - } - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - return nil, errUnknownVertex - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = choices.Processing - parsedVtx2 = true - return vtx2, nil - case bytes.Equal(vtxBytes, vtxBytes3): - vtx3.StatusV = choices.Processing - parsedVtx3 = true - return vtx3, nil - case bytes.Equal(vtxBytes, vtxBytes4): - vtx4.StatusV = choices.Processing - parsedVtx4 = true - return vtx4, nil - case bytes.Equal(vtxBytes, vtxBytes5): - vtx5.StatusV = choices.Processing - parsedVtx5 = true - return vtx5, nil - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - require.Equal(peerID, vdr) - - requestIDs[vtxID] = reqID - } - - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{vtxID3, vtxID4})) // should request vtx3 and vtx4 - require.Contains(requestIDs, vtxID3) - require.Contains(requestIDs, vtxID4) - - vtx3ReqID := requestIDs[vtxID3] - require.NoError(bs.Ancestors(context.Background(), peerID, vtx3ReqID, [][]byte{vtxBytes3, vtxBytes2})) - require.Contains(requestIDs, vtxID1) - require.True(bs.OutstandingRequests.RemoveAny(vtxID4)) - require.True(bs.OutstandingRequests.RemoveAny(vtxID1)) - - bs.needToFetch.Clear() - requestIDs = map[ids.ID]uint32{} - - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{vtxID5, vtxID3})) - require.Contains(requestIDs, vtxID1) - require.Contains(requestIDs, vtxID4) - require.Contains(requestIDs, vtxID5) - require.NotContains(requestIDs, vtxID3) - - vtx5ReqID := requestIDs[vtxID5] - require.NoError(bs.Ancestors(context.Background(), peerID, vtx5ReqID, [][]byte{vtxBytes5, vtxBytes4, vtxBytes2, vtxBytes1})) - require.Contains(requestIDs, vtxID0) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx5.Status() == choices.Accepted, nil - } - - manager.EdgeF = func(context.Context) []ids.ID { - require.Equal(choices.Accepted, vtx5.Status()) - return []ids.ID{vtxID5} - } - - vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { - require.Equal(vtxID5, stopVertexID) - return nil - } - - vtx1ReqID := requestIDs[vtxID1] 
- require.NoError(bs.Ancestors(context.Background(), peerID, vtx1ReqID, [][]byte{vtxBytes1, vtxBytes0})) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, vtx0.Status()) - require.Equal(choices.Accepted, vtx1.Status()) - require.Equal(choices.Accepted, vtx2.Status()) - require.Equal(choices.Accepted, vtx3.Status()) - require.Equal(choices.Accepted, vtx4.Status()) - require.Equal(choices.Accepted, vtx5.Status()) } diff --git a/snow/engine/avalanche/bootstrap/config.go b/snow/engine/avalanche/bootstrap/config.go index 0569342cc328..a674c2758460 100644 --- a/snow/engine/avalanche/bootstrap/config.go +++ b/snow/engine/avalanche/bootstrap/config.go @@ -1,24 +1,40 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + Beacons validators.Manager + + StartupTracker tracker.Startup + Sender common.Sender + + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. + AncestorsMaxContainersReceived int + // VtxBlocked tracks operations that are blocked on vertices VtxBlocked *queue.JobsWithMissing // TxBlocked tracks operations that are blocked on transactions TxBlocked *queue.Jobs - Manager vertex.Manager - VM vertex.LinearizableVM - LinearizeOnStartup bool + Manager vertex.Manager + VM vertex.LinearizableVM + + // If StopVertexID is empty, the engine will generate the stop vertex based + // on the current state. + StopVertexID ids.ID } diff --git a/snow/engine/avalanche/bootstrap/metrics.go b/snow/engine/avalanche/bootstrap/metrics.go index b9d5824ec95a..cc357f25901f 100644 --- a/snow/engine/avalanche/bootstrap/metrics.go +++ b/snow/engine/avalanche/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap diff --git a/snow/engine/avalanche/bootstrap/tx_job.go b/snow/engine/avalanche/bootstrap/tx_job.go index 615108c992ba..9bb939d3d186 100644 --- a/snow/engine/avalanche/bootstrap/tx_job.go +++ b/snow/engine/avalanche/bootstrap/tx_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap diff --git a/snow/engine/avalanche/bootstrap/vertex_job.go b/snow/engine/avalanche/bootstrap/vertex_job.go index 3001ce89904c..30d33c5dc9fb 100644 --- a/snow/engine/avalanche/bootstrap/vertex_job.go +++ b/snow/engine/avalanche/bootstrap/vertex_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
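The reworked bootstrap.Config above no longer embeds common.Config; its dependencies are now passed explicitly, and the new StopVertexID field may be left empty to let the engine derive the stop vertex from current state. A minimal wiring sketch, assuming the surrounding variables already exist at the call site (they are illustrative, not part of this diff):

cfg := bootstrap.Config{
	AllGetsServer:                  getHandler,     // assumed common.AllGetsServer
	Ctx:                            consensusCtx,   // *snow.ConsensusContext
	Beacons:                        beacons,        // validators.Manager
	StartupTracker:                 startupTracker, // tracker.Startup
	Sender:                         sender,         // common.Sender
	AncestorsMaxContainersReceived: 2000,
	VtxBlocked:                     vtxBlocked, // *queue.JobsWithMissing
	TxBlocked:                      txBlocked,  // *queue.Jobs
	Manager:                        manager,    // vertex.Manager
	VM:                             vm,         // vertex.LinearizableVM
	StopVertexID:                   ids.Empty,  // empty => engine generates the stop vertex
}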
package bootstrap diff --git a/snow/engine/avalanche/engine.go b/snow/engine/avalanche/engine.go index 9b828ac05946..530a319e0fc6 100644 --- a/snow/engine/avalanche/engine.go +++ b/snow/engine/avalanche/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche @@ -66,10 +66,6 @@ func (e *engine) Context() *snow.ConsensusContext { return e.ctx } -func (e *engine) GetVM() common.VM { - return e.vm -} - func (*engine) HealthCheck(context.Context) (interface{}, error) { return nil, nil } diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index ebf6ef38b09f..8903eec1b3ba 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package getter @@ -7,6 +7,8 @@ import ( "context" "time" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -25,12 +27,20 @@ import ( // Get requests are always served, regardless node state (bootstrapping or normal operations). var _ common.AllGetsServer = (*getter)(nil) -func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, error) { +func New( + storage vertex.Storage, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, +) (common.AllGetsServer, error) { gh := &getter{ - storage: storage, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + storage: storage, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,17 +48,18 @@ func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, "bs", "get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", - commonCfg.Ctx.AvalancheRegisterer, + reg, ) return gh, err } type getter struct { - storage vertex.Storage - sender common.Sender - cfg common.Config + storage vertex.Storage + sender common.Sender + log logging.Logger + maxTimeGetAncestors time.Duration + maxContainersGetAncestors int - log logging.Logger getAncestorsVtxs metric.Averager } @@ -62,7 +73,7 @@ func (gh *getter) GetStateSummaryFrontier(_ context.Context, nodeID ids.NodeID, return nil } -func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []uint64) error { +func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[uint64]) error { gh.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), @@ -72,6 +83,8 @@ func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, return nil } +// TODO: Remove support for GetAcceptedFrontier messages after v1.11.x is +// activated. 
func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { acceptedFrontier := gh.storage.Edge(ctx) // Since all the DAGs are linearized, we only need to return the stop @@ -82,9 +95,10 @@ func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeI return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedVtxIDs := make([]ids.ID, 0, len(containerIDs)) - for _, vtxID := range containerIDs { +// TODO: Remove support for GetAccepted messages after v1.11.x is activated. +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedVtxIDs := make([]ids.ID, 0, containerIDs.Len()) + for vtxID := range containerIDs { if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil && vtx.Status() == choices.Accepted { acceptedVtxIDs = append(acceptedVtxIDs, vtxID) } @@ -106,13 +120,13 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID return nil // Don't have the requested vertex. Drop message. } - queue := make([]avalanche.Vertex, 1, gh.cfg.AncestorsMaxContainersSent) // for BFS + queue := make([]avalanche.Vertex, 1, gh.maxContainersGetAncestors) // for BFS queue[0] = vertex - ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors - ancestorsBytes := make([][]byte, 0, gh.cfg.AncestorsMaxContainersSent) // vertex and its ancestors in BFS order - visited := set.Of(vertex.ID()) // IDs of vertices that have been in queue before + ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors + ancestorsBytes := make([][]byte, 0, gh.maxContainersGetAncestors) // vertex and its ancestors in BFS order + visited := set.Of(vertex.ID()) // IDs of vertices that have been in queue before - for len(ancestorsBytes) < gh.cfg.AncestorsMaxContainersSent && len(queue) > 0 && time.Since(startTime) < gh.cfg.MaxTimeGetAncestors { + for len(ancestorsBytes) < gh.maxContainersGetAncestors && len(queue) > 0 && time.Since(startTime) < gh.maxTimeGetAncestors { var vtx avalanche.Vertex vtx, queue = queue[0], queue[1:] // pop vtxBytes := vtx.Bytes() diff --git a/snow/engine/avalanche/getter/getter_test.go b/snow/engine/avalanche/getter/getter_test.go index 93694ed5bba0..4977d53fa7ee 100644 --- a/snow/engine/avalanche/getter/getter_test.go +++ b/snow/engine/avalanche/getter/getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
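getter.New now takes its dependencies directly instead of a common.Config: the sender, a logger, the per-request time and size limits for GetAncestors, and a Prometheus registerer. A hedged sketch of a call site (variable names are assumptions; the getter_test.go change below constructs the handler the same way):

getHandler, err := getter.New(
	manager,                   // vertex.Storage
	sender,                    // common.Sender
	ctx.Log,                   // logging.Logger
	maxTimeGetAncestors,       // time.Duration budget per GetAncestors reply
	maxContainersGetAncestors, // cap on vertices returned per reply
	ctx.AvalancheRegisterer,   // prometheus.Registerer
)
if err != nil {
	return err
}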
package getter @@ -7,75 +7,50 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownVertex = errors.New("unknown vertex") -func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { - vdrs := validators.NewManager() - peer := ids.GenerateTestNodeID() - require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false +func newTest(t *testing.T) (common.AllGetsServer, *vertex.TestManager, *common.SenderTest) { + manager := vertex.NewTestManager(t) + manager.Default(true) - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := &common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, } + sender.Default(true) - totalWeight, err := vdrs.TotalWeight(constants.PrimaryNetworkID) + bs, err := New( + manager, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(t, err) - commonConfig := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Count(constants.PrimaryNetworkID), - Alpha: totalWeight/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - - return manager, sender, commonConfig + return bs, manager, sender } func TestAcceptedFrontier(t *testing.T) { require := require.New(t) - - manager, sender, config := testSetup(t) + bs, manager, sender := newTest(t) vtxID := ids.GenerateTestID() - - bsIntf, err := New(manager, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{ vtxID, @@ -92,8 +67,7 @@ func TestAcceptedFrontier(t *testing.T) { func TestFilterAccepted(t *testing.T) { require := require.New(t) - - manager, sender, config := testSetup(t) + bs, manager, sender := newTest(t) vtxID0 := ids.GenerateTestID() vtxID1 := ids.GenerateTestID() @@ -108,13 +82,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - bsIntf, err := New(manager, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - - vtxIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -133,6 +100,7 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } + vtxIDs := set.Of(vtxID0, vtxID1, vtxID2) require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs)) require.Contains(accepted, vtxID0) diff --git a/snow/engine/avalanche/state/prefixed_state.go 
b/snow/engine/avalanche/state/prefixed_state.go index 5fac890b9ed1..1ff634d5c61e 100644 --- a/snow/engine/avalanche/state/prefixed_state.go +++ b/snow/engine/avalanche/state/prefixed_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/snow/engine/avalanche/state/serializer.go b/snow/engine/avalanche/state/serializer.go index 88dc7afefae4..b305100b9ce2 100644 --- a/snow/engine/avalanche/state/serializer.go +++ b/snow/engine/avalanche/state/serializer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Package state manages the meta-data required by consensus for an avalanche diff --git a/snow/engine/avalanche/state/state.go b/snow/engine/avalanche/state/state.go index f7f94c5e6923..021a4c7e1d68 100644 --- a/snow/engine/avalanche/state/state.go +++ b/snow/engine/avalanche/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/snow/engine/avalanche/state/unique_vertex.go b/snow/engine/avalanche/state/unique_vertex.go index 73c1ef94ccdc..bc245d1b5496 100644 --- a/snow/engine/avalanche/state/unique_vertex.go +++ b/snow/engine/avalanche/state/unique_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index 4d6dcc55e385..6f644680d290 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -13,11 +13,11 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/logging" ) var errUnknownTx = errors.New("unknown tx") @@ -29,13 +29,12 @@ func newTestSerializer(t *testing.T, parse func(context.Context, []byte) (snowst vm.ParseTxF = parse baseDB := memdb.New() - ctx := snow.DefaultContextTest() s := NewSerializer( SerializerConfig{ - ChainID: ctx.ChainID, + ChainID: ids.Empty, VM: &vm, DB: baseDB, - Log: ctx.Log, + Log: logging.NoLog{}, }, ) @@ -260,9 +259,9 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { func TestParseVertexWithInvalidTxs(t *testing.T) { require := require.New(t) - ctx := snow.DefaultContextTest() + chainID := ids.Empty statelessVertex, err := vertex.Build( // regular, non-stop vertex - ctx.ChainID, + chainID, 0, nil, [][]byte{{1}}, @@ -290,7 +289,7 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { require.ErrorIs(err, errUnknownVertex) childStatelessVertex, err := vertex.Build( // regular, non-stop vertex - ctx.ChainID, + chainID, 1, []ids.ID{id}, [][]byte{{2}}, diff --git a/snow/engine/avalanche/vertex/builder.go b/snow/engine/avalanche/vertex/builder.go index 34ee26763849..cf3e88ee6a35 100644 --- a/snow/engine/avalanche/vertex/builder.go +++ b/snow/engine/avalanche/vertex/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -62,10 +62,10 @@ func buildVtx( utils.Sort(parentIDs) utils.SortByHash(txs) - codecVer := codecVersion + codecVer := CodecVersion if stopVertex { // use new codec version for the "StopVertex" - codecVer = codecVersionWithStopVtx + codecVer = CodecVersionWithStopVtx } innerVtx := innerStatelessVertex{ @@ -80,7 +80,7 @@ func buildVtx( return nil, err } - vtxBytes, err := c.Marshal(innerVtx.Version, innerVtx) + vtxBytes, err := Codec.Marshal(innerVtx.Version, innerVtx) vtx := statelessVertex{ innerStatelessVertex: innerVtx, id: hashing.ComputeHash256Array(vtxBytes), diff --git a/snow/engine/avalanche/vertex/builder_test.go b/snow/engine/avalanche/vertex/builder_test.go index 132ccbc33f73..a70b14ba8ff1 100644 --- a/snow/engine/avalanche/vertex/builder_test.go +++ b/snow/engine/avalanche/vertex/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/codec.go b/snow/engine/avalanche/vertex/codec.go index 564d699a25e9..12f387d0d25d 100644 --- a/snow/engine/avalanche/vertex/codec.go +++ b/snow/engine/avalanche/vertex/codec.go @@ -1,35 +1,38 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" ) const ( + CodecVersion uint16 = 0 + CodecVersionWithStopVtx uint16 = 1 + // maxSize is the maximum allowed vertex size. It is necessary to deter DoS maxSize = units.MiB - - codecVersion uint16 = 0 - codecVersionWithStopVtx uint16 = 1 ) -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.New([]string{reflectcodec.DefaultTagName + "V0"}, maxSize) - lc2 := linearcodec.New([]string{reflectcodec.DefaultTagName + "V1"}, maxSize) + lc0 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V0"}, maxSize) + lc1 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V1"}, maxSize) - c = codec.NewManager(maxSize) - // for backward compatibility, still register the initial codec version - if err := c.RegisterCodec(codecVersion, lc); err != nil { - panic(err) - } - if err := c.RegisterCodec(codecVersionWithStopVtx, lc2); err != nil { + Codec = codec.NewManager(maxSize) + err := utils.Err( + Codec.RegisterCodec(CodecVersion, lc0), + Codec.RegisterCodec(CodecVersionWithStopVtx, lc1), + ) + if err != nil { panic(err) } } diff --git a/snow/engine/avalanche/vertex/manager.go b/snow/engine/avalanche/vertex/manager.go index cf206742b629..a300affdb0e9 100644 --- a/snow/engine/avalanche/vertex/manager.go +++ b/snow/engine/avalanche/vertex/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/mock_vm.go b/snow/engine/avalanche/vertex/mock_vm.go index b8d2637c7311..7ad293f6313f 100644 --- a/snow/engine/avalanche/vertex/mock_vm.go +++ b/snow/engine/avalanche/vertex/mock_vm.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex (interfaces: LinearizableVM) +// +// Generated by this command: +// +// mockgen -package=vertex -destination=snow/engine/avalanche/vertex/mock_vm.go github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex LinearizableVM +// // Package vertex is a generated GoMock package. package vertex @@ -55,7 +57,7 @@ func (m *MockLinearizableVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, ar } // AppGossip indicates an expected call of AppGossip. -func (mr *MockLinearizableVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppGossip(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockLinearizableVM)(nil).AppGossip), arg0, arg1, arg2) } @@ -69,23 +71,23 @@ func (m *MockLinearizableVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, a } // AppRequest indicates an expected call of AppRequest. 
-func (mr *MockLinearizableVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) } // AppRequestFailed mocks base method. -func (m *MockLinearizableVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { +func (m *MockLinearizableVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AppRequestFailed indicates an expected call of AppRequestFailed. -func (mr *MockLinearizableVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequestFailed), arg0, arg1, arg2, arg3) } // AppResponse mocks base method. @@ -97,7 +99,7 @@ func (m *MockLinearizableVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, } // AppResponse indicates an expected call of AppResponse. -func (mr *MockLinearizableVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).AppResponse), arg0, arg1, arg2, arg3) } @@ -112,7 +114,7 @@ func (m *MockLinearizableVM) BuildBlock(arg0 context.Context) (snowman.Block, er } // BuildBlock indicates an expected call of BuildBlock. -func (mr *MockLinearizableVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) BuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockLinearizableVM)(nil).BuildBlock), arg0) } @@ -126,7 +128,7 @@ func (m *MockLinearizableVM) Connected(arg0 context.Context, arg1 ids.NodeID, ar } // Connected indicates an expected call of Connected. -func (mr *MockLinearizableVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockLinearizableVM)(nil).Connected), arg0, arg1, arg2) } @@ -141,26 +143,11 @@ func (m *MockLinearizableVM) CreateHandlers(arg0 context.Context) (map[string]ht } // CreateHandlers indicates an expected call of CreateHandlers. 
-func (mr *MockLinearizableVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CreateHandlers(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateHandlers), arg0) } -// CreateStaticHandlers mocks base method. -func (m *MockLinearizableVM) CreateStaticHandlers(arg0 context.Context) (map[string]http.Handler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) - ret0, _ := ret[0].(map[string]http.Handler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateStaticHandlers indicates an expected call of CreateStaticHandlers. -func (mr *MockLinearizableVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateStaticHandlers), arg0) -} - // CrossChainAppRequest mocks base method. func (m *MockLinearizableVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { m.ctrl.T.Helper() @@ -170,23 +157,23 @@ func (m *MockLinearizableVM) CrossChainAppRequest(arg0 context.Context, arg1 ids } // CrossChainAppRequest indicates an expected call of CrossChainAppRequest. -func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) } // CrossChainAppRequestFailed mocks base method. -func (m *MockLinearizableVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { +func (m *MockLinearizableVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. -func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2, arg3) } // CrossChainAppResponse mocks base method. @@ -198,7 +185,7 @@ func (m *MockLinearizableVM) CrossChainAppResponse(arg0 context.Context, arg1 id } // CrossChainAppResponse indicates an expected call of CrossChainAppResponse. 
-func (mr *MockLinearizableVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) } @@ -212,7 +199,7 @@ func (m *MockLinearizableVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) } // Disconnected indicates an expected call of Disconnected. -func (mr *MockLinearizableVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockLinearizableVM)(nil).Disconnected), arg0, arg1) } @@ -227,7 +214,7 @@ func (m *MockLinearizableVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowma } // GetBlock indicates an expected call of GetBlock. -func (mr *MockLinearizableVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) GetBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockLinearizableVM)(nil).GetBlock), arg0, arg1) } @@ -242,22 +229,22 @@ func (m *MockLinearizableVM) GetBlockIDAtHeight(arg0 context.Context, arg1 uint6 } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockLinearizableVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockLinearizableVM)(nil).GetBlockIDAtHeight), arg0, arg1) } // HealthCheck mocks base method. -func (m *MockLinearizableVM) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockLinearizableVM) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockLinearizableVM)(nil).HealthCheck), arg0) } @@ -271,7 +258,7 @@ func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context } // Initialize indicates an expected call of Initialize. -func (mr *MockLinearizableVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockLinearizableVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } @@ -286,7 +273,7 @@ func (m *MockLinearizableVM) LastAccepted(arg0 context.Context) (ids.ID, error) } // LastAccepted indicates an expected call of LastAccepted. 
-func (mr *MockLinearizableVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) LastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockLinearizableVM)(nil).LastAccepted), arg0) } @@ -300,7 +287,7 @@ func (m *MockLinearizableVM) Linearize(arg0 context.Context, arg1 ids.ID) error } // Linearize indicates an expected call of Linearize. -func (mr *MockLinearizableVMMockRecorder) Linearize(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Linearize(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Linearize", reflect.TypeOf((*MockLinearizableVM)(nil).Linearize), arg0, arg1) } @@ -315,7 +302,7 @@ func (m *MockLinearizableVM) ParseBlock(arg0 context.Context, arg1 []byte) (snow } // ParseBlock indicates an expected call of ParseBlock. -func (mr *MockLinearizableVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) ParseBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockLinearizableVM)(nil).ParseBlock), arg0, arg1) } @@ -330,7 +317,7 @@ func (m *MockLinearizableVM) ParseTx(arg0 context.Context, arg1 []byte) (snowsto } // ParseTx indicates an expected call of ParseTx. -func (mr *MockLinearizableVMMockRecorder) ParseTx(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) ParseTx(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseTx", reflect.TypeOf((*MockLinearizableVM)(nil).ParseTx), arg0, arg1) } @@ -344,7 +331,7 @@ func (m *MockLinearizableVM) SetPreference(arg0 context.Context, arg1 ids.ID) er } // SetPreference indicates an expected call of SetPreference. -func (mr *MockLinearizableVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) SetPreference(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockLinearizableVM)(nil).SetPreference), arg0, arg1) } @@ -358,7 +345,7 @@ func (m *MockLinearizableVM) SetState(arg0 context.Context, arg1 snow.State) err } // SetState indicates an expected call of SetState. -func (mr *MockLinearizableVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) SetState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockLinearizableVM)(nil).SetState), arg0, arg1) } @@ -372,7 +359,7 @@ func (m *MockLinearizableVM) Shutdown(arg0 context.Context) error { } // Shutdown indicates an expected call of Shutdown. -func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockLinearizableVM)(nil).Shutdown), arg0) } @@ -386,7 +373,7 @@ func (m *MockLinearizableVM) VerifyHeightIndex(arg0 context.Context) error { } // VerifyHeightIndex indicates an expected call of VerifyHeightIndex. 
-func (mr *MockLinearizableVMMockRecorder) VerifyHeightIndex(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockLinearizableVM)(nil).VerifyHeightIndex), arg0) } @@ -401,7 +388,7 @@ func (m *MockLinearizableVM) Version(arg0 context.Context) (string, error) { } // Version indicates an expected call of Version. -func (mr *MockLinearizableVMMockRecorder) Version(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Version(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockLinearizableVM)(nil).Version), arg0) } diff --git a/snow/engine/avalanche/vertex/parser.go b/snow/engine/avalanche/vertex/parser.go index cd409c7edc65..41f848e781c6 100644 --- a/snow/engine/avalanche/vertex/parser.go +++ b/snow/engine/avalanche/vertex/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -19,7 +19,7 @@ type Parser interface { // Parse parses the provided vertex bytes into a stateless vertex func Parse(bytes []byte) (StatelessVertex, error) { vtx := innerStatelessVertex{} - version, err := c.Unmarshal(bytes, &vtx) + version, err := Codec.Unmarshal(bytes, &vtx) if err != nil { return nil, err } diff --git a/snow/engine/avalanche/vertex/parser_test.go b/snow/engine/avalanche/vertex/parser_test.go index 5d765d8384dd..f3016895848d 100644 --- a/snow/engine/avalanche/vertex/parser_test.go +++ b/snow/engine/avalanche/vertex/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/stateless_vertex.go b/snow/engine/avalanche/vertex/stateless_vertex.go index cef298c9b90f..88884d9e90bb 100644 --- a/snow/engine/avalanche/vertex/stateless_vertex.go +++ b/snow/engine/avalanche/vertex/stateless_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
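With the codec manager exported, Parse above unmarshals through vertex.Codec, and the exported version constants determine whether a vertex is a stop vertex (see the StopVertex method in stateless_vertex.go below). A small hedged sketch of consuming a parsed vertex from another package (vtxBytes is an assumed raw vertex):

statelessVtx, err := vertex.Parse(vtxBytes)
if err != nil {
	return err
}
if statelessVtx.StopVertex() {
	// encoded with vertex.CodecVersionWithStopVtx; serializes no transactions
} else {
	// encoded with vertex.CodecVersion; a regular vertex carrying txs
}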
package vertex @@ -73,7 +73,7 @@ func (v statelessVertex) ChainID() ids.ID { } func (v statelessVertex) StopVertex() bool { - return v.innerStatelessVertex.Version == codecVersionWithStopVtx + return v.innerStatelessVertex.Version == CodecVersionWithStopVtx } func (v statelessVertex) Height() uint64 { @@ -94,15 +94,15 @@ func (v statelessVertex) Txs() [][]byte { type innerStatelessVertex struct { Version uint16 `json:"version"` - ChainID ids.ID `json:"chainID" serializeV0:"true" serializeV1:"true"` - Height uint64 `json:"height" serializeV0:"true" serializeV1:"true"` - Epoch uint32 `json:"epoch" serializeV0:"true"` - ParentIDs []ids.ID `json:"parentIDs" len:"128" serializeV0:"true" serializeV1:"true"` - Txs [][]byte `json:"txs" len:"128" serializeV0:"true"` + ChainID ids.ID `json:"chainID" serializeV0:"true" serializeV1:"true"` + Height uint64 `json:"height" serializeV0:"true" serializeV1:"true"` + Epoch uint32 `json:"epoch" serializeV0:"true"` + ParentIDs []ids.ID `json:"parentIDs" serializeV0:"true" serializeV1:"true"` + Txs [][]byte `json:"txs" serializeV0:"true"` } func (v innerStatelessVertex) Verify() error { - if v.Version == codecVersionWithStopVtx { + if v.Version == CodecVersionWithStopVtx { return v.verifyStopVertex() } return v.verify() @@ -110,7 +110,7 @@ func (v innerStatelessVertex) Verify() error { func (v innerStatelessVertex) verify() error { switch { - case v.Version != codecVersion: + case v.Version != CodecVersion: return errBadVersion case v.Epoch != 0: return errBadEpoch @@ -131,7 +131,7 @@ func (v innerStatelessVertex) verify() error { func (v innerStatelessVertex) verifyStopVertex() error { switch { - case v.Version != codecVersionWithStopVtx: + case v.Version != CodecVersionWithStopVtx: return errBadVersion case v.Epoch != 0: return errBadEpoch diff --git a/snow/engine/avalanche/vertex/stateless_vertex_test.go b/snow/engine/avalanche/vertex/stateless_vertex_test.go index a18a045a95d4..35ece98c51da 100644 --- a/snow/engine/avalanche/vertex/stateless_vertex_test.go +++ b/snow/engine/avalanche/vertex/stateless_vertex_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/storage.go b/snow/engine/avalanche/vertex/storage.go index 40ec863d2cba..cac766c6b103 100644 --- a/snow/engine/avalanche/vertex/storage.go +++ b/snow/engine/avalanche/vertex/storage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/test_builder.go b/snow/engine/avalanche/vertex/test_builder.go index 0bd63b26bcb1..534629372249 100644 --- a/snow/engine/avalanche/vertex/test_builder.go +++ b/snow/engine/avalanche/vertex/test_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/test_manager.go b/snow/engine/avalanche/vertex/test_manager.go index a2f55ee7f8b8..6954161cdd46 100644 --- a/snow/engine/avalanche/vertex/test_manager.go +++ b/snow/engine/avalanche/vertex/test_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/test_parser.go b/snow/engine/avalanche/vertex/test_parser.go index 3ca17b3440f1..2ee10add6090 100644 --- a/snow/engine/avalanche/vertex/test_parser.go +++ b/snow/engine/avalanche/vertex/test_parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/test_storage.go b/snow/engine/avalanche/vertex/test_storage.go index b5250ee1fca4..8e0b8bc1e84d 100644 --- a/snow/engine/avalanche/vertex/test_storage.go +++ b/snow/engine/avalanche/vertex/test_storage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/test_vm.go b/snow/engine/avalanche/vertex/test_vm.go index d20e57000da9..ee17c8b13ae0 100644 --- a/snow/engine/avalanche/vertex/test_vm.go +++ b/snow/engine/avalanche/vertex/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/avalanche/vertex/vm.go b/snow/engine/avalanche/vertex/vm.go index 67f3fc586b26..9987fe164d35 100644 --- a/snow/engine/avalanche/vertex/vm.go +++ b/snow/engine/avalanche/vertex/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/snow/engine/common/appsender/appsender_client.go b/snow/engine/common/appsender/appsender_client.go index a816dd68241e..acde7109f751 100644 --- a/snow/engine/common/appsender/appsender_client.go +++ b/snow/engine/common/appsender/appsender_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package appsender @@ -52,8 +52,7 @@ func (c *Client) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppRequest( @@ -71,7 +70,7 @@ func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request _, err := c.client.SendAppResponse( ctx, &appsenderpb.SendAppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -93,8 +92,7 @@ func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids. 
nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppGossipSpecific( diff --git a/snow/engine/common/appsender/appsender_server.go b/snow/engine/common/appsender/appsender_server.go index 3583940db108..84763e17bf11 100644 --- a/snow/engine/common/appsender/appsender_server.go +++ b/snow/engine/common/appsender/appsender_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package appsender diff --git a/snow/engine/common/bootstrap_tracker.go b/snow/engine/common/bootstrap_tracker.go index 04b90a122f98..bd2ef43cf1f3 100644 --- a/snow/engine/common/bootstrap_tracker.go +++ b/snow/engine/common/bootstrap_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/bootstrapable.go b/snow/engine/common/bootstrapable.go index a4abcc59a880..517eba2aa154 100644 --- a/snow/engine/common/bootstrapable.go +++ b/snow/engine/common/bootstrapable.go @@ -1,24 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" -) +import "context" type BootstrapableEngine interface { - Bootstrapable Engine -} - -// Bootstrapable defines the functionality required to support bootstrapping -type Bootstrapable interface { - // Force the provided containers to be accepted. Only returns fatal errors - // if they occur. - ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error // Clear removes all containers to be processed upon bootstrapping Clear(ctx context.Context) error diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go deleted file mode 100644 index aca219130478..000000000000 --- a/snow/engine/common/bootstrapper.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "context" - "fmt" - "math" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/set" - - safemath "github.com/ava-labs/avalanchego/utils/math" -) - -const ( - // StatusUpdateFrequency is how many containers should be processed between - // logs - StatusUpdateFrequency = 5000 - - // MaxOutstandingGetAncestorsRequests is the maximum number of GetAncestors - // sent but not responded to/failed - MaxOutstandingGetAncestorsRequests = 10 - - // MaxOutstandingBroadcastRequests is the maximum number of requests to have - // outstanding when broadcasting. 
- MaxOutstandingBroadcastRequests = 50 -) - -var _ Bootstrapper = (*bootstrapper)(nil) - -type Bootstrapper interface { - AcceptedFrontierHandler - AcceptedHandler - Haltable - Startup(context.Context) error - Restart(ctx context.Context, reset bool) error -} - -// It collects mechanisms common to both snowman and avalanche bootstrappers -type bootstrapper struct { - Config - Halter - - // Holds the beacons that were sampled for the accepted frontier - sampledBeacons validators.Manager - // IDs of validators we should request an accepted frontier from - pendingSendAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators we requested an accepted frontier from but haven't - // received a reply yet - pendingReceiveAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators that failed to respond with their accepted frontier - failedAcceptedFrontier set.Set[ids.NodeID] - // IDs of all the returned accepted frontiers - acceptedFrontierSet set.Set[ids.ID] - - // IDs of validators we should request filtering the accepted frontier from - pendingSendAccepted set.Set[ids.NodeID] - // IDs of validators we requested filtering the accepted frontier from but - // haven't received a reply yet - pendingReceiveAccepted set.Set[ids.NodeID] - // IDs of validators that failed to respond with their filtered accepted - // frontier - failedAccepted set.Set[ids.NodeID] - // IDs of the returned accepted containers and the stake weight that has - // marked them as accepted - acceptedVotes map[ids.ID]uint64 - acceptedFrontier []ids.ID - - // number of times the bootstrap has been attempted - bootstrapAttempts int -} - -func NewCommonBootstrapper(config Config) Bootstrapper { - return &bootstrapper{ - Config: config, - } -} - -func (b *bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - if !b.pendingReceiveAcceptedFrontier.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected AcceptedFrontier message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - // Union the reported accepted frontier from [nodeID] with the accepted - // frontier we got from others - b.acceptedFrontierSet.Add(containerID) - return b.markAcceptedFrontierReceived(ctx, nodeID) -} - -func (b *bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - if !b.pendingReceiveAcceptedFrontier.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected GetAcceptedFrontierFailed message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - // If we can't get a response from [nodeID], act as though they said their - // accepted frontier is empty and we add the validator to the failed list - b.failedAcceptedFrontier.Add(nodeID) - return b.markAcceptedFrontierReceived(ctx, nodeID) -} - -func (b *bootstrapper) markAcceptedFrontierReceived(ctx context.Context, nodeID ids.NodeID) error { - // Mark that we received 
a response from [nodeID] - b.pendingReceiveAcceptedFrontier.Remove(nodeID) - - b.sendGetAcceptedFrontiers(ctx) - - // still waiting on requests - if b.pendingReceiveAcceptedFrontier.Len() != 0 { - return nil - } - - // We've received the accepted frontier from every bootstrap validator - // Ask each bootstrap validator to filter the list of containers that we were - // told are on the accepted frontier such that the list only contains containers - // they think are accepted. - totalSampledWeight, err := b.sampledBeacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of sampled beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - beaconsTotalWeight, err := b.Beacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - newAlpha := float64(totalSampledWeight*b.Alpha) / float64(beaconsTotalWeight) - - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.Ctx.SubnetID, b.failedAcceptedFrontier) - if err != nil { - return fmt.Errorf("failed to get total weight of failed beacons: %w", err) - } - - // fail the bootstrap if the weight is not enough to bootstrap - if float64(totalSampledWeight)-newAlpha < float64(failedBeaconWeight) { - if b.Config.RetryBootstrap { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough frontiers received"), - zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), - zap.Int("numFailedBootstrappers", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttemps", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - - b.Ctx.Log.Debug("didn't receive enough frontiers", - zap.Int("numFailedValidators", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - } - - b.Config.SharedCfg.RequestID++ - b.acceptedFrontier = b.acceptedFrontierSet.List() - - b.sendGetAccepted(ctx) - return nil -} - -func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync Accepted message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - if !b.pendingReceiveAccepted.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected Accepted message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - // Mark that we received a response from [nodeID] - b.pendingReceiveAccepted.Remove(nodeID) - - weight := b.Beacons.GetWeight(b.Ctx.SubnetID, nodeID) - for _, containerID := range containerIDs { - previousWeight := b.acceptedVotes[containerID] - newWeight, err := safemath.Add64(weight, previousWeight) - if err != nil { - b.Ctx.Log.Error("failed calculating the Accepted votes", - zap.Uint64("weight", weight), - zap.Uint64("previousWeight", previousWeight), - zap.Error(err), - ) - newWeight = math.MaxUint64 - } - b.acceptedVotes[containerID] = newWeight - } - - b.sendGetAccepted(ctx) - - // wait on pending responses - if b.pendingReceiveAccepted.Len() != 0 { - return nil - } - - // We've received the filtered accepted frontier from every bootstrap validator - // Accept all containers that have a sufficient weight behind them - accepted := make([]ids.ID, 0, len(b.acceptedVotes)) - for containerID, weight := range b.acceptedVotes { - if weight >= b.Alpha { - accepted = append(accepted, containerID) 
- } - } - - // if we don't have enough weight for the bootstrap to be accepted then - // retry or fail the bootstrap - size := len(accepted) - if size == 0 && b.Beacons.Count(b.Ctx.SubnetID) > 0 { - // if we had too many timeouts when asking for validator votes, we - // should restart bootstrap hoping for the network problems to go away; - // otherwise, we received enough (>= b.Alpha) responses, but no frontier - // was supported by a majority of validators (i.e. votes are split - // between minorities supporting different frontiers). - beaconTotalWeight, err := b.Beacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.Ctx.SubnetID, b.failedAccepted) - if err != nil { - return fmt.Errorf("failed to get total weight of failed beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - votingStakes := beaconTotalWeight - failedBeaconWeight - if b.Config.RetryBootstrap && votingStakes < b.Alpha { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough votes received"), - zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), - zap.Int("numFailedBootstrappers", b.failedAccepted.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - } - - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) - } else { - b.Ctx.Log.Debug("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) - } - - return b.Bootstrapable.ForceAccepted(ctx, accepted) -} - -func (b *bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - // If we can't get a response from [nodeID], act as though they said that - // they think none of the containers we sent them in GetAccepted are - // accepted - b.failedAccepted.Add(nodeID) - return b.Accepted(ctx, nodeID, requestID, nil) -} - -func (b *bootstrapper) Startup(ctx context.Context) error { - beaconIDs, err := b.Beacons.Sample(b.Ctx.SubnetID, b.Config.SampleK) - if err != nil { - return err - } - - b.sampledBeacons = validators.NewManager() - b.pendingSendAcceptedFrontier.Clear() - for _, nodeID := range beaconIDs { - if _, ok := b.sampledBeacons.GetValidator(b.Ctx.SubnetID, nodeID); !ok { - // Invariant: We never use the TxID or BLS keys populated here. 
- err = b.sampledBeacons.AddStaker(b.Ctx.SubnetID, nodeID, nil, ids.Empty, 1) - } else { - err = b.sampledBeacons.AddWeight(b.Ctx.SubnetID, nodeID, 1) - } - if err != nil { - return err - } - b.pendingSendAcceptedFrontier.Add(nodeID) - } - - b.pendingReceiveAcceptedFrontier.Clear() - b.failedAcceptedFrontier.Clear() - b.acceptedFrontierSet.Clear() - - b.pendingSendAccepted.Clear() - for _, nodeID := range b.Beacons.GetValidatorIDs(b.Ctx.SubnetID) { - b.pendingSendAccepted.Add(nodeID) - } - - b.pendingReceiveAccepted.Clear() - b.failedAccepted.Clear() - b.acceptedVotes = make(map[ids.ID]uint64) - - b.bootstrapAttempts++ - if b.pendingSendAcceptedFrontier.Len() == 0 { - b.Ctx.Log.Info("bootstrapping skipped", - zap.String("reason", "no provided bootstraps"), - ) - return b.Bootstrapable.ForceAccepted(ctx, nil) - } - - b.Config.SharedCfg.RequestID++ - b.sendGetAcceptedFrontiers(ctx) - return nil -} - -func (b *bootstrapper) Restart(ctx context.Context, reset bool) error { - // resets the attempts when we're pulling blocks/vertices we don't want to - // fail the bootstrap at that stage - if reset { - b.Ctx.Log.Debug("Checking for new frontiers") - - b.Config.SharedCfg.Restarted = true - b.bootstrapAttempts = 0 - } - - if b.bootstrapAttempts > 0 && b.bootstrapAttempts%b.RetryBootstrapWarnFrequency == 0 { - b.Ctx.Log.Debug("check internet connection", - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - } - - return b.Startup(ctx) -} - -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their accepted frontier with the current accepted frontier -func (b *bootstrapper) sendGetAcceptedFrontiers(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAcceptedFrontier.Len() > 0 && b.pendingReceiveAcceptedFrontier.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAcceptedFrontier.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAcceptedFrontier.Add(vdr) - } - - if vdrs.Len() > 0 { - b.Sender.SendGetAcceptedFrontier(ctx, vdrs, b.Config.SharedCfg.RequestID) - } -} - -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their filtered accepted frontier -func (b *bootstrapper) sendGetAccepted(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAccepted.Len() > 0 && b.pendingReceiveAccepted.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAccepted.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAccepted.Add(vdr) - } - - if vdrs.Len() > 0 { - b.Ctx.Log.Debug("sent GetAccepted messages", - zap.Int("numSent", vdrs.Len()), - zap.Int("numPending", b.pendingSendAccepted.Len()), - ) - b.Sender.SendGetAccepted(ctx, vdrs, b.Config.SharedCfg.RequestID, b.acceptedFrontier) - } -} diff --git a/snow/engine/common/config.go b/snow/engine/common/config.go deleted file mode 100644 index 05eb3602f876..000000000000 --- a/snow/engine/common/config.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "time" - - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/validators" -) - -// Config wraps the common configurations that are needed by a Snow consensus -// engine -type Config struct { - Ctx *snow.ConsensusContext - Beacons validators.Manager - - SampleK int - Alpha uint64 - StartupTracker tracker.Startup - Sender Sender - Bootstrapable Bootstrapable - BootstrapTracker BootstrapTracker - Timer Timer - - // Should Bootstrap be retried - RetryBootstrap bool - - // Max number of times to retry bootstrap before warning the node operator - RetryBootstrapWarnFrequency int - - // Max time to spend fetching a container and its ancestors when responding - // to a GetAncestors - MaxTimeGetAncestors time.Duration - - // Max number of containers in an ancestors message sent by this node. - AncestorsMaxContainersSent int - - // This node will only consider the first [AncestorsMaxContainersReceived] - // containers in an ancestors message it receives. - AncestorsMaxContainersReceived int - - SharedCfg *SharedConfig -} - -func (c *Config) Context() *snow.ConsensusContext { - return c.Ctx -} - -// IsBootstrapped returns true iff this chain is done bootstrapping -func (c *Config) IsBootstrapped() bool { - return c.Ctx.State.Get().State == snow.NormalOp -} - -// Shared among common.bootstrapper and snowman/avalanche bootstrapper -type SharedConfig struct { - // Tracks the last requestID that was used in a request - RequestID uint32 - - // True if RestartBootstrap has been called at least once - Restarted bool -} diff --git a/snow/engine/common/engine.go b/snow/engine/common/engine.go index 4c8213a432f6..cbd9c37dc10c 100644 --- a/snow/engine/common/engine.go +++ b/snow/engine/common/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" ) // Engine describes the standard interface of a consensus engine. @@ -31,9 +32,6 @@ type Engine interface { // Returns nil if the engine is healthy. // Periodically called and reported through the health API health.Checker - - // GetVM returns this engine's VM - GetVM() VM } type Handler interface { @@ -108,7 +106,7 @@ type GetAcceptedStateSummaryHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - heights []uint64, + heights set.Set[uint64], ) error } @@ -122,7 +120,7 @@ type AcceptedStateSummaryHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - summaryIDs []ids.ID, + summaryIDs set.Set[ids.ID], ) error // Notify this engine that a GetAcceptedStateSummary request it issued has @@ -182,7 +180,7 @@ type GetAcceptedHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error } @@ -196,7 +194,7 @@ type AcceptedHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error // Notify this engine that a GetAccepted request it issued has failed. 
@@ -398,6 +396,7 @@ type AppResponseHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, + appErr *AppError, ) error } @@ -467,6 +466,7 @@ type CrossChainAppResponseHandler interface { ctx context.Context, chainID ids.ID, requestID uint32, + appErr *AppError, ) error } diff --git a/snow/engine/common/error.go b/snow/engine/common/error.go new file mode 100644 index 000000000000..261fedaa260c --- /dev/null +++ b/snow/engine/common/error.go @@ -0,0 +1,43 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import "fmt" + +var ( + _ error = (*AppError)(nil) + + // ErrUndefined indicates an undefined error + ErrUndefined = &AppError{ + Code: 0, + Message: "undefined", + } + + // ErrTimeout is used to signal a response timeout + ErrTimeout = &AppError{ + Code: -1, + Message: "timed out", + } +) + +// AppError is an application-defined error +type AppError struct { + // Code is application-defined and should be used for error matching + Code int32 + // Message is a human-readable error message + Message string +} + +func (a *AppError) Error() string { + return fmt.Sprintf("%d: %s", a.Code, a.Message) +} + +func (a *AppError) Is(target error) bool { + appErr, ok := target.(*AppError) + if !ok { + return false + } + + return a.Code == appErr.Code +} diff --git a/snow/engine/common/error_test.go b/snow/engine/common/error_test.go new file mode 100644 index 000000000000..0204e010045b --- /dev/null +++ b/snow/engine/common/error_test.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +// Tests the invariant that AppErrors are matched against their error codes +func TestAppErrorEqual(t *testing.T) { + tests := []struct { + name string + err1 *AppError + err2 error + expected bool + }{ + { + name: "is - equal", + err1: &AppError{ + Code: 1, + }, + err2: &AppError{ + Code: 1, + }, + expected: true, + }, + { + name: "is - same error code different messages", + err1: &AppError{ + Code: 1, + Message: "foo", + }, + err2: &AppError{ + Code: 1, + Message: "bar", + }, + expected: true, + }, + { + name: "not is - different error code", + err1: &AppError{ + Code: 1, + }, + err2: &AppError{ + Code: 2, + }, + }, + { + name: "not is - different type", + err1: &AppError{ + Code: 1, + }, + err2: errors.New("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, errors.Is(tt.err1, tt.err2)) + }) + } +} + +// Tests reserved error types +func TestErrorCode(t *testing.T) { + tests := []struct { + name string + code int32 + expected *AppError + }{ + { + name: "undefined", + code: 0, + expected: ErrUndefined, + }, + { + name: "undefined", + code: -1, + expected: ErrTimeout, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.ErrorIs(t, tt.expected, &AppError{Code: tt.code}) + }) + } +} diff --git a/snow/engine/common/fetcher.go b/snow/engine/common/fetcher.go deleted file mode 100644 index 9e90da3d325b..000000000000 --- a/snow/engine/common/fetcher.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
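// A minimal usage sketch of the AppError matching semantics introduced above
// (error.go / error_test.go): AppError values match purely by Code, so the
// failure handlers that now receive *AppError (AppRequestFailed,
// CrossChainAppRequestFailed) can branch on reserved codes such as ErrTimeout.
// The describeFailure helper and the literal codes below are illustrative
// assumptions, not code from this change.
package main

import (
	"errors"
	"fmt"

	"github.com/ava-labs/avalanchego/snow/engine/common"
)

func describeFailure(appErr *common.AppError) string {
	switch {
	case errors.Is(appErr, common.ErrTimeout):
		// Code -1 is reserved for request timeouts.
		return "request timed out"
	case errors.Is(appErr, common.ErrUndefined):
		// Code 0 is reserved for undefined errors.
		return "undefined failure"
	default:
		// Application-defined code; Message is informational and ignored by Is.
		return fmt.Sprintf("app error %d: %s", appErr.Code, appErr.Message)
	}
}

func main() {
	fmt.Println(describeFailure(&common.AppError{Code: 42, Message: "rate limited"}))
	fmt.Println(describeFailure(common.ErrTimeout))
}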
- -package common - -import "context" - -type Fetcher struct { - // tracks which validators were asked for which containers in which requests - OutstandingRequests Requests - - // Called when bootstrapping is done on a specific chain - OnFinished func(ctx context.Context, lastReqID uint32) error -} diff --git a/snow/engine/common/fx.go b/snow/engine/common/fx.go index 861986462bb2..000c22ed6baf 100644 --- a/snow/engine/common/fx.go +++ b/snow/engine/common/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/halter.go b/snow/engine/common/halter.go index bdfe3c9d489e..1fcea981d2e4 100644 --- a/snow/engine/common/halter.go +++ b/snow/engine/common/halter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/message.go b/snow/engine/common/message.go index 6ce1c4501a1c..1bc05991973e 100644 --- a/snow/engine/common/message.go +++ b/snow/engine/common/message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/mock_sender.go b/snow/engine/common/mock_sender.go index 3850c198a236..6ebeeb636675 100644 --- a/snow/engine/common/mock_sender.go +++ b/snow/engine/common/mock_sender.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/engine/common (interfaces: Sender) +// Source: snow/engine/common/sender.go +// +// Generated by this command: +// +// mockgen -source=snow/engine/common/sender.go -destination=snow/engine/common/mock_sender.go -package=common -exclude_interfaces=StateSummarySender,AcceptedStateSummarySender,FrontierSender,AcceptedSender,FetchSender,AppSender,QuerySender,CrossChainAppSender,NetworkAppSender,Gossiper +// // Package common is a generated GoMock package. package common @@ -41,291 +43,291 @@ func (m *MockSender) EXPECT() *MockSenderMockRecorder { } // Accept mocks base method. -func (m *MockSender) Accept(arg0 *snow.ConsensusContext, arg1 ids.ID, arg2 []byte) error { +func (m *MockSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "Accept", ctx, containerID, container) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. -func (mr *MockSenderMockRecorder) Accept(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) Accept(ctx, containerID, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), ctx, containerID, container) } // SendAccepted mocks base method. 
-func (m *MockSender) SendAccepted(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAccepted", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAccepted", ctx, nodeID, requestID, containerIDs) } // SendAccepted indicates an expected call of SendAccepted. -func (mr *MockSenderMockRecorder) SendAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAccepted(ctx, nodeID, requestID, containerIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), ctx, nodeID, requestID, containerIDs) } // SendAcceptedFrontier mocks base method. -func (m *MockSender) SendAcceptedFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedFrontier", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAcceptedFrontier", ctx, nodeID, requestID, containerID) } // SendAcceptedFrontier indicates an expected call of SendAcceptedFrontier. -func (mr *MockSenderMockRecorder) SendAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedFrontier(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), ctx, nodeID, requestID, containerID) } // SendAcceptedStateSummary mocks base method. -func (m *MockSender) SendAcceptedStateSummary(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedStateSummary", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAcceptedStateSummary", ctx, nodeID, requestID, summaryIDs) } // SendAcceptedStateSummary indicates an expected call of SendAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(ctx, nodeID, requestID, summaryIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), ctx, nodeID, requestID, summaryIDs) } // SendAncestors mocks base method. 
-func (m *MockSender) SendAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 [][]byte) { +func (m *MockSender) SendAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAncestors", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAncestors", ctx, nodeID, requestID, containers) } // SendAncestors indicates an expected call of SendAncestors. -func (mr *MockSenderMockRecorder) SendAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAncestors(ctx, nodeID, requestID, containers any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), ctx, nodeID, requestID, containers) } // SendAppGossip mocks base method. -func (m *MockSender) SendAppGossip(arg0 context.Context, arg1 []byte) error { +func (m *MockSender) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossip", arg0, arg1) + ret := m.ctrl.Call(m, "SendAppGossip", ctx, appGossipBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppGossip indicates an expected call of SendAppGossip. -func (mr *MockSenderMockRecorder) SendAppGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossip(ctx, appGossipBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), ctx, appGossipBytes) } // SendAppGossipSpecific mocks base method. -func (m *MockSender) SendAppGossipSpecific(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 []byte) error { +func (m *MockSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossipSpecific", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "SendAppGossipSpecific", ctx, nodeIDs, appGossipBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppGossipSpecific indicates an expected call of SendAppGossipSpecific. -func (mr *MockSenderMockRecorder) SendAppGossipSpecific(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossipSpecific(ctx, nodeIDs, appGossipBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), ctx, nodeIDs, appGossipBytes) } // SendAppRequest mocks base method. 
-func (m *MockSender) SendAppRequest(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppRequest", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendAppRequest", ctx, nodeIDs, requestID, appRequestBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppRequest indicates an expected call of SendAppRequest. -func (mr *MockSenderMockRecorder) SendAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppRequest(ctx, nodeIDs, requestID, appRequestBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), ctx, nodeIDs, requestID, appRequestBytes) } // SendAppResponse mocks base method. -func (m *MockSender) SendAppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppResponse", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendAppResponse", ctx, nodeID, requestID, appResponseBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppResponse indicates an expected call of SendAppResponse. -func (mr *MockSenderMockRecorder) SendAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppResponse(ctx, nodeID, requestID, appResponseBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), ctx, nodeID, requestID, appResponseBytes) } // SendChits mocks base method. -func (m *MockSender) SendChits(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3, arg4 ids.ID) { +func (m *MockSender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID, preferredIDAtHeight, acceptedID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendChits", arg0, arg1, arg2, arg3, arg4) + m.ctrl.Call(m, "SendChits", ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } // SendChits indicates an expected call of SendChits. -func (mr *MockSenderMockRecorder) SendChits(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendChits(ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } // SendCrossChainAppRequest mocks base method. 
-func (m *MockSender) SendCrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCrossChainAppRequest", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendCrossChainAppRequest", ctx, chainID, requestID, appRequestBytes) ret0, _ := ret[0].(error) return ret0 } // SendCrossChainAppRequest indicates an expected call of SendCrossChainAppRequest. -func (mr *MockSenderMockRecorder) SendCrossChainAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendCrossChainAppRequest(ctx, chainID, requestID, appRequestBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppRequest", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppRequest), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppRequest", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppRequest), ctx, chainID, requestID, appRequestBytes) } // SendCrossChainAppResponse mocks base method. -func (m *MockSender) SendCrossChainAppResponse(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCrossChainAppResponse", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendCrossChainAppResponse", ctx, chainID, requestID, appResponseBytes) ret0, _ := ret[0].(error) return ret0 } // SendCrossChainAppResponse indicates an expected call of SendCrossChainAppResponse. -func (mr *MockSenderMockRecorder) SendCrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendCrossChainAppResponse(ctx, chainID, requestID, appResponseBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppResponse", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppResponse), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppResponse", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppResponse), ctx, chainID, requestID, appResponseBytes) } // SendGet mocks base method. -func (m *MockSender) SendGet(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGet", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGet", ctx, nodeID, requestID, containerID) } // SendGet indicates an expected call of SendGet. -func (mr *MockSenderMockRecorder) SendGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGet(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), ctx, nodeID, requestID, containerID) } // SendGetAccepted mocks base method. 
-func (m *MockSender) SendGetAccepted(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAccepted", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAccepted", ctx, nodeIDs, requestID, containerIDs) } // SendGetAccepted indicates an expected call of SendGetAccepted. -func (mr *MockSenderMockRecorder) SendGetAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAccepted(ctx, nodeIDs, requestID, containerIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), ctx, nodeIDs, requestID, containerIDs) } // SendGetAcceptedFrontier mocks base method. -func (m *MockSender) SendGetAcceptedFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { +func (m *MockSender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedFrontier", arg0, arg1, arg2) + m.ctrl.Call(m, "SendGetAcceptedFrontier", ctx, nodeIDs, requestID) } // SendGetAcceptedFrontier indicates an expected call of SendGetAcceptedFrontier. -func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(ctx, nodeIDs, requestID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), ctx, nodeIDs, requestID) } // SendGetAcceptedStateSummary mocks base method. -func (m *MockSender) SendGetAcceptedStateSummary(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []uint64) { +func (m *MockSender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedStateSummary", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAcceptedStateSummary", ctx, nodeIDs, requestID, heights) } // SendGetAcceptedStateSummary indicates an expected call of SendGetAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(ctx, nodeIDs, requestID, heights any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), ctx, nodeIDs, requestID, heights) } // SendGetAncestors mocks base method. 
-func (m *MockSender) SendGetAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAncestors", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAncestors", ctx, nodeID, requestID, containerID) } // SendGetAncestors indicates an expected call of SendGetAncestors. -func (mr *MockSenderMockRecorder) SendGetAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAncestors(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), ctx, nodeID, requestID, containerID) } // SendGetStateSummaryFrontier mocks base method. -func (m *MockSender) SendGetStateSummaryFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { +func (m *MockSender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetStateSummaryFrontier", arg0, arg1, arg2) + m.ctrl.Call(m, "SendGetStateSummaryFrontier", ctx, nodeIDs, requestID) } // SendGetStateSummaryFrontier indicates an expected call of SendGetStateSummaryFrontier. -func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(ctx, nodeIDs, requestID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), ctx, nodeIDs, requestID) } // SendGossip mocks base method. -func (m *MockSender) SendGossip(arg0 context.Context, arg1 []byte) { +func (m *MockSender) SendGossip(ctx context.Context, container []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGossip", arg0, arg1) + m.ctrl.Call(m, "SendGossip", ctx, container) } // SendGossip indicates an expected call of SendGossip. -func (mr *MockSenderMockRecorder) SendGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGossip(ctx, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), ctx, container) } // SendPullQuery mocks base method. -func (m *MockSender) SendPullQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID, requestedHeight uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPullQuery", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPullQuery", ctx, nodeIDs, requestID, containerID, requestedHeight) } // SendPullQuery indicates an expected call of SendPullQuery. 
-func (mr *MockSenderMockRecorder) SendPullQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPullQuery(ctx, nodeIDs, requestID, containerID, requestedHeight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), ctx, nodeIDs, requestID, containerID, requestedHeight) } // SendPushQuery mocks base method. -func (m *MockSender) SendPushQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) { +func (m *MockSender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte, requestedHeight uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPushQuery", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPushQuery", ctx, nodeIDs, requestID, container, requestedHeight) } // SendPushQuery indicates an expected call of SendPushQuery. -func (mr *MockSenderMockRecorder) SendPushQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPushQuery(ctx, nodeIDs, requestID, container, requestedHeight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), ctx, nodeIDs, requestID, container, requestedHeight) } // SendPut mocks base method. -func (m *MockSender) SendPut(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { +func (m *MockSender) SendPut(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPut", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPut", ctx, nodeID, requestID, container) } // SendPut indicates an expected call of SendPut. -func (mr *MockSenderMockRecorder) SendPut(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPut(ctx, nodeID, requestID, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), ctx, nodeID, requestID, container) } // SendStateSummaryFrontier mocks base method. -func (m *MockSender) SendStateSummaryFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { +func (m *MockSender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendStateSummaryFrontier", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendStateSummaryFrontier", ctx, nodeID, requestID, summary) } // SendStateSummaryFrontier indicates an expected call of SendStateSummaryFrontier. 
-func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(ctx, nodeID, requestID, summary any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), ctx, nodeID, requestID, summary) } diff --git a/snow/engine/common/no_ops_handlers.go b/snow/engine/common/no_ops_handlers.go index 9530600177bb..870c6694a7a7 100644 --- a/snow/engine/common/no_ops_handlers.go +++ b/snow/engine/common/no_ops_handlers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -65,7 +66,7 @@ func NewNoOpAcceptedStateSummaryHandler(log logging.Logger) AcceptedStateSummary return &noOpAcceptedStateSummaryHandler{log: log} } -func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedStateSummaryOp), @@ -122,7 +123,7 @@ func NewNoOpAcceptedHandler(log logging.Logger) AcceptedHandler { return &noOpAcceptedHandler{log: log} } -func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedOp), @@ -287,12 +288,13 @@ func (nop *noOpAppHandler) CrossChainAppRequest(_ context.Context, chainID ids.I return nil } -func (nop *noOpAppHandler) CrossChainAppRequestFailed(_ context.Context, chainID ids.ID, requestID uint32) error { +func (nop *noOpAppHandler) CrossChainAppRequestFailed(_ context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.CrossChainAppRequestFailedOp), + zap.Stringer("messageOp", message.CrossChainAppErrorOp), zap.Stringer("chainID", chainID), zap.Uint32("requestID", requestID), + zap.Error(appErr), ) return nil } @@ -317,12 +319,13 @@ func (nop *noOpAppHandler) AppRequest(_ context.Context, nodeID ids.NodeID, requ return nil } -func (nop *noOpAppHandler) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAppHandler) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppRequestFailedOp), + 
zap.Stringer("messageOp", message.AppErrorOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Error(appErr), ) return nil } diff --git a/snow/engine/common/queue/job.go b/snow/engine/common/queue/job.go index 4ac5a60fb835..3b36893f1d96 100644 --- a/snow/engine/common/queue/job.go +++ b/snow/engine/common/queue/job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/snow/engine/common/queue/jobs.go b/snow/engine/common/queue/jobs.go index 5592ad822439..0577602a3729 100644 --- a/snow/engine/common/queue/jobs.go +++ b/snow/engine/common/queue/jobs.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/common/queue/jobs_test.go index 57b63e19750d..a098cb6e6d6d 100644 --- a/snow/engine/common/queue/jobs_test.go +++ b/snow/engine/common/queue/jobs_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue @@ -16,8 +16,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/set" ) @@ -116,7 +116,8 @@ func TestPushAndExecute(t *testing.T) { return job, nil } - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(1, count) @@ -182,7 +183,8 @@ func TestRemoveDependency(t *testing.T) { } } - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed0) @@ -355,7 +357,8 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { } } - _, err = jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + _, err = jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) // Assert that the database closed error on job1 causes ExecuteAll // to fail in the middle of execution. 
require.ErrorIs(err, database.ErrClosed) @@ -387,7 +390,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.True(hasNext) - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed1) diff --git a/snow/engine/common/queue/parser.go b/snow/engine/common/queue/parser.go index ee8f39807f35..07e9df50887d 100644 --- a/snow/engine/common/queue/parser.go +++ b/snow/engine/common/queue/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/snow/engine/common/queue/state.go b/snow/engine/common/queue/state.go index cae43f8c2101..76bce7c838c1 100644 --- a/snow/engine/common/queue/state.go +++ b/snow/engine/common/queue/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/set" ) @@ -61,7 +62,7 @@ func newState( metricsNamespace string, metricsRegisterer prometheus.Registerer, ) (*state, error) { - jobsCacheMetricsNamespace := fmt.Sprintf("%s_jobs_cache", metricsNamespace) + jobsCacheMetricsNamespace := metric.AppendNamespace(metricsNamespace, "jobs_cache") jobsCache, err := metercacher.New[ids.ID, Job]( jobsCacheMetricsNamespace, metricsRegisterer, diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go index 98ea33614b50..fd9af544fb62 100644 --- a/snow/engine/common/queue/test_job.go +++ b/snow/engine/common/queue/test_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/snow/engine/common/queue/test_parser.go b/snow/engine/common/queue/test_parser.go index 85a079cc1435..1cc1cfd2973f 100644 --- a/snow/engine/common/queue/test_parser.go +++ b/snow/engine/common/queue/test_parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/snow/engine/common/request.go b/snow/engine/common/request.go new file mode 100644 index 000000000000..f92e347cc73b --- /dev/null +++ b/snow/engine/common/request.go @@ -0,0 +1,19 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +type Request struct { + NodeID ids.NodeID + RequestID uint32 +} + +func (r Request) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("%s:%d", r.NodeID, r.RequestID)), nil +} diff --git a/snow/engine/common/request_test.go b/snow/engine/common/request_test.go new file mode 100644 index 000000000000..0da4c8c438f7 --- /dev/null +++ b/snow/engine/common/request_test.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestRequestJSONMarshal(t *testing.T) { + requestMap := map[Request]ids.ID{ + { + NodeID: ids.GenerateTestNodeID(), + RequestID: 12345, + }: ids.GenerateTestID(), + } + _, err := json.Marshal(requestMap) + require.NoError(t, err) +} diff --git a/snow/engine/common/requests.go b/snow/engine/common/requests.go deleted file mode 100644 index ce66585e590d..000000000000 --- a/snow/engine/common/requests.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "fmt" - "strings" - - "github.com/ava-labs/avalanchego/ids" -) - -const ( - minRequestsSize = 32 -) - -type req struct { - vdr ids.NodeID - id uint32 -} - -// Requests tracks pending container messages from a peer. -type Requests struct { - reqsToID map[ids.NodeID]map[uint32]ids.ID - idToReq map[ids.ID]req -} - -// Add a request. Assumes that requestIDs are unique. Assumes that containerIDs -// are only in one request at a time. -func (r *Requests) Add(vdr ids.NodeID, requestID uint32, containerID ids.ID) { - if r.reqsToID == nil { - r.reqsToID = make(map[ids.NodeID]map[uint32]ids.ID, minRequestsSize) - } - vdrReqs, ok := r.reqsToID[vdr] - if !ok { - vdrReqs = make(map[uint32]ids.ID) - r.reqsToID[vdr] = vdrReqs - } - vdrReqs[requestID] = containerID - - if r.idToReq == nil { - r.idToReq = make(map[ids.ID]req, minRequestsSize) - } - r.idToReq[containerID] = req{ - vdr: vdr, - id: requestID, - } -} - -// Get the containerID the request is expecting and if the request exists. -func (r *Requests) Get(vdr ids.NodeID, requestID uint32) (ids.ID, bool) { - containerID, ok := r.reqsToID[vdr][requestID] - return containerID, ok -} - -// Remove attempts to abandon a requestID sent to a validator. If the request is -// currently outstanding, the requested ID will be returned along with true. If -// the request isn't currently outstanding, false will be returned. -func (r *Requests) Remove(vdr ids.NodeID, requestID uint32) (ids.ID, bool) { - vdrReqs := r.reqsToID[vdr] - containerID, ok := vdrReqs[requestID] - if !ok { - return ids.ID{}, false - } - - if len(vdrReqs) == 1 { - delete(r.reqsToID, vdr) - } else { - delete(vdrReqs, requestID) - } - - delete(r.idToReq, containerID) - return containerID, true -} - -// RemoveAny outstanding requests for the container ID. True is returned if the -// container ID had an outstanding request. -func (r *Requests) RemoveAny(containerID ids.ID) bool { - req, ok := r.idToReq[containerID] - if !ok { - return false - } - - r.Remove(req.vdr, req.id) - return true -} - -// Len returns the total number of outstanding requests. -func (r *Requests) Len() int { - return len(r.idToReq) -} - -// Contains returns true if there is an outstanding request for the container -// ID. 
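// A minimal sketch of how the new common.Request key type above can back a
// plain map of outstanding requests (the role played by the Requests helper
// removed below); the tracker shape here is an assumption for illustration,
// not code from this change. MarshalText renders "<nodeID>:<requestID>", so
// the map is JSON-encodable, as exercised by request_test.go above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
)

func main() {
	outstanding := map[common.Request]ids.ID{}

	req := common.Request{NodeID: ids.GenerateTestNodeID(), RequestID: 7}
	containerID := ids.GenerateTestID()

	// Track the request, look it up when a response arrives, then drop it.
	outstanding[req] = containerID
	if want, ok := outstanding[req]; ok && want == containerID {
		delete(outstanding, req)
	}

	// The struct key marshals via MarshalText, so the whole map can be logged as JSON.
	outstanding[req] = containerID
	b, err := json.Marshal(outstanding)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}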
-func (r *Requests) Contains(containerID ids.ID) bool { - _, ok := r.idToReq[containerID] - return ok -} - -func (r Requests) String() string { - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("Requests: (Num Validators = %d)", len(r.reqsToID))) - for vdr, reqs := range r.reqsToID { - sb.WriteString(fmt.Sprintf("\n VDR[%s]: (Outstanding Requests %d)", vdr, len(reqs))) - for reqID, containerID := range reqs { - sb.WriteString(fmt.Sprintf("\n Request[%d]: %s", reqID, containerID)) - } - } - return sb.String() -} diff --git a/snow/engine/common/requests_test.go b/snow/engine/common/requests_test.go deleted file mode 100644 index 00e648dfa90a..000000000000 --- a/snow/engine/common/requests_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestRequests(t *testing.T) { - require := require.New(t) - - req := Requests{} - - require.Empty(req) - - _, removed := req.Remove(ids.EmptyNodeID, 0) - require.False(removed) - - require.False(req.RemoveAny(ids.Empty)) - require.False(req.Contains(ids.Empty)) - - req.Add(ids.EmptyNodeID, 0, ids.Empty) - require.Equal(1, req.Len()) - - _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(removed) - - _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(removed) - - require.True(req.Contains(ids.Empty)) - require.Equal(1, req.Len()) - - req.Add(ids.EmptyNodeID, 10, ids.Empty.Prefix(0)) - require.Equal(2, req.Len()) - - _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(removed) - - _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(removed) - - require.True(req.Contains(ids.Empty)) - require.Equal(2, req.Len()) - - removedID, removed := req.Remove(ids.EmptyNodeID, 0) - require.True(removed) - require.Equal(ids.Empty, removedID) - - removedID, removed = req.Remove(ids.EmptyNodeID, 10) - require.True(removed) - require.Equal(ids.Empty.Prefix(0), removedID) - - require.Zero(req.Len()) - - req.Add(ids.EmptyNodeID, 0, ids.Empty) - require.Equal(1, req.Len()) - - require.True(req.RemoveAny(ids.Empty)) - require.Zero(req.Len()) - - require.False(req.RemoveAny(ids.Empty)) - require.Zero(req.Len()) -} diff --git a/snow/engine/common/sender.go b/snow/engine/common/sender.go index d596a35bcc2e..b40084fc714f 100644 --- a/snow/engine/common/sender.go +++ b/snow/engine/common/sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -175,15 +175,12 @@ type NetworkAppSender interface { // * An AppResponse from nodeID with ID [requestID] // * An AppRequestFailed from nodeID with ID [requestID] // Exactly one of the above messages will eventually be received per nodeID. - // A non-nil error should be considered fatal. SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error // Send an application-level response to a request. // This response must be in response to an AppRequest that the VM corresponding // to this AppSender received from [nodeID] with ID [requestID]. - // A non-nil error should be considered fatal. SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error // Gossip an application-level message. 
- // A non-nil error should be considered fatal. SendAppGossip(ctx context.Context, appGossipBytes []byte) error SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error } @@ -199,7 +196,6 @@ type CrossChainAppSender interface { // * A CrossChainAppRequestFailed from [chainID] with ID [requestID] // Exactly one of the above messages will eventually be received from // [chainID]. - // A non-nil error should be considered fatal. SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error // SendCrossChainAppResponse sends an application-level response to a // specific chain @@ -207,7 +203,6 @@ type CrossChainAppSender interface { // This response must be in response to a CrossChainAppRequest that the VM // corresponding to this CrossChainAppSender received from [chainID] with ID // [requestID]. - // A non-nil error should be considered fatal. SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error } diff --git a/snow/engine/common/state_syncer.go b/snow/engine/common/state_syncer.go index e23ad126407c..a6d159bb6949 100644 --- a/snow/engine/common/state_syncer.go +++ b/snow/engine/common/state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/test_bootstrap_tracker.go b/snow/engine/common/test_bootstrap_tracker.go index ba377b39dce3..2e940f1a43b1 100644 --- a/snow/engine/common/test_bootstrap_tracker.go +++ b/snow/engine/common/test_bootstrap_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go deleted file mode 100644 index 625070616377..000000000000 --- a/snow/engine/common/test_bootstrapable.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - _ Bootstrapable = (*BootstrapableTest)(nil) - - errForceAccepted = errors.New("unexpectedly called ForceAccepted") - errClear = errors.New("unexpectedly called Clear") -) - -// BootstrapableTest is a test engine that supports bootstrapping -type BootstrapableTest struct { - T *testing.T - - CantForceAccepted, CantClear bool - - ClearF func(ctx context.Context) error - ForceAcceptedF func(ctx context.Context, acceptedContainerIDs []ids.ID) error -} - -// Default sets the default on call handling -func (b *BootstrapableTest) Default(cant bool) { - b.CantForceAccepted = cant -} - -func (b *BootstrapableTest) Clear(ctx context.Context) error { - if b.ClearF != nil { - return b.ClearF(ctx) - } - if b.CantClear && b.T != nil { - require.FailNow(b.T, errClear.Error()) - } - return errClear -} - -func (b *BootstrapableTest) ForceAccepted(ctx context.Context, containerIDs []ids.ID) error { - if b.ForceAcceptedF != nil { - return b.ForceAcceptedF(ctx, containerIDs) - } - if b.CantForceAccepted && b.T != nil { - require.FailNow(b.T, errForceAccepted.Error()) - } - return errForceAccepted -} diff --git a/snow/engine/common/test_bootstrapper.go b/snow/engine/common/test_bootstrapper.go index 1f8fd59bf3d3..259fcb07fb3e 100644 --- a/snow/engine/common/test_bootstrapper.go +++ b/snow/engine/common/test_bootstrapper.go @@ -1,20 +1,41 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common +import ( + "context" + "errors" + + "github.com/stretchr/testify/require" +) + var ( - _ Engine = (*BootstrapperTest)(nil) - _ Bootstrapable = (*BootstrapperTest)(nil) + _ BootstrapableEngine = (*BootstrapperTest)(nil) + + errClear = errors.New("unexpectedly called Clear") ) -// EngineTest is a test engine type BootstrapperTest struct { - BootstrapableTest EngineTest + + CantClear bool + + ClearF func(ctx context.Context) error } func (b *BootstrapperTest) Default(cant bool) { - b.BootstrapableTest.Default(cant) b.EngineTest.Default(cant) + + b.CantClear = cant +} + +func (b *BootstrapperTest) Clear(ctx context.Context) error { + if b.ClearF != nil { + return b.ClearF(ctx) + } + if b.CantClear && b.T != nil { + require.FailNow(b.T, errClear.Error()) + } + return errClear } diff --git a/snow/engine/common/test_config.go b/snow/engine/common/test_config.go deleted file mode 100644 index d39e6078fd01..000000000000 --- a/snow/engine/common/test_config.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
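BootstrapperTest above keeps the repository's Cant*/override-function test-double convention: a call defers to an optional F field when set, fails the test when the call was declared unexpected, and otherwise returns a sentinel error. A stripped-down sketch of the pattern with hypothetical names:

package myvm // illustrative package

import (
	"context"
	"errors"

	"github.com/stretchr/testify/require"
)

var errPing = errors.New("unexpectedly called Ping")

// PingerTest is a hypothetical double following the same shape as
// BootstrapperTest.Clear above.
type PingerTest struct {
	T        require.TestingT
	CantPing bool
	PingF    func(ctx context.Context) error
}

func (p *PingerTest) Ping(ctx context.Context) error {
	if p.PingF != nil {
		return p.PingF(ctx) // an explicit override always wins
	}
	if p.CantPing && p.T != nil {
		require.FailNow(p.T, errPing.Error()) // unexpected call fails the test
	}
	return errPing
}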
- -package common - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" -) - -// DefaultConfigTest returns a test configuration -func DefaultConfigTest() Config { - isBootstrapped := false - bootstrapTracker := &BootstrapTrackerTest{ - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - - beacons := validators.NewManager() - - connectedPeers := tracker.NewPeers() - startupTracker := tracker.NewStartup(connectedPeers, 0) - beacons.RegisterCallbackListener(constants.PrimaryNetworkID, startupTracker) - - return Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: beacons, - StartupTracker: startupTracker, - Sender: &SenderTest{}, - Bootstrapable: &BootstrapableTest{}, - BootstrapTracker: bootstrapTracker, - Timer: &TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &SharedConfig{}, - } -} diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index 579f2ca94c9d..e07352d43713 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -119,15 +120,15 @@ type EngineTest struct { PushQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte, requestedHeight uint64) error AncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error AcceptedFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error - GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID) error + GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs set.Set[ids.ID]) error ChitsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error GetStateSummaryFrontierF, GetStateSummaryFrontierFailedF, GetAcceptedStateSummaryFailedF, GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF, QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error - AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error StateSummaryFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) error - GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys []uint64) error - AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys set.Set[uint64]) error + AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error 
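The EngineTest callback fields above switch from ID slices to set.Set, so handlers perform membership checks rather than slice scans. A hedged test-side sketch, assuming set.Of and the GenerateTest* ID helpers behave as they do elsewhere in this repository:

package myvm_test // illustrative package

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/utils/set"
)

func TestGetAcceptedTakesASet(t *testing.T) {
	engine := &common.EngineTest{T: t}
	engine.Default(true)

	blkID := ids.GenerateTestID()
	engine.GetAcceptedF = func(_ context.Context, _ ids.NodeID, _ uint32, containerIDs set.Set[ids.ID]) error {
		require.True(t, containerIDs.Contains(blkID)) // membership check instead of a slice scan
		return nil
	}

	// Callers now build sets rather than slices.
	containerIDs := set.Of(blkID)
	require.NoError(t, engine.GetAccepted(context.Background(), ids.GenerateTestNodeID(), 1, containerIDs))
}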
ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error HealthF func(context.Context) (interface{}, error) @@ -137,7 +138,7 @@ type EngineTest struct { AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error - CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error } func (e *EngineTest) Default(cant bool) { @@ -314,7 +315,7 @@ func (e *EngineTest) GetStateSummaryFrontierFailed(ctx context.Context, validato return errGetStateSummaryFrontierFailed } -func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys []uint64) error { +func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys set.Set[uint64]) error { if e.GetAcceptedStateSummaryF != nil { return e.GetAcceptedStateSummaryF(ctx, validatorID, requestID, keys) } @@ -327,7 +328,7 @@ func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID id return errGetAcceptedStateSummary } -func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { if e.AcceptedStateSummaryF != nil { return e.AcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } @@ -392,7 +393,7 @@ func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return errAcceptedFrontier } -func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.GetAcceptedF != nil { return e.GetAcceptedF(ctx, nodeID, requestID, containerIDs) } @@ -418,7 +419,7 @@ func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, r return errGetAcceptedFailed } -func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.AcceptedF != nil { return e.AcceptedF(ctx, nodeID, requestID, containerIDs) } @@ -561,9 +562,9 @@ func (e *EngineTest) CrossChainAppRequest(ctx context.Context, chainID ids.ID, r return errCrossChainAppRequest } -func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { if e.CrossChainAppRequestFailedF != nil { - return e.CrossChainAppRequestFailedF(ctx, chainID, requestID) + return e.CrossChainAppRequestFailedF(ctx, chainID, requestID, appErr) } if !e.CantCrossChainAppRequestFailed { return nil @@ -613,9 +614,9 @@ func (e *EngineTest) AppResponse(ctx context.Context, nodeID ids.NodeID, request return errAppResponse } -func (e *EngineTest) 
AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { if e.AppRequestFailedF != nil { - return e.AppRequestFailedF(ctx, nodeID, requestID) + return e.AppRequestFailedF(ctx, nodeID, requestID, appErr) } if !e.CantAppRequestFailed { return nil @@ -690,16 +691,3 @@ func (e *EngineTest) HealthCheck(ctx context.Context) (interface{}, error) { } return nil, errHealthCheck } - -func (e *EngineTest) GetVM() VM { - if e.GetVMF != nil { - return e.GetVMF() - } - if !e.CantGetVM { - return nil - } - if e.T != nil { - require.FailNow(e.T, "Unexpectedly called GetVM") - } - return nil -} diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index 5896f48dfa25..32af682838fa 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -6,7 +6,6 @@ package common import ( "context" "errors" - "testing" "github.com/stretchr/testify/require" @@ -16,7 +15,8 @@ import ( ) var ( - _ Sender = (*SenderTest)(nil) + _ Sender = (*SenderTest)(nil) + _ AppSender = (*FakeSender)(nil) errAccept = errors.New("unexpectedly called Accept") errSendAppRequest = errors.New("unexpectedly called SendAppRequest") @@ -27,7 +27,7 @@ var ( // SenderTest is a test sender type SenderTest struct { - T *testing.T + T require.TestingT CantAccept, CantSendGetStateSummaryFrontier, CantSendStateSummaryFrontier, @@ -359,3 +359,64 @@ func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ } return errSendAppGossipSpecific } + +// FakeSender is used for testing +type FakeSender struct { + SentAppRequest, SentAppResponse, + SentAppGossip, SentAppGossipSpecific, + SentCrossChainAppRequest, SentCrossChainAppResponse chan []byte +} + +func (f FakeSender) SendAppRequest(_ context.Context, _ set.Set[ids.NodeID], _ uint32, bytes []byte) error { + if f.SentAppRequest == nil { + return nil + } + + f.SentAppRequest <- bytes + return nil +} + +func (f FakeSender) SendAppResponse(_ context.Context, _ ids.NodeID, _ uint32, bytes []byte) error { + if f.SentAppResponse == nil { + return nil + } + + f.SentAppResponse <- bytes + return nil +} + +func (f FakeSender) SendAppGossip(_ context.Context, bytes []byte) error { + if f.SentAppGossip == nil { + return nil + } + + f.SentAppGossip <- bytes + return nil +} + +func (f FakeSender) SendAppGossipSpecific(_ context.Context, _ set.Set[ids.NodeID], bytes []byte) error { + if f.SentAppGossipSpecific == nil { + return nil + } + + f.SentAppGossipSpecific <- bytes + return nil +} + +func (f FakeSender) SendCrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, bytes []byte) error { + if f.SentCrossChainAppRequest == nil { + return nil + } + + f.SentCrossChainAppRequest <- bytes + return nil +} + +func (f FakeSender) SendCrossChainAppResponse(_ context.Context, _ ids.ID, _ uint32, bytes []byte) error { + if f.SentCrossChainAppResponse == nil { + return nil + } + + f.SentCrossChainAppResponse <- bytes + return nil +} diff --git a/snow/engine/common/test_timer.go b/snow/engine/common/test_timer.go index e5e2b232d390..6da0d9251712 100644 --- a/snow/engine/common/test_timer.go +++ b/snow/engine/common/test_timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index 9d1a77ef2a9f..828b49f5e1fe 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -23,7 +23,6 @@ var ( errSetState = errors.New("unexpectedly called SetState") errShutdown = errors.New("unexpectedly called Shutdown") errCreateHandlers = errors.New("unexpectedly called CreateHandlers") - errCreateStaticHandlers = errors.New("unexpectedly called CreateStaticHandlers") errHealthCheck = errors.New("unexpectedly called HealthCheck") errConnected = errors.New("unexpectedly called Connected") errDisconnected = errors.New("unexpectedly called Disconnected") @@ -44,7 +43,7 @@ type TestVM struct { T *testing.T CantInitialize, CantSetState, - CantShutdown, CantCreateHandlers, CantCreateStaticHandlers, + CantShutdown, CantCreateHandlers, CantHealthCheck, CantConnected, CantDisconnected, CantVersion, CantAppRequest, CantAppResponse, CantAppGossip, CantAppRequestFailed, CantCrossChainAppRequest, CantCrossChainAppResponse, CantCrossChainAppRequestFailed bool @@ -53,18 +52,17 @@ type TestVM struct { SetStateF func(ctx context.Context, state snow.State) error ShutdownF func(context.Context) error CreateHandlersF func(context.Context) (map[string]http.Handler, error) - CreateStaticHandlersF func(context.Context) (map[string]http.Handler, error) ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error HealthCheckF func(context.Context) (interface{}, error) AppRequestF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, msg []byte) error AppResponseF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error - AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error VersionF func(context.Context) (string, error) CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error - CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error } func (vm *TestVM) Default(cant bool) { @@ -72,7 +70,6 @@ func (vm *TestVM) Default(cant bool) { vm.CantSetState = cant vm.CantShutdown = cant vm.CantCreateHandlers = cant - vm.CantCreateStaticHandlers = cant vm.CantHealthCheck = cant vm.CantAppRequest = cant vm.CantAppRequestFailed = cant @@ -152,16 +149,6 @@ func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]http.Handler, return nil, nil } -func (vm *TestVM) CreateStaticHandlers(ctx context.Context) (map[string]http.Handler, error) { - if vm.CreateStaticHandlersF != nil { - return vm.CreateStaticHandlersF(ctx) - } - if vm.CantCreateStaticHandlers && vm.T != nil { - require.FailNow(vm.T, 
errCreateStaticHandlers.Error()) - } - return nil, nil -} - func (vm *TestVM) HealthCheck(ctx context.Context) (interface{}, error) { if vm.HealthCheckF != nil { return vm.HealthCheckF(ctx) @@ -185,9 +172,9 @@ func (vm *TestVM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u return errAppRequest } -func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { if vm.AppRequestFailedF != nil { - return vm.AppRequestFailedF(ctx, nodeID, requestID) + return vm.AppRequestFailedF(ctx, nodeID, requestID, appErr) } if !vm.CantAppRequestFailed { return nil @@ -237,9 +224,9 @@ func (vm *TestVM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requ return errCrossChainAppRequest } -func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { if vm.CrossChainAppRequestFailedF != nil { - return vm.CrossChainAppRequestFailedF(ctx, chainID, requestID) + return vm.CrossChainAppRequestFailedF(ctx, chainID, requestID, appErr) } if !vm.CantCrossChainAppRequestFailed { return nil diff --git a/snow/engine/common/timer.go b/snow/engine/common/timer.go index 56312d08a4fa..432bb9170ccb 100644 --- a/snow/engine/common/timer.go +++ b/snow/engine/common/timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/traced_bootstrapable_engine.go b/snow/engine/common/traced_bootstrapable_engine.go index ba7a0d89228d..4c64206ae081 100644 --- a/snow/engine/common/traced_bootstrapable_engine.go +++ b/snow/engine/common/traced_bootstrapable_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -6,11 +6,6 @@ package common import ( "context" - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" ) @@ -29,15 +24,6 @@ func TraceBootstrapableEngine(bootstrapableEngine BootstrapableEngine, tracer tr } } -func (e *tracedBootstrapableEngine) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { - ctx, span := e.tracer.Start(ctx, "tracedBootstrapableEngine.ForceAccepted", oteltrace.WithAttributes( - attribute.Int("numAcceptedContainerIDs", len(acceptedContainerIDs)), - )) - defer span.End() - - return e.bootstrapableEngine.ForceAccepted(ctx, acceptedContainerIDs) -} - func (e *tracedBootstrapableEngine) Clear(ctx context.Context) error { ctx, span := e.tracer.Start(ctx, "tracedBootstrapableEngine.Clear") defer span.End() diff --git a/snow/engine/common/traced_engine.go b/snow/engine/common/traced_engine.go index 387ee8289e2a..5ffad7c543d7 100644 --- a/snow/engine/common/traced_engine.go +++ b/snow/engine/common/traced_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
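FakeSender, introduced in test_sender.go earlier in this diff, records outgoing payloads on optional channels and silently drops message types whose channel is nil. A possible usage sketch (the test name and buffering choice are illustrative):

package myvm_test // illustrative package

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
)

func TestFakeSenderCapturesGossip(t *testing.T) {
	sender := common.FakeSender{
		SentAppGossip: make(chan []byte, 1), // buffered so the sender never blocks
	}

	// Normally the component under test would call the sender; exercise it directly here.
	require.NoError(t, sender.SendAppGossip(context.Background(), []byte("ping")))
	require.Equal(t, []byte("ping"), <-sender.SentAppGossip)

	// Message types whose channel is nil are silently discarded.
	require.NoError(t, sender.SendAppResponse(context.Background(), ids.GenerateTestNodeID(), 1, []byte("ignored")))
}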
package common @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -62,22 +63,22 @@ func (e *tracedEngine) GetStateSummaryFrontierFailed(ctx context.Context, nodeID return e.engine.GetStateSummaryFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numHeights", len(heights)), + attribute.Int("numHeights", heights.Len()), )) defer span.End() return e.engine.GetAcceptedStateSummary(ctx, nodeID, requestID, heights) } -func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numSummaryIDs", len(summaryIDs)), + attribute.Int("numSummaryIDs", summaryIDs.Len()), )) defer span.End() @@ -125,22 +126,22 @@ func (e *tracedEngine) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids return e.engine.GetAcceptedFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAccepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() return e.engine.GetAccepted(ctx, nodeID, requestID, containerIDs) } -func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.Accepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() @@ -290,14 +291,14 @@ func (e *tracedEngine) AppResponse(ctx context.Context, nodeID ids.NodeID, reque return e.engine.AppResponse(ctx, nodeID, requestID, response) } -func (e *tracedEngine) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (e *tracedEngine) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AppRequestFailed", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", 
int64(requestID)), )) defer span.End() - return e.engine.AppRequestFailed(ctx, nodeID, requestID) + return e.engine.AppRequestFailed(ctx, nodeID, requestID, appErr) } func (e *tracedEngine) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { @@ -332,14 +333,14 @@ func (e *tracedEngine) CrossChainAppResponse(ctx context.Context, chainID ids.ID return e.engine.CrossChainAppResponse(ctx, chainID, requestID, response) } -func (e *tracedEngine) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +func (e *tracedEngine) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.CrossChainAppRequestFailed", oteltrace.WithAttributes( attribute.Stringer("chainID", chainID), attribute.Int64("requestID", int64(requestID)), )) defer span.End() - return e.engine.CrossChainAppRequestFailed(ctx, chainID, requestID) + return e.engine.CrossChainAppRequestFailed(ctx, chainID, requestID, appErr) } func (e *tracedEngine) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { @@ -417,7 +418,3 @@ func (e *tracedEngine) HealthCheck(ctx context.Context) (interface{}, error) { return e.engine.HealthCheck(ctx) } - -func (e *tracedEngine) GetVM() VM { - return e.engine.GetVM() -} diff --git a/snow/engine/common/traced_state_syncer.go b/snow/engine/common/traced_state_syncer.go index db2569eef995..e598b6094076 100644 --- a/snow/engine/common/traced_state_syncer.go +++ b/snow/engine/common/traced_state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/snow/engine/common/tracker/accepted.go b/snow/engine/common/tracker/accepted.go index 0be64bc9035e..f6c63e3ff28d 100644 --- a/snow/engine/common/tracker/accepted.go +++ b/snow/engine/common/tracker/accepted.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/engine/common/tracker/accepted_test.go b/snow/engine/common/tracker/accepted_test.go index 7bb617d789f9..8ff489f51aea 100644 --- a/snow/engine/common/tracker/accepted_test.go +++ b/snow/engine/common/tracker/accepted_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/engine/common/tracker/peers.go b/snow/engine/common/tracker/peers.go index ad9592209a5a..fdf070613d83 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -33,6 +33,9 @@ type Peers interface { ConnectedPercent() float64 // TotalWeight returns the total validator weight TotalWeight() uint64 + // SampleValidator returns a randomly selected connected validator. If there + // are no currently connected validators then it will return false. + SampleValidator() (ids.NodeID, bool) // PreferredPeers returns the currently connected validators. 
If there are // no currently connected validators then it will return the currently // connected peers. @@ -108,6 +111,13 @@ func (p *lockedPeers) TotalWeight() uint64 { return p.peers.TotalWeight() } +func (p *lockedPeers) SampleValidator() (ids.NodeID, bool) { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.SampleValidator() +} + func (p *lockedPeers) PreferredPeers() set.Set[ids.NodeID] { p.lock.RLock() defer p.lock.RUnlock() @@ -263,6 +273,10 @@ func (p *peerData) TotalWeight() uint64 { return p.totalWeight } +func (p *peerData) SampleValidator() (ids.NodeID, bool) { + return p.connectedValidators.Peek() +} + func (p *peerData) PreferredPeers() set.Set[ids.NodeID] { if p.connectedValidators.Len() == 0 { connectedPeers := set.NewSet[ids.NodeID](p.connectedPeers.Len()) diff --git a/snow/engine/common/tracker/peers_test.go b/snow/engine/common/tracker/peers_test.go index 4af065113385..b627b79a16ed 100644 --- a/snow/engine/common/tracker/peers_test.go +++ b/snow/engine/common/tracker/peers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/engine/common/tracker/startup.go b/snow/engine/common/tracker/startup.go index 282d88ce832d..c5e75613fcaa 100644 --- a/snow/engine/common/tracker/startup.go +++ b/snow/engine/common/tracker/startup.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/engine/common/vm.go b/snow/engine/common/vm.go index e77bdd552bbf..65cbfb158656 100644 --- a/snow/engine/common/vm.go +++ b/snow/engine/common/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -65,23 +65,6 @@ type VM interface { // Version returns the version of the VM. Version(context.Context) (string, error) - // Creates the HTTP handlers for custom VM network calls. - // - // This exposes handlers that the outside world can use to communicate with - // a static reference to the VM. Each handler has the path: - // [Address of node]/ext/VM/[VM ID]/[extension] - // - // Returns a mapping from [extension]s to HTTP handlers. - // - // For example, it might make sense to have an extension for creating - // genesis bytes this VM can interpret. - // - // Note: If this method is called, no other method will be called on this VM. - // Each registered VM will have a single instance created to handle static - // APIs. This instance will be handled separately from instances created to - // service an instance of a chain. - CreateStaticHandlers(context.Context) (map[string]http.Handler, error) - // Creates the HTTP handlers for custom chain network calls. // // This exposes handlers that the outside world can use to communicate with diff --git a/snow/engine/snowman/ancestor/tree.go b/snow/engine/snowman/ancestor/tree.go index 3d7fda833cbc..9e0eb4e4f02f 100644 --- a/snow/engine/snowman/ancestor/tree.go +++ b/snow/engine/snowman/ancestor/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
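SampleValidator, added to the Peers tracker above, returns an arbitrary currently connected validator and reports false when none is connected, with lockedPeers delegating under a read lock. A hedged caller-side sketch in which the send callback stands in for whatever request the caller actually issues:

package myvm // illustrative package

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common/tracker"
)

// requestFromValidator targets a single connected validator and skips the
// round entirely when no validator is currently connected.
func requestFromValidator(
	ctx context.Context,
	peers tracker.Peers,
	send func(context.Context, ids.NodeID) error,
) error {
	nodeID, ok := peers.SampleValidator()
	if !ok {
		return nil // no connected validators right now; the caller may retry later
	}
	return send(ctx, nodeID)
}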
package ancestor diff --git a/snow/engine/snowman/ancestor/tree_test.go b/snow/engine/snowman/ancestor/tree_test.go index 605f2a08572d..d17d38ceb0ca 100644 --- a/snow/engine/snowman/ancestor/tree_test.go +++ b/snow/engine/snowman/ancestor/tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ancestor diff --git a/snow/engine/snowman/block/batched_vm.go b/snow/engine/snowman/block/batched_vm.go index 3d5b869b98c1..ad52e3592ae6 100644 --- a/snow/engine/snowman/block/batched_vm.go +++ b/snow/engine/snowman/block/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/batched_vm_test.go b/snow/engine/snowman/block/batched_vm_test.go index 92426c1b474d..b4d251c284ba 100644 --- a/snow/engine/snowman/block/batched_vm_test.go +++ b/snow/engine/snowman/block/batched_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/block_context_vm.go b/snow/engine/snowman/block/block_context_vm.go index 4a259571a006..6b8b78235431 100644 --- a/snow/engine/snowman/block/block_context_vm.go +++ b/snow/engine/snowman/block/block_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/mocks/build_block_with_context_vm.go b/snow/engine/snowman/block/mock_build_block_with_context_vm.go similarity index 82% rename from snow/engine/snowman/block/mocks/build_block_with_context_vm.go rename to snow/engine/snowman/block/mock_build_block_with_context_vm.go index 2a72ada73e2c..016007b0dee7 100644 --- a/snow/engine/snowman/block/mocks/build_block_with_context_vm.go +++ b/snow/engine/snowman/block/mock_build_block_with_context_vm.go @@ -1,18 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: BuildBlockWithContextChainVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_build_block_with_context_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block BuildBlockWithContextChainVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" gomock "go.uber.org/mock/gomock" ) @@ -40,7 +41,7 @@ func (m *MockBuildBlockWithContextChainVM) EXPECT() *MockBuildBlockWithContextCh } // BuildBlockWithContext mocks base method. 
-func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Context, arg1 *block.Context) (snowman.Block, error) { +func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Context, arg1 *Context) (snowman.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BuildBlockWithContext", arg0, arg1) ret0, _ := ret[0].(snowman.Block) @@ -49,7 +50,7 @@ func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Co } // BuildBlockWithContext indicates an expected call of BuildBlockWithContext. -func (mr *MockBuildBlockWithContextChainVMMockRecorder) BuildBlockWithContext(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBuildBlockWithContextChainVMMockRecorder) BuildBlockWithContext(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlockWithContext", reflect.TypeOf((*MockBuildBlockWithContextChainVM)(nil).BuildBlockWithContext), arg0, arg1) } diff --git a/snow/engine/snowman/block/mocks/chain_vm.go b/snow/engine/snowman/block/mock_chain_vm.go similarity index 82% rename from snow/engine/snowman/block/mocks/chain_vm.go rename to snow/engine/snowman/block/mock_chain_vm.go index 2a2446a94ee5..ad99e3f716d0 100644 --- a/snow/engine/snowman/block/mocks/chain_vm.go +++ b/snow/engine/snowman/block/mock_chain_vm.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: ChainVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_chain_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block ChainVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" @@ -54,7 +56,7 @@ func (m *MockChainVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []by } // AppGossip indicates an expected call of AppGossip. -func (mr *MockChainVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppGossip(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockChainVM)(nil).AppGossip), arg0, arg1, arg2) } @@ -68,23 +70,23 @@ func (m *MockChainVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, arg2 uin } // AppRequest indicates an expected call of AppRequest. -func (mr *MockChainVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockChainVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) } // AppRequestFailed mocks base method. -func (m *MockChainVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { +func (m *MockChainVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AppRequestFailed indicates an expected call of AppRequestFailed. 
-func (mr *MockChainVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), arg0, arg1, arg2, arg3) } // AppResponse mocks base method. @@ -96,7 +98,7 @@ func (m *MockChainVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 ui } // AppResponse indicates an expected call of AppResponse. -func (mr *MockChainVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockChainVM)(nil).AppResponse), arg0, arg1, arg2, arg3) } @@ -111,7 +113,7 @@ func (m *MockChainVM) BuildBlock(arg0 context.Context) (snowman.Block, error) { } // BuildBlock indicates an expected call of BuildBlock. -func (mr *MockChainVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) BuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockChainVM)(nil).BuildBlock), arg0) } @@ -125,7 +127,7 @@ func (m *MockChainVM) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *ver } // Connected indicates an expected call of Connected. -func (mr *MockChainVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockChainVM)(nil).Connected), arg0, arg1, arg2) } @@ -140,26 +142,11 @@ func (m *MockChainVM) CreateHandlers(arg0 context.Context) (map[string]http.Hand } // CreateHandlers indicates an expected call of CreateHandlers. -func (mr *MockChainVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CreateHandlers(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateHandlers), arg0) } -// CreateStaticHandlers mocks base method. -func (m *MockChainVM) CreateStaticHandlers(arg0 context.Context) (map[string]http.Handler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) - ret0, _ := ret[0].(map[string]http.Handler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateStaticHandlers indicates an expected call of CreateStaticHandlers. -func (mr *MockChainVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateStaticHandlers), arg0) -} - // CrossChainAppRequest mocks base method. func (m *MockChainVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { m.ctrl.T.Helper() @@ -169,23 +156,23 @@ func (m *MockChainVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, ar } // CrossChainAppRequest indicates an expected call of CrossChainAppRequest. 
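AppRequestFailed and CrossChainAppRequestFailed now receive a *common.AppError instead of only the request ID. A hedged sketch of surfacing that structured failure in a log line, assuming AppError carries a numeric Code field and implements error, as it does where this codebase constructs these values (the helper name is illustrative):

package myvm // illustrative package

import (
	"go.uber.org/zap"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/utils/logging"
)

// logAppRequestFailure records why a request failed rather than a bare
// "request failed".
func logAppRequestFailure(log logging.Logger, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) {
	log.Debug("app request failed",
		zap.Stringer("nodeID", nodeID),
		zap.Uint32("requestID", requestID),
		zap.Any("errorCode", appErr.Code), // zap.Any sidesteps the exact numeric width of Code
		zap.Error(appErr),
	)
}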
-func (mr *MockChainVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) } // CrossChainAppRequestFailed mocks base method. -func (m *MockChainVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { +func (m *MockChainVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. -func (mr *MockChainVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2, arg3) } // CrossChainAppResponse mocks base method. @@ -197,7 +184,7 @@ func (m *MockChainVM) CrossChainAppResponse(arg0 context.Context, arg1 ids.ID, a } // CrossChainAppResponse indicates an expected call of CrossChainAppResponse. -func (mr *MockChainVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) } @@ -211,7 +198,7 @@ func (m *MockChainVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) error } // Disconnected indicates an expected call of Disconnected. -func (mr *MockChainVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockChainVM)(nil).Disconnected), arg0, arg1) } @@ -226,7 +213,7 @@ func (m *MockChainVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowman.Block } // GetBlock indicates an expected call of GetBlock. -func (mr *MockChainVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) GetBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChainVM)(nil).GetBlock), arg0, arg1) } @@ -241,22 +228,22 @@ func (m *MockChainVM) GetBlockIDAtHeight(arg0 context.Context, arg1 uint64) (ids } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. 
-func (mr *MockChainVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockChainVM)(nil).GetBlockIDAtHeight), arg0, arg1) } // HealthCheck mocks base method. -func (m *MockChainVM) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockChainVM) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockChainVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockChainVM)(nil).HealthCheck), arg0) } @@ -270,7 +257,7 @@ func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 } // Initialize indicates an expected call of Initialize. -func (mr *MockChainVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockChainVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } @@ -285,7 +272,7 @@ func (m *MockChainVM) LastAccepted(arg0 context.Context) (ids.ID, error) { } // LastAccepted indicates an expected call of LastAccepted. -func (mr *MockChainVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) LastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockChainVM)(nil).LastAccepted), arg0) } @@ -300,7 +287,7 @@ func (m *MockChainVM) ParseBlock(arg0 context.Context, arg1 []byte) (snowman.Blo } // ParseBlock indicates an expected call of ParseBlock. -func (mr *MockChainVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) ParseBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockChainVM)(nil).ParseBlock), arg0, arg1) } @@ -314,7 +301,7 @@ func (m *MockChainVM) SetPreference(arg0 context.Context, arg1 ids.ID) error { } // SetPreference indicates an expected call of SetPreference. -func (mr *MockChainVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetPreference(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockChainVM)(nil).SetPreference), arg0, arg1) } @@ -328,7 +315,7 @@ func (m *MockChainVM) SetState(arg0 context.Context, arg1 snow.State) error { } // SetState indicates an expected call of SetState. 
-func (mr *MockChainVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockChainVM)(nil).SetState), arg0, arg1) } @@ -342,7 +329,7 @@ func (m *MockChainVM) Shutdown(arg0 context.Context) error { } // Shutdown indicates an expected call of Shutdown. -func (mr *MockChainVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockChainVM)(nil).Shutdown), arg0) } @@ -356,7 +343,7 @@ func (m *MockChainVM) VerifyHeightIndex(arg0 context.Context) error { } // VerifyHeightIndex indicates an expected call of VerifyHeightIndex. -func (mr *MockChainVMMockRecorder) VerifyHeightIndex(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockChainVM)(nil).VerifyHeightIndex), arg0) } @@ -371,7 +358,7 @@ func (m *MockChainVM) Version(arg0 context.Context) (string, error) { } // Version indicates an expected call of Version. -func (mr *MockChainVMMockRecorder) Version(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Version(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockChainVM)(nil).Version), arg0) } diff --git a/snow/engine/snowman/block/mocks/state_syncable_vm.go b/snow/engine/snowman/block/mock_state_syncable_vm.go similarity index 83% rename from snow/engine/snowman/block/mocks/state_syncable_vm.go rename to snow/engine/snowman/block/mock_state_syncable_vm.go index 50a1fe92e117..8d8abca53a0c 100644 --- a/snow/engine/snowman/block/mocks/state_syncable_vm.go +++ b/snow/engine/snowman/block/mock_state_syncable_vm.go @@ -1,17 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: StateSyncableVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_state_syncable_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block StateSyncableVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" gomock "go.uber.org/mock/gomock" ) @@ -39,61 +40,61 @@ func (m *MockStateSyncableVM) EXPECT() *MockStateSyncableVMMockRecorder { } // GetLastStateSummary mocks base method. -func (m *MockStateSyncableVM) GetLastStateSummary(arg0 context.Context) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetLastStateSummary(arg0 context.Context) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLastStateSummary", arg0) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetLastStateSummary indicates an expected call of GetLastStateSummary. 
-func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetLastStateSummary), arg0) } // GetOngoingSyncStateSummary mocks base method. -func (m *MockStateSyncableVM) GetOngoingSyncStateSummary(arg0 context.Context) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetOngoingSyncStateSummary(arg0 context.Context) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetOngoingSyncStateSummary", arg0) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetOngoingSyncStateSummary indicates an expected call of GetOngoingSyncStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOngoingSyncStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetOngoingSyncStateSummary), arg0) } // GetStateSummary mocks base method. -func (m *MockStateSyncableVM) GetStateSummary(arg0 context.Context, arg1 uint64) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetStateSummary(arg0 context.Context, arg1 uint64) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStateSummary", arg0, arg1) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStateSummary indicates an expected call of GetStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetStateSummary), arg0, arg1) } // ParseStateSummary mocks base method. -func (m *MockStateSyncableVM) ParseStateSummary(arg0 context.Context, arg1 []byte) (block.StateSummary, error) { +func (m *MockStateSyncableVM) ParseStateSummary(arg0 context.Context, arg1 []byte) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ParseStateSummary", arg0, arg1) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // ParseStateSummary indicates an expected call of ParseStateSummary. -func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).ParseStateSummary), arg0, arg1) } @@ -108,7 +109,7 @@ func (m *MockStateSyncableVM) StateSyncEnabled(arg0 context.Context) (bool, erro } // StateSyncEnabled indicates an expected call of StateSyncEnabled. 
-func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEnabled", reflect.TypeOf((*MockStateSyncableVM)(nil).StateSyncEnabled), arg0) } diff --git a/snow/engine/snowman/block/mocks/with_verify_context.go b/snow/engine/snowman/block/mock_with_verify_context.go similarity index 85% rename from snow/engine/snowman/block/mocks/with_verify_context.go rename to snow/engine/snowman/block/mock_with_verify_context.go index ea509980f130..1c18e3e9f6cb 100644 --- a/snow/engine/snowman/block/mocks/with_verify_context.go +++ b/snow/engine/snowman/block/mock_with_verify_context.go @@ -1,17 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: WithVerifyContext) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_with_verify_context.go github.com/ava-labs/avalanchego/snow/engine/snowman/block WithVerifyContext +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" gomock "go.uber.org/mock/gomock" ) @@ -48,13 +49,13 @@ func (m *MockWithVerifyContext) ShouldVerifyWithContext(arg0 context.Context) (b } // ShouldVerifyWithContext indicates an expected call of ShouldVerifyWithContext. -func (mr *MockWithVerifyContextMockRecorder) ShouldVerifyWithContext(arg0 interface{}) *gomock.Call { +func (mr *MockWithVerifyContextMockRecorder) ShouldVerifyWithContext(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldVerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).ShouldVerifyWithContext), arg0) } // VerifyWithContext mocks base method. -func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *block.Context) error { +func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyWithContext", arg0, arg1) ret0, _ := ret[0].(error) @@ -62,7 +63,7 @@ func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *bl } // VerifyWithContext indicates an expected call of VerifyWithContext. -func (mr *MockWithVerifyContextMockRecorder) VerifyWithContext(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockWithVerifyContextMockRecorder) VerifyWithContext(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).VerifyWithContext), arg0, arg1) } diff --git a/snow/engine/snowman/block/state_summary.go b/snow/engine/snowman/block/state_summary.go index 337a27d9f1d8..d89d77a22396 100644 --- a/snow/engine/snowman/block/state_summary.go +++ b/snow/engine/snowman/block/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
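The MockGen files above move out of the mocks subpackage into package block itself, as the regeneration commands embedded in each header show, so returned types lose their block. qualifier and recorder arguments use any. A hedged sketch of consuming one of the relocated mocks in a test:

package block_test // illustrative package

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
)

func TestUsesRelocatedMock(t *testing.T) {
	ctrl := gomock.NewController(t)

	// The mock now lives in package block next to the interfaces it mocks.
	vm := block.NewMockChainVM(ctrl)
	wantID := ids.GenerateTestID()
	vm.EXPECT().LastAccepted(gomock.Any()).Return(wantID, nil)

	gotID, err := vm.LastAccepted(context.Background())
	require.NoError(t, err)
	require.Equal(t, wantID, gotID)
}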
package block diff --git a/snow/engine/snowman/block/state_sync_mode.go b/snow/engine/snowman/block/state_sync_mode.go index 79f5c2e8043e..35da3ab4eda9 100644 --- a/snow/engine/snowman/block/state_sync_mode.go +++ b/snow/engine/snowman/block/state_sync_mode.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/state_syncable_vm.go b/snow/engine/snowman/block/state_syncable_vm.go index 5c25f37a7ad7..0457505183e5 100644 --- a/snow/engine/snowman/block/state_syncable_vm.go +++ b/snow/engine/snowman/block/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/test_batched_vm.go b/snow/engine/snowman/block/test_batched_vm.go index ef7991156070..e5d654ec4a87 100644 --- a/snow/engine/snowman/block/test_batched_vm.go +++ b/snow/engine/snowman/block/test_batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/test_state_summary.go b/snow/engine/snowman/block/test_state_summary.go index 089e6dcfd364..7287cff10120 100644 --- a/snow/engine/snowman/block/test_state_summary.go +++ b/snow/engine/snowman/block/test_state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/test_state_syncable_vm.go b/snow/engine/snowman/block/test_state_syncable_vm.go index b05dd8118683..f1eeb9606642 100644 --- a/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/snow/engine/snowman/block/test_state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/test_vm.go b/snow/engine/snowman/block/test_vm.go index 4f3a2835eda5..376dd27066f7 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/block/vm.go b/snow/engine/snowman/block/vm.go index 13d4fa75ed02..4153632a7616 100644 --- a/snow/engine/snowman/block/vm.go +++ b/snow/engine/snowman/block/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/snow/engine/snowman/bootstrap/block_job.go b/snow/engine/snowman/bootstrap/block_job.go index 83d86af27d1a..696cbddb58a9 100644 --- a/snow/engine/snowman/bootstrap/block_job.go +++ b/snow/engine/snowman/bootstrap/block_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package bootstrap @@ -98,7 +98,8 @@ func (b *blockJob) Execute(ctx context.Context) error { b.numAccepted.Inc() b.log.Trace("accepting block in bootstrapping", zap.Stringer("blkID", blkID), - zap.Uint64("blkHeight", b.blk.Height()), + zap.Uint64("height", b.blk.Height()), + zap.Time("timestamp", b.blk.Timestamp()), ) if err := b.blk.Accept(ctx); err != nil { b.log.Debug("failed to accept block during bootstrapping", diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index f6725aa00ba5..29754a24d734 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -18,26 +18,54 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) -// Parameters for delaying bootstrapping to avoid potential CPU burns -const bootstrappingDelay = 10 * time.Second +const ( + // Delay bootstrapping to avoid potential CPU burns + bootstrappingDelay = 10 * time.Second + + // statusUpdateFrequency is how many containers should be processed between + // logs + statusUpdateFrequency = 5000 + + // maxOutstandingBroadcastRequests is the maximum number of requests to have + // outstanding when broadcasting. + maxOutstandingBroadcastRequests = 50 +) var ( - _ common.BootstrapableEngine = (*bootstrapper)(nil) + _ common.BootstrapableEngine = (*Bootstrapper)(nil) errUnexpectedTimeout = errors.New("unexpected timeout fired") ) +// bootstrapper repeatedly performs the bootstrapping protocol. +// +// 1. Wait until a sufficient amount of stake is connected. +// 2. Sample a small number of nodes to get the last accepted block ID +// 3. Verify against the full network that the last accepted block ID received +// in step 2 is an accepted block. +// 4. Sync the full ancestry of the last accepted block. +// 5. Execute all the fetched blocks that haven't already been executed. +// 6. Restart the bootstrapping protocol until the number of blocks being +// accepted during a bootstrapping round stops decreasing. +// +// Note: Because of step 6, the bootstrapping protocol will generally be +// performed multiple times. +// // Invariant: The VM is not guaranteed to be initialized until Start has been // called, so it must be guaranteed the VM is not used until after Start. 
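The new doc comment above outlines the repeated bootstrapping rounds (sample a frontier, confirm it against the full network, sync the ancestry, execute, repeat until progress stalls). A minimal, self-contained Go sketch of that control flow follows; every helper name here is a hypothetical stand-in for illustration only, not the engine's real API, and the real engine drives these steps through message handlers rather than direct calls.

package main

import "fmt"

// Hypothetical stand-ins for the real network and VM interactions.
func sampleFrontier() []string                   { return []string{"blkA"} } // step 2: ask a small sample of peers
func confirmAccepted(frontier []string) []string { return frontier }         // step 3: confirm against the full validator set
func fetchAncestry(accepted []string) int        { return len(accepted) }    // step 4: download the missing ancestry
func executeBlocks(fetched int) int              { return fetched }          // step 5: execute everything fetched

func main() {
	previouslyExecuted := int(^uint(0) >> 1) // effectively "infinite" before the first round
	for {
		accepted := confirmAccepted(sampleFrontier())
		fetched := fetchAncestry(accepted)
		executed := executeBlocks(fetched)

		// Step 6: restart only while each round still accepts markedly fewer
		// blocks than the previous one (the diff uses executed < previous/2).
		if executed == 0 || executed >= previouslyExecuted/2 {
			fmt.Println("bootstrapping finished after a stable round")
			return
		}
		previouslyExecuted = executed
	}
}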
-type bootstrapper struct { +type Bootstrapper struct { Config + common.Halter + *metrics // list of NoOpsHandler for messages dropped by bootstrapper common.StateSummaryFrontierHandler @@ -47,21 +75,26 @@ type bootstrapper struct { common.ChitsHandler common.AppHandler - common.Bootstrapper - common.Fetcher - *metrics + requestID uint32 // Tracks the last requestID that was used in a request + + started bool + restarted bool - started bool + minority bootstrapper.Poll + majority bootstrapper.Poll - // Greatest height of the blocks passed in ForceAccepted + // Greatest height of the blocks passed in startSyncing tipHeight uint64 // Height of the last accepted block when bootstrapping starts startingHeight uint64 - // Number of blocks that were fetched on ForceAccepted + // Number of blocks that were fetched on startSyncing initiallyFetched uint64 - // Time that ForceAccepted was last called + // Time that startSyncing was last called startTime time.Time + // tracks which validators were asked for which containers in which requests + outstandingRequests *bimap.BiMap[common.Request, ids.ID] + // number of state transitions executed executedStateTransitions int @@ -81,15 +114,14 @@ type bootstrapper struct { // bootstrappedOnce ensures that the [Bootstrapped] callback is only invoked // once, even if bootstrapping is retried. bootstrappedOnce sync.Once + + // Called when bootstrapping is done on a specific chain + onFinished func(ctx context.Context, lastReqID uint32) error } -func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (common.BootstrapableEngine, error) { +func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (*Bootstrapper, error) { metrics, err := newMetrics("bs", config.Ctx.Registerer) - if err != nil { - return nil, err - } - - b := &bootstrapper{ + return &Bootstrapper{ Config: config, metrics: metrics, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), @@ -99,19 +131,31 @@ func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) e ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), AppHandler: config.VM, - Fetcher: common.Fetcher{ - OnFinished: onFinished, - }, - executedStateTransitions: math.MaxInt32, - } + minority: bootstrapper.Noop, + majority: bootstrapper.Noop, + + outstandingRequests: bimap.New[common.Request, ids.ID](), - config.Bootstrapable = b - b.Bootstrapper = common.NewCommonBootstrapper(config.Config) + executedStateTransitions: math.MaxInt, + onFinished: onFinished, + }, err +} + +func (b *Bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx +} + +func (b *Bootstrapper) Clear(context.Context) error { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() - return b, nil + if err := b.Config.Blocked.Clear(); err != nil { + return err + } + return b.Config.Blocked.Commit() } -func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { +func (b *Bootstrapper) Start(ctx context.Context, startReqID uint32) error { b.Ctx.Log.Info("starting bootstrapper") b.Ctx.State.Set(snow.EngineState{ @@ -143,207 +187,218 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return fmt.Errorf("couldn't get last accepted block: %w", err) } b.startingHeight = lastAccepted.Height() - b.Config.SharedCfg.RequestID = startReqID + b.requestID = startReqID - if !b.StartupTracker.ShouldStart() { - return nil + return b.tryStartBootstrapping(ctx) +} + +func (b *Bootstrapper) Connected(ctx context.Context, nodeID 
ids.NodeID, nodeVersion *version.Application) error { + if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { + return err } - b.started = true - return b.Startup(ctx) + if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + // Ensure fetchFrom reflects proper validator list + if _, ok := b.Beacons.GetValidator(b.Ctx.SubnetID, nodeID); ok { + b.fetchFrom.Add(nodeID) + } + + return b.tryStartBootstrapping(ctx) } -// Ancestors handles the receipt of multiple containers. Should be received in -// response to a GetAncestors message to [nodeID] with request ID [requestID] -func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { - // Make sure this is in response to a request we made - wantedBlkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) - if !ok { // this message isn't in response to a request we made - b.Ctx.Log.Debug("received unexpected Ancestors", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil +func (b *Bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := b.VM.Disconnected(ctx, nodeID); err != nil { + return err } - lenBlks := len(blks) - if lenBlks == 0 { - b.Ctx.Log.Debug("received Ancestors with no block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) + if err := b.StartupTracker.Disconnected(ctx, nodeID); err != nil { + return err + } - b.markUnavailable(nodeID) + b.markUnavailable(nodeID) + return nil +} - // Send another request for this - return b.fetch(ctx, wantedBlkID) +// tryStartBootstrapping will start bootstrapping the first time it is called +// while the startupTracker is reporting that the protocol should start. 
+func (b *Bootstrapper) tryStartBootstrapping(ctx context.Context) error { + if b.started || !b.StartupTracker.ShouldStart() { + return nil } - // This node has responded - so add it back into the set - b.fetchFrom.Add(nodeID) + b.started = true + return b.startBootstrapping(ctx) +} - if lenBlks > b.Config.AncestorsMaxContainersReceived { - blks = blks[:b.Config.AncestorsMaxContainersReceived] - b.Ctx.Log.Debug("ignoring containers in Ancestors", - zap.Int("numContainers", lenBlks-b.Config.AncestorsMaxContainersReceived), - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) +func (b *Bootstrapper) startBootstrapping(ctx context.Context) error { + currentBeacons := b.Beacons.GetMap(b.Ctx.SubnetID) + nodeWeights := make(map[ids.NodeID]uint64, len(currentBeacons)) + for nodeID, beacon := range currentBeacons { + nodeWeights[nodeID] = beacon.Weight } - blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) - if err != nil { // the provided blocks couldn't be parsed - b.Ctx.Log.Debug("failed to parse blocks in Ancestors", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - return b.fetch(ctx, wantedBlkID) + frontierNodes, err := bootstrapper.Sample(nodeWeights, b.SampleK) + if err != nil { + return err } - if len(blocks) == 0 { - b.Ctx.Log.Debug("parsing blocks returned an empty set of blocks", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return b.fetch(ctx, wantedBlkID) - } + b.Ctx.Log.Debug("sampled nodes to seed bootstrapping frontier", + zap.Reflect("sampledNodes", frontierNodes), + zap.Int("numNodes", len(nodeWeights)), + ) - requestedBlock := blocks[0] - if actualID := requestedBlock.ID(); actualID != wantedBlkID { - b.Ctx.Log.Debug("first block is not the requested block", - zap.Stringer("expectedBlkID", wantedBlkID), - zap.Stringer("blkID", actualID), + b.minority = bootstrapper.NewMinority( + b.Ctx.Log, + frontierNodes, + maxOutstandingBroadcastRequests, + ) + b.majority = bootstrapper.NewMajority( + b.Ctx.Log, + nodeWeights, + maxOutstandingBroadcastRequests, + ) + + if accepted, finalized := b.majority.Result(ctx); finalized { + b.Ctx.Log.Info("bootstrapping skipped", + zap.String("reason", "no provided bootstraps"), ) - return b.fetch(ctx, wantedBlkID) + return b.startSyncing(ctx, accepted) } - blockSet := make(map[ids.ID]snowman.Block, len(blocks)) - for _, block := range blocks[1:] { - blockSet[block.ID()] = block - } - return b.process(ctx, requestedBlock, blockSet) + b.requestID++ + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - blkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) - if !ok { - b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) +func (b *Bootstrapper) sendBootstrappingMessagesOrFinish(ctx context.Context) error { + if peers := b.minority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAcceptedFrontier(ctx, peers, b.requestID) return nil } - // This node timed out their request, so we can add them back to [fetchFrom] - b.fetchFrom.Add(nodeID) - - // Send another request for this - return b.fetch(ctx, blkID) -} - -func (b *bootstrapper) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { - return err + potentialAccepted, finalized := b.minority.Result(ctx) 
+ if !finalized { + // We haven't finalized the accepted frontier, so we should wait for the + // outstanding requests. + return nil } - if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - // Ensure fetchFrom reflects proper validator list - if _, ok := b.Beacons.GetValidator(b.Ctx.SubnetID, nodeID); ok { - b.fetchFrom.Add(nodeID) + if peers := b.majority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAccepted(ctx, peers, b.requestID, potentialAccepted) + return nil } - if b.started || !b.StartupTracker.ShouldStart() { + accepted, finalized := b.majority.Result(ctx) + if !finalized { + // We haven't finalized the accepted set, so we should wait for the + // outstanding requests. return nil } - b.started = true - return b.Startup(ctx) -} - -func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - if err := b.VM.Disconnected(ctx, nodeID); err != nil { - return err + numAccepted := len(accepted) + if numAccepted == 0 { + b.Ctx.Log.Debug("restarting bootstrap", + zap.String("reason", "no blocks accepted"), + zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), + ) + // Invariant: These functions are mutualy recursive. However, when + // [startBootstrapping] calls [sendMessagesOrFinish], it is guaranteed + // to exit when sending GetAcceptedFrontier requests. + return b.startBootstrapping(ctx) } - if err := b.StartupTracker.Disconnected(ctx, nodeID); err != nil { - return err + if !b.restarted { + b.Ctx.Log.Info("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), + ) + } else { + b.Ctx.Log.Debug("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), + ) } - b.markUnavailable(nodeID) - return nil + return b.startSyncing(ctx, accepted) } -func (b *bootstrapper) Timeout(ctx context.Context) error { - if !b.awaitingTimeout { - return errUnexpectedTimeout +func (b *Bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil } - b.awaitingTimeout = false - if !b.Config.BootstrapTracker.IsBootstrapped() { - return b.Restart(ctx, true) + if err := b.minority.RecordOpinion(ctx, nodeID, set.Of(containerID)); err != nil { + return err } - b.fetchETA.Set(0) - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (*bootstrapper) Gossip(context.Context) error { - return nil -} - -func (b *bootstrapper) Shutdown(ctx context.Context) error { - b.Ctx.Log.Info("shutting down bootstrapper") - - b.Ctx.Lock.Lock() - defer b.Ctx.Lock.Unlock() +func (b *Bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil + } - return b.VM.Shutdown(ctx) + if err := b.minority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) Notify(_ context.Context, msg common.Message) error { - if msg != common.StateSyncDone { - b.Ctx.Log.Warn("received an unexpected message from the VM", - 
zap.Stringer("msg", msg), +func (b *Bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync Accepted message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), ) return nil } - b.Ctx.StateSyncing.Set(false) - return nil + if err := b.majority.RecordOpinion(ctx, nodeID, containerIDs); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { - b.Ctx.Lock.Lock() - defer b.Ctx.Lock.Unlock() - - vmIntf, vmErr := b.VM.HealthCheck(ctx) - intf := map[string]interface{}{ - "consensus": struct{}{}, - "vm": vmIntf, +func (b *Bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil } - return intf, vmErr -} -func (b *bootstrapper) GetVM() common.VM { - return b.VM + if err := b.majority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { - pendingContainerIDs := b.Blocked.MissingIDs() - +func (b *Bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs []ids.ID) error { // Initialize the fetch from set to the currently preferred peers b.fetchFrom = b.StartupTracker.PreferredPeers() + pendingContainerIDs := b.Blocked.MissingIDs() // Append the list of accepted container IDs to pendingContainerIDs to ensure // we iterate over every container that must be traversed. pendingContainerIDs = append(pendingContainerIDs, acceptedContainerIDs...) 
- toProcess := make([]snowman.Block, 0, len(pendingContainerIDs)) b.Ctx.Log.Debug("starting bootstrapping", zap.Int("numPendingBlocks", len(pendingContainerIDs)), zap.Int("numAcceptedBlocks", len(acceptedContainerIDs)), ) + + toProcess := make([]snowman.Block, 0, len(pendingContainerIDs)) for _, blkID := range pendingContainerIDs { b.Blocked.AddMissingID(blkID) @@ -369,19 +424,19 @@ func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs [ } } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // Get block [blkID] and its ancestors from a validator -func (b *bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { +func (b *Bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // Make sure we haven't already requested this block - if b.OutstandingRequests.Contains(blkID) { + if b.outstandingRequests.HasValue(blkID) { return nil } // Make sure we don't already have this block if _, err := b.VM.GetBlock(ctx, blkID); err == nil { - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } validatorID, ok := b.fetchFrom.Peek() @@ -392,17 +447,118 @@ func (b *bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // We only allow one outbound request at a time from a node b.markUnavailable(validatorID) - b.Config.SharedCfg.RequestID++ + b.requestID++ - b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, blkID) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, blkID) // request block and ancestors + b.outstandingRequests.Put( + common.Request{ + NodeID: validatorID, + RequestID: b.requestID, + }, + blkID, + ) + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, blkID) // request block and ancestors return nil } +// Ancestors handles the receipt of multiple containers. 
Should be received in +// response to a GetAncestors message to [nodeID] with request ID [requestID] +func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { + // Make sure this is in response to a request we made + wantedBlkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) + if !ok { // this message isn't in response to a request we made + b.Ctx.Log.Debug("received unexpected Ancestors", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + + lenBlks := len(blks) + if lenBlks == 0 { + b.Ctx.Log.Debug("received Ancestors with no block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + + b.markUnavailable(nodeID) + + // Send another request for this + return b.fetch(ctx, wantedBlkID) + } + + // This node has responded - so add it back into the set + b.fetchFrom.Add(nodeID) + + if lenBlks > b.Config.AncestorsMaxContainersReceived { + blks = blks[:b.Config.AncestorsMaxContainersReceived] + b.Ctx.Log.Debug("ignoring containers in Ancestors", + zap.Int("numContainers", lenBlks-b.Config.AncestorsMaxContainersReceived), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + } + + blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) + if err != nil { // the provided blocks couldn't be parsed + b.Ctx.Log.Debug("failed to parse blocks in Ancestors", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) + return b.fetch(ctx, wantedBlkID) + } + + if len(blocks) == 0 { + b.Ctx.Log.Debug("parsing blocks returned an empty set of blocks", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return b.fetch(ctx, wantedBlkID) + } + + requestedBlock := blocks[0] + if actualID := requestedBlock.ID(); actualID != wantedBlkID { + b.Ctx.Log.Debug("first block is not the requested block", + zap.Stringer("expectedBlkID", wantedBlkID), + zap.Stringer("blkID", actualID), + ) + return b.fetch(ctx, wantedBlkID) + } + + blockSet := make(map[ids.ID]snowman.Block, len(blocks)) + for _, block := range blocks[1:] { + blockSet[block.ID()] = block + } + return b.process(ctx, requestedBlock, blockSet) +} + +func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + blkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) + if !ok { + b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + + // This node timed out their request, so we can add them back to [fetchFrom] + b.fetchFrom.Add(nodeID) + + // Send another request for this + return b.fetch(ctx, blkID) +} + // markUnavailable removes [nodeID] from the set of peers used to fetch // ancestors. If the set becomes empty, it is reset to the currently preferred // peers so bootstrapping can continue. 
-func (b *bootstrapper) markUnavailable(nodeID ids.NodeID) { +func (b *Bootstrapper) markUnavailable(nodeID ids.NodeID) { b.fetchFrom.Remove(nodeID) // if [fetchFrom] has become empty, reset it to the currently preferred @@ -412,16 +568,6 @@ func (b *bootstrapper) markUnavailable(nodeID ids.NodeID) { } } -func (b *bootstrapper) Clear(context.Context) error { - b.Ctx.Lock.Lock() - defer b.Ctx.Lock.Unlock() - - if err := b.Config.Blocked.Clear(); err != nil { - return err - } - return b.Config.Blocked.Commit() -} - // process a series of consecutive blocks starting at [blk]. // // - blk is a block that is assumed to have been marked as acceptable by the @@ -432,7 +578,7 @@ func (b *bootstrapper) Clear(context.Context) error { // // If [blk]'s height is <= the last accepted height, then it will be removed // from the missingIDs set. -func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { +func (b *Bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { for { blkID := blk.ID() if b.Halted() { @@ -458,7 +604,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // If this block is going to be accepted, make sure to update the @@ -484,7 +630,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // We added a new block to the queue, so track that it was fetched @@ -492,7 +638,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin // Periodically log progress blocksFetchedSoFar := b.Blocked.Jobs.PendingJobs() - if blocksFetchedSoFar%common.StatusUpdateFrequency == 0 { + if blocksFetchedSoFar%statusUpdateFrequency == 0 { totalBlocksToFetch := b.tipHeight - b.startingHeight eta := timer.EstimateETA( b.startTime, @@ -501,7 +647,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin ) b.fetchETA.Set(float64(eta)) - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("fetching blocks", zap.Uint64("numFetchedBlocks", blocksFetchedSoFar), zap.Uint64("numTotalBlocks", totalBlocksToFetch), @@ -545,22 +691,23 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } } -// checkFinish repeatedly executes pending transactions and requests new frontier vertices until there aren't any new ones -// after which it finishes the bootstrap process -func (b *bootstrapper) checkFinish(ctx context.Context) error { +// tryStartExecuting executes all pending blocks if there are no more blocks +// being fetched. After executing all pending blocks it will either restart +// bootstrapping, or transition into normal operations. 
+func (b *Bootstrapper) tryStartExecuting(ctx context.Context) error { if numPending := b.Blocked.NumMissingIDs(); numPending != 0 { return nil } - if b.IsBootstrapped() || b.awaitingTimeout { + if b.Ctx.State.Get().State == snow.NormalOp || b.awaitingTimeout { return nil } - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("executing blocks", zap.Uint64("numPendingJobs", b.Blocked.PendingJobs()), ) @@ -574,7 +721,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + b.restarted, b.Ctx.BlockAcceptor, ) if err != nil || b.Halted() { @@ -587,16 +734,14 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { // Note that executedBlocks < c*previouslyExecuted ( 0 <= c < 1 ) is enforced // so that the bootstrapping process will terminate even as new blocks are // being issued. - if b.Config.RetryBootstrap && executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { - return b.Restart(ctx, true) + if executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { + return b.restartBootstrapping(ctx) } // If there is an additional callback, notify them that this chain has been // synced. if b.Bootstrapped != nil { - b.bootstrappedOnce.Do(func() { - b.Bootstrapped() - }) + b.bootstrappedOnce.Do(b.Bootstrapped) } // Notify the subnet that this chain is synced @@ -605,7 +750,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { // If the subnet hasn't finished bootstrapping, this chain should remain // syncing. if !b.Config.BootstrapTracker.IsBootstrapped() { - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") } else { b.Ctx.Log.Debug("waiting for the remaining chains in this subnet to finish syncing") @@ -617,5 +762,62 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { return nil } b.fetchETA.Set(0) - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.onFinished(ctx, b.requestID) +} + +func (b *Bootstrapper) Timeout(ctx context.Context) error { + if !b.awaitingTimeout { + return errUnexpectedTimeout + } + b.awaitingTimeout = false + + if !b.Config.BootstrapTracker.IsBootstrapped() { + return b.restartBootstrapping(ctx) + } + b.fetchETA.Set(0) + return b.onFinished(ctx, b.requestID) +} + +func (b *Bootstrapper) restartBootstrapping(ctx context.Context) error { + b.Ctx.Log.Debug("Checking for new frontiers") + b.restarted = true + b.outstandingRequests = bimap.New[common.Request, ids.ID]() + return b.startBootstrapping(ctx) +} + +func (b *Bootstrapper) Notify(_ context.Context, msg common.Message) error { + if msg != common.StateSyncDone { + b.Ctx.Log.Warn("received an unexpected message from the VM", + zap.Stringer("msg", msg), + ) + return nil + } + + b.Ctx.StateSyncing.Set(false) + return nil +} + +func (b *Bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + + vmIntf, vmErr := b.VM.HealthCheck(ctx) + intf := map[string]interface{}{ + "consensus": struct{}{}, + "vm": vmIntf, + } + return intf, vmErr +} + +func (b *Bootstrapper) Shutdown(ctx context.Context) error { + b.Ctx.Log.Info("shutting down bootstrapper") + + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + + return b.VM.Shutdown(ctx) +} + +func (*Bootstrapper) Gossip(context.Context) error { + return nil } diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 
620d85b0ba80..08f63f163b80 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -8,6 +8,7 @@ import ( "context" "errors" "testing" + "time" "github.com/prometheus/client_golang/prometheus" @@ -25,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" @@ -37,7 +39,8 @@ var errUnknownBlock = errors.New("unknown block") func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.TestVM) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() @@ -74,29 +77,22 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) - commonConfig := common.Config{ + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) + require.NoError(err) + + blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) + return Config{ + AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: vdrs, SampleK: vdrs.Count(ctx.SubnetID), - Alpha: totalWeight/2 + 1, StartupTracker: startupTracker, Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := getter.New(vm, commonConfig) - require.NoError(err) - - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - return Config{ - Config: commonConfig, - AllGetsServer: snowGetHandler, - Blocked: blocker, - VM: vm, + Blocked: blocker, + VM: vm, }, peer, sender, vm } @@ -110,7 +106,8 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { sender.Default(true) vm.Default(true) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) // create boostrapper configuration peers := validators.NewManager() sampleK := 2 @@ -121,28 +118,21 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { startupTracker := tracker.NewStartup(peerTracker, startupAlpha) peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) - commonCfg := common.Config{ + blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) + require.NoError(err) + cfg := Config{ + AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: peers, SampleK: sampleK, - Alpha: alpha, StartupTracker: startupTracker, Sender: sender, BootstrapTracker: &common.BootstrapTrackerTest{}, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - 
snowGetHandler, err := getter.New(vm, commonCfg) - require.NoError(err) - cfg := Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocker, - VM: vm, + Blocked: blocker, + VM: vm, } blkID0 := ids.Empty.Prefix(0) @@ -193,7 +183,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() - require.NoError(peers.AddStaker(commonCfg.Ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(peers.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) require.NoError(bs.Connected(context.Background(), vdr0, version.CurrentApp)) require.NoError(bs.Start(context.Background(), 0)) @@ -201,7 +191,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. vdr := ids.GenerateTestNodeID() - require.NoError(peers.AddStaker(commonCfg.Ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) + require.NoError(peers.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) require.NoError(bs.Connected(context.Background(), vdr, version.CurrentApp)) require.True(frontierRequested) } @@ -284,7 +274,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil, errUnknownBlock } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk1.Status()) } @@ -357,8 +347,6 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require.NoError(bs.Start(context.Background(), 0)) - acceptedIDs := []ids.ID{blkID2} - parsedBlk1 := false vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -391,31 +379,29 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { return nil, errUnknownBlock } - requestID := new(uint32) + var requestID uint32 sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { require.Equal(peerID, vdr) require.Equal(blkID1, blkID) - *requestID = reqID + requestID = reqID } vm.CantSetState = false - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk1 - - oldReqID := *requestID - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1})) // respond with wrong request ID - require.Equal(oldReqID, *requestID) + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) // should request blk1 - require.NoError(bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1})) // respond from wrong peer - require.Equal(oldReqID, *requestID) + oldReqID := requestID + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes0})) // respond with wrong block + require.NotEqual(oldReqID, requestID) - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0})) // respond with wrong block - require.NotEqual(oldReqID, *requestID) + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes1})) - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, 
blk0.Status()) require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } // There are multiple needed blocks and Ancestors returns one at a time @@ -547,7 +533,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { requested = blkID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk2 + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2})) // respond with blk2 require.Equal(blkID1, requested) @@ -555,10 +541,13 @@ func TestBootstrapperPartialFetch(t *testing.T) { require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) // respond with blk1 require.Equal(blkID1, requested) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk0.Status()) require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } // There are multiple needed blocks and some validators do not have all the blocks @@ -692,16 +681,16 @@ func TestBootstrapperEmptyResponse(t *testing.T) { } // should request blk2 - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) require.Equal(peerID, requestedVdr) require.Equal(blkID2, requestedBlock) // add another two validators to the fetch set to test behavior on empty response newPeerID := ids.GenerateTestNodeID() - bs.(*bootstrapper).fetchFrom.Add(newPeerID) + bs.fetchFrom.Add(newPeerID) newPeerID = ids.GenerateTestNodeID() - bs.(*bootstrapper).fetchFrom.Add(newPeerID) + bs.fetchFrom.Add(newPeerID) require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2})) require.Equal(blkID1, requestedBlock) @@ -715,13 +704,13 @@ func TestBootstrapperEmptyResponse(t *testing.T) { require.NoError(bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1})) // respond with blk1 - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk0.Status()) require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) // check peerToBlacklist was removed from the fetch set - require.NotContains(bs.(*bootstrapper).fetchFrom, peerToBlacklist) + require.NotContains(bs.fetchFrom, peerToBlacklist) } // There are multiple needed blocks and Ancestors returns all at once @@ -853,14 +842,17 @@ func TestBootstrapperAncestors(t *testing.T) { requested = blkID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk2 + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1})) // respond with blk2 and blk1 require.Equal(blkID2, requested) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk0.Status()) 
require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestBootstrapperFinalized(t *testing.T) { @@ -970,17 +962,20 @@ func TestBootstrapperFinalized(t *testing.T) { requestIDs[blkID] = reqID } - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 reqIDBlk2, ok := requestIDs[blkID2] require.True(ok) require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk0.Status()) require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestRestartBootstrapping(t *testing.T) { @@ -1107,7 +1102,7 @@ func TestRestartBootstrapping(t *testing.T) { return nil, errUnknownBlock } - bsIntf, err := New( + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1118,8 +1113,6 @@ func TestRestartBootstrapping(t *testing.T) { }, ) require.NoError(err) - require.IsType(&bootstrapper{}, bsIntf) - bs := bsIntf.(*bootstrapper) vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) @@ -1131,7 +1124,7 @@ func TestRestartBootstrapping(t *testing.T) { } // Force Accept blk3 - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID3})) // should request blk3 + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID3})) // should request blk3 reqID, ok := requestIDs[blkID3] require.True(ok) @@ -1140,11 +1133,12 @@ func TestRestartBootstrapping(t *testing.T) { require.Contains(requestIDs, blkID1) - // Remove request, so we can restart bootstrapping via ForceAccepted - require.True(bs.OutstandingRequests.RemoveAny(blkID1)) + // Remove request, so we can restart bootstrapping via startSyncing + _, removed := bs.outstandingRequests.DeleteValue(blkID1) + require.True(removed) requestIDs = map[ids.ID]uint32{} - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID4})) + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) blk1RequestID, ok := requestIDs[blkID1] require.True(ok) @@ -1157,12 +1151,15 @@ func TestRestartBootstrapping(t *testing.T) { require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4})) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(choices.Accepted, blk0.Status()) require.Equal(choices.Accepted, blk1.Status()) require.Equal(choices.Accepted, blk2.Status()) require.Equal(choices.Accepted, blk3.Status()) require.Equal(choices.Accepted, blk4.Status()) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestBootstrapOldBlockAfterStateSync(t *testing.T) { @@ -1213,7 +1210,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { return nil, errUnknownBlock } - bsIntf, err := New( + bs, err := New( config, func(context.Context, 
uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1224,8 +1221,6 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { }, ) require.NoError(err) - require.IsType(&bootstrapper{}, bsIntf) - bs := bsIntf.(*bootstrapper) vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) @@ -1237,7 +1232,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { } // Force Accept, the already transitively accepted, blk0 - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blk0.ID()})) // should request blk0 + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk0.ID()})) // should request blk0 reqID, ok := requestIDs[blk0.ID()] require.True(ok) @@ -1284,7 +1279,7 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { return blk0.ID(), nil } - bsIntf, err := New( + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1295,8 +1290,6 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { }, ) require.NoError(err) - require.IsType(&bootstrapper{}, bsIntf) - bs := bsIntf.(*bootstrapper) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1316,7 +1309,7 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blk2.ID()})) + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk2.ID()})) require.Equal(1, bs.Blocked.NumMissingIDs()) } @@ -1324,7 +1317,8 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { func TestBootstrapNoParseOnNew(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) peers := validators.NewManager() sender := &common.SenderTest{} @@ -1359,21 +1353,7 @@ func TestBootstrapNoParseOnNew(t *testing.T) { peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) - commonConfig := common.Config{ - Ctx: ctx, - Beacons: peers, - SampleK: peers.Count(ctx.SubnetID), - Alpha: totalWeight/2 + 1, - StartupTracker: startupTracker, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := getter.New(vm, commonConfig) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) queueDB := memdb.New() @@ -1422,10 +1402,17 @@ func TestBootstrapNoParseOnNew(t *testing.T) { require.NoError(err) config := Config{ - Config: commonConfig, - AllGetsServer: snowGetHandler, - Blocked: blocker, - VM: vm, + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: peers, + SampleK: peers.Count(ctx.SubnetID), + StartupTracker: startupTracker, + Sender: sender, + BootstrapTracker: bootstrapTracker, + Timer: &common.TimerTest{}, + AncestorsMaxContainersReceived: 2000, + Blocked: blocker, + VM: vm, } _, err = New( @@ -1440,3 +1427,124 @@ func TestBootstrapNoParseOnNew(t *testing.T) { ) require.NoError(err) } + +func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { + require := require.New(t) + + config, peerID, sender, vm := newConfig(t) + + var ( + blkID0 = ids.GenerateTestID() + blkBytes0 = utils.RandomBytes(1024) + blk0 = &snowman.TestBlock{ + TestDecidable: 
choices.TestDecidable{ + IDV: blkID0, + StatusV: choices.Accepted, + }, + HeightV: 0, + BytesV: blkBytes0, + } + + blkID1 = ids.GenerateTestID() + blkBytes1 = utils.RandomBytes(1024) + blk1 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID1, + StatusV: choices.Processing, + }, + ParentV: blk0.IDV, + HeightV: blk0.HeightV + 1, + BytesV: blkBytes1, + } + + blkID2 = ids.GenerateTestID() + blkBytes2 = utils.RandomBytes(1024) + blk2 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID2, + StatusV: choices.Processing, + }, + ParentV: blk1.IDV, + HeightV: blk1.HeightV + 1, + BytesV: blkBytes2, + } + ) + + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + require.Equal(blkID0, blkID) + return blk0, nil + } + bs, err := New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, + ) + require.NoError(err) + + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) + + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case blkID0: + return blk0, nil + case blkID1: + if blk1.StatusV == choices.Accepted { + return blk1, nil + } + return nil, database.ErrNotFound + case blkID2: + if blk2.StatusV == choices.Accepted { + return blk2, nil + } + return nil, database.ErrNotFound + default: + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound + } + } + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes0): + return blk0, nil + case bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + case bytes.Equal(blkBytes, blkBytes2): + return blk2, nil + default: + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock + } + } + + requestIDs := map[ids.ID]uint32{} + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID + } + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 + + reqIDBlk1, ok := requestIDs[blkID1] + require.True(ok) + reqIDBlk2, ok := requestIDs[blkID2] + require.True(ok) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) + + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, [][]byte{blkBytes1})) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) +} diff --git a/snow/engine/snowman/bootstrap/config.go b/snow/engine/snowman/bootstrap/config.go index 0c05feb7dfa5..6fb8894db96f 100644 --- a/snow/engine/snowman/bootstrap/config.go +++ b/snow/engine/snowman/bootstrap/config.go @@ -1,18 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + Beacons validators.Manager + + SampleK int + StartupTracker tracker.Startup + Sender common.Sender + BootstrapTracker common.BootstrapTracker + Timer common.Timer + + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. + AncestorsMaxContainersReceived int + // Blocked tracks operations that are blocked on blocks // // It should be guaranteed that `MissingIDs` should contain all IDs diff --git a/snow/engine/snowman/bootstrap/metrics.go b/snow/engine/snowman/bootstrap/metrics.go index 9359ecfadb19..f6ad90d16419 100644 --- a/snow/engine/snowman/bootstrap/metrics.go +++ b/snow/engine/snowman/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap diff --git a/snow/engine/snowman/config.go b/snow/engine/snowman/config.go index ed63af2f4936..3162471a2476 100644 --- a/snow/engine/snowman/config.go +++ b/snow/engine/snowman/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -16,11 +17,12 @@ import ( type Config struct { common.AllGetsServer - Ctx *snow.ConsensusContext - VM block.ChainVM - Sender common.Sender - Validators validators.Manager - Params snowball.Parameters - Consensus snowman.Consensus - PartialSync bool + Ctx *snow.ConsensusContext + VM block.ChainVM + Sender common.Sender + Validators validators.Manager + ConnectedValidators tracker.Peers + Params snowball.Parameters + Consensus snowman.Consensus + PartialSync bool } diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go index 54d9536a4884..fe66256c68db 100644 --- a/snow/engine/snowman/config_test.go +++ b/snow/engine/snowman/config_test.go @@ -1,23 +1,29 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "testing" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" ) -func DefaultConfigs() Config { - commonCfg := common.DefaultConfigTest() +func DefaultConfig(t testing.TB) Config { + ctx := snowtest.Context(t, snowtest.PChainID) + return Config{ - Ctx: commonCfg.Ctx, - Sender: commonCfg.Sender, - Validators: validators.NewManager(), - VM: &block.TestVM{}, + Ctx: snowtest.ConsensusContext(ctx), + VM: &block.TestVM{}, + Sender: &common.SenderTest{}, + Validators: validators.NewManager(), + ConnectedValidators: tracker.NewPeers(), Params: snowball.Parameters{ K: 1, AlphaPreference: 1, diff --git a/snow/engine/snowman/engine.go b/snow/engine/snowman/engine.go index 37985f5b48fa..b5e3fb1020e3 100644 --- a/snow/engine/snowman/engine.go +++ b/snow/engine/snowman/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index a8cb405d57ce..ff8fe13f8fe9 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -1,10 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package getter import ( "context" + "time" + + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -15,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/set" ) // Get requests are always served, regardless node state (bootstrapping or normal operations). @@ -22,15 +26,20 @@ var _ common.AllGetsServer = (*getter)(nil) func New( vm block.ChainVM, - commonCfg common.Config, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, ) (common.AllGetsServer, error) { ssVM, _ := vm.(block.StateSyncableVM) gh := &getter{ - vm: vm, - ssVM: ssVM, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + vm: vm, + ssVM: ssVM, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,18 +47,23 @@ func New( "bs", "get_ancestors_blks", "blocks fetched in a call to GetAncestors", - commonCfg.Ctx.Registerer, + reg, ) return gh, err } type getter struct { - vm block.ChainVM - ssVM block.StateSyncableVM // can be nil + vm block.ChainVM + ssVM block.StateSyncableVM // can be nil + sender common.Sender - cfg common.Config + log logging.Logger + // Max time to spend fetching a container and its ancestors when responding + // to a GetAncestors + maxTimeGetAncestors time.Duration + // Max number of containers in an ancestors message sent by this node. 
+ maxContainersGetAncestors int - log logging.Logger getAncestorsBlks metric.Averager } @@ -81,10 +95,10 @@ func (gh *getter) GetStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID return nil } -func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { // If there are no requested heights, then we can return the result // immediately, regardless of if the underlying VM implements state sync. - if len(heights) == 0 { + if heights.Len() == 0 { gh.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, nil) return nil } @@ -101,8 +115,8 @@ func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID return nil } - summaryIDs := make([]ids.ID, 0, len(heights)) - for _, height := range heights { + summaryIDs := make([]ids.ID, 0, heights.Len()) + for height := range heights { summary, err := gh.ssVM.GetStateSummary(ctx, height) if err == block.ErrStateSyncableVMNotImplemented { gh.log.Debug("dropping GetAcceptedStateSummary message", @@ -135,9 +149,9 @@ func (gh *getter) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedIDs := make([]ids.ID, 0, len(containerIDs)) - for _, blkID := range containerIDs { +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedIDs := make([]ids.ID, 0, containerIDs.Len()) + for blkID := range containerIDs { blk, err := gh.vm.GetBlock(ctx, blkID) if err == nil && blk.Status() == choices.Accepted { acceptedIDs = append(acceptedIDs, blkID) @@ -153,9 +167,9 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID gh.log, gh.vm, blkID, - gh.cfg.AncestorsMaxContainersSent, + gh.maxContainersGetAncestors, constants.MaxContainersLen, - gh.cfg.MaxTimeGetAncestors, + gh.maxTimeGetAncestors, ) if err != nil { gh.log.Verbo("dropping GetAncestors message", diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index 35a0e11f9ebb..4fc03d4795d6 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
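// Editor's sketch (not part of the diff): wiring the reworked getter constructor,
// which now takes its dependencies directly instead of a common.Config. This mirrors
// the call made in the updated tests below; the one-second timeout and the 2000
// container cap are illustrative values, not mandated defaults.
package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/getter"
	"github.com/ava-labs/avalanchego/utils/logging"
)

func newGetHandler(vm block.ChainVM, sender common.Sender) (common.AllGetsServer, error) {
	return getter.New(
		vm,
		sender,
		logging.NoLog{},          // logger (previously commonCfg.Ctx.Log)
		time.Second,              // max time spent serving one GetAncestors request
		2000,                     // max containers packed into an Ancestors response
		prometheus.NewRegistry(), // metrics registerer (previously commonCfg.Ctx.Registerer)
	)
}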
package getter @@ -7,108 +7,64 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownBlock = errors.New("unknown block") type StateSyncEnabledMock struct { *block.TestVM - *mocks.MockStateSyncableVM + *block.MockStateSyncableVM } -func testSetup( - t *testing.T, - ctrl *gomock.Controller, -) (StateSyncEnabledMock, *common.SenderTest, common.Config) { - ctx := snow.DefaultConsensusContextTest() +func newTest(t *testing.T) (common.AllGetsServer, StateSyncEnabledMock, *common.SenderTest) { + ctrl := gomock.NewController(t) - peers := validators.NewManager() - sender := &common.SenderTest{} vm := StateSyncEnabledMock{ TestVM: &block.TestVM{}, - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } - sender.T = t - - sender.Default(true) - - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := &common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, } + sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - peer := ids.GenerateTestNodeID() - require.NoError(t, peers.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) - totalWeight, err := peers.TotalWeight(ctx.SubnetID) + bs, err := New( + vm, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(t, err) - commonConfig := common.Config{ - Ctx: ctx, - Beacons: peers, - SampleK: peers.Count(ctx.SubnetID), - Alpha: totalWeight/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - return vm, sender, commonConfig + return bs, vm, sender } func TestAcceptedFrontier(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - - vm, sender, config := testSetup(t, ctrl) + bs, vm, sender := newTest(t) blkID := ids.GenerateTestID() - - dummyBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: []byte{1, 2, 3}, - } - vm.CantLastAccepted = false vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return blkID, nil } - vm.GetBlockF = func(_ context.Context, bID ids.ID) (snowman.Block, error) { - require.Equal(blkID, bID) - return dummyBlk, nil - } - - bsIntf, err := New(vm, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) var accepted ids.ID sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, containerID ids.ID) { @@ -121,9 +77,7 @@ func TestAcceptedFrontier(t *testing.T) { func TestFilterAccepted(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - - vm, sender, config := testSetup(t, ctrl) + bs, vm, sender := newTest(t) 
blkID0 := ids.GenerateTestID() blkID1 := ids.GenerateTestID() @@ -138,21 +92,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk1.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk1.ID(), blkID) - return blk1, nil - } - - bsIntf, err := New(vm, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - - blkIDs := []ids.ID{blkID0, blkID1, blkID2} vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: @@ -171,6 +110,7 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } + blkIDs := set.Of(blkID0, blkID1, blkID2) require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) require.Len(accepted, 2) diff --git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go index ca69064105e1..d952dfe2cc6b 100644 --- a/snow/engine/snowman/issuer.go +++ b/snow/engine/snowman/issuer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -6,6 +6,8 @@ package snowman import ( "context" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/set" @@ -13,11 +15,13 @@ import ( // issuer issues [blk] into to consensus after its dependencies are met. type issuer struct { - t *Transitive - blk snowman.Block - abandoned bool - deps set.Set[ids.ID] - push bool + t *Transitive + nodeID ids.NodeID // nodeID of the peer that provided this block + blk snowman.Block + issuedMetric prometheus.Counter + abandoned bool + deps set.Set[ids.ID] + push bool } func (i *issuer) Dependencies() set.Set[ids.ID] { @@ -51,5 +55,5 @@ func (i *issuer) Update(ctx context.Context) { return } // Issue the block into consensus - i.t.errs.Add(i.t.deliver(ctx, i.blk, i.push)) + i.t.errs.Add(i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric)) } diff --git a/snow/engine/snowman/memory_block.go b/snow/engine/snowman/memory_block.go index c3b476b9f496..d91118afa5b3 100644 --- a/snow/engine/snowman/memory_block.go +++ b/snow/engine/snowman/memory_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go index ae7cc66cfbfb..5dd65d8afa14 100644 --- a/snow/engine/snowman/metrics.go +++ b/snow/engine/snowman/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
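// Editor's sketch (not part of the diff): the pattern introduced in the metrics
// hunk that follows — a blks_issued counter labelled by discovery source, with
// every known label value touched once so the per-source series are exported
// from startup. Metric and label names are taken from the diff; the helper
// itself is hypothetical.
package example

import "github.com/prometheus/client_golang/prometheus"

func newIssuedCounter(namespace string, reg prometheus.Registerer) (*prometheus.CounterVec, error) {
	issued := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "blks_issued",
		Help:      "number of blocks that have been issued into consensus by discovery mechanism",
	}, []string{"source"})

	// Pre-register the labels so each source reports zero instead of being absent.
	for _, source := range []string{"pull_gossip", "push_gossip", "put_gossip", "built", "unknown"} {
		issued.WithLabelValues(source)
	}

	return issued, reg.Register(issued)
}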
package snowman @@ -10,6 +10,14 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) +const ( + pullGossipSource = "pull_gossip" + pushGossipSource = "push_gossip" + putGossipSource = "put_gossip" + builtSource = "built" + unknownSource = "unknown" +) + type metrics struct { bootstrapFinished prometheus.Gauge numRequests prometheus.Gauge @@ -27,6 +35,8 @@ type metrics struct { numProcessingAncestorFetchesUnneeded prometheus.Counter getAncestorsBlks metric.Averager selectedVoteIndex metric.Averager + issuerStake metric.Averager + issued *prometheus.CounterVec } func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error { @@ -115,6 +125,25 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error reg, &errs, ) + m.issuerStake = metric.NewAveragerWithErrs( + namespace, + "issuer_stake", + "stake weight of the peer who provided a block that was issued into consensus", + reg, + &errs, + ) + m.issued = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "blks_issued", + Help: "number of blocks that have been issued into consensus by discovery mechanism", + }, []string{"source"}) + + // Register the labels + m.issued.WithLabelValues(pullGossipSource) + m.issued.WithLabelValues(pushGossipSource) + m.issued.WithLabelValues(putGossipSource) + m.issued.WithLabelValues(builtSource) + m.issued.WithLabelValues(unknownSource) errs.Add( reg.Register(m.bootstrapFinished), @@ -131,6 +160,7 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error reg.Register(m.numProcessingAncestorFetchesDropped), reg.Register(m.numProcessingAncestorFetchesSucceeded), reg.Register(m.numProcessingAncestorFetchesUnneeded), + reg.Register(m.issued), ) return errs.Err } diff --git a/snow/engine/snowman/syncer/config.go b/snow/engine/snowman/syncer/config.go index 4e10d412f38a..b5fae133a376 100644 --- a/snow/engine/snowman/syncer/config.go +++ b/snow/engine/snowman/syncer/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package syncer @@ -7,15 +7,22 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/math" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + + StartupTracker tracker.Startup + Sender common.Sender + // SampleK determines the number of nodes to attempt to fetch the latest // state sync summary from. In order for a round of voting to succeed, there // must be at least one correct node sampled. @@ -33,18 +40,18 @@ type Config struct { } func NewConfig( - commonCfg common.Config, - stateSyncerIDs []ids.NodeID, snowGetHandler common.AllGetsServer, + ctx *snow.ConsensusContext, + startupTracker tracker.Startup, + sender common.Sender, + beacons validators.Manager, + sampleK int, + alpha uint64, + stateSyncerIDs []ids.NodeID, vm block.ChainVM, ) (Config, error) { - // Initialize the default values that will be used if stateSyncerIDs is - // empty. 
- var ( - stateSyncBeacons = commonCfg.Beacons - syncAlpha = commonCfg.Alpha - syncSampleK = commonCfg.SampleK - ) + // Initialize the beacons that will be used if stateSyncerIDs is empty. + stateSyncBeacons := beacons // If the user has manually provided state syncer IDs, then override the // state sync beacons to them. @@ -52,24 +59,24 @@ func NewConfig( stateSyncBeacons = validators.NewManager() for _, peerID := range stateSyncerIDs { // Invariant: We never use the TxID or BLS keys populated here. - if err := stateSyncBeacons.AddStaker(commonCfg.Ctx.SubnetID, peerID, nil, ids.Empty, 1); err != nil { + if err := stateSyncBeacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1); err != nil { return Config{}, err } } - stateSyncingWeight, err := stateSyncBeacons.TotalWeight(commonCfg.Ctx.SubnetID) + stateSyncingWeight, err := stateSyncBeacons.TotalWeight(ctx.SubnetID) if err != nil { - return Config{}, fmt.Errorf("failed to calculate total weight of state sync beacons for subnet %s: %w", commonCfg.Ctx.SubnetID, err) - } - if uint64(syncSampleK) > stateSyncingWeight { - syncSampleK = int(stateSyncingWeight) + return Config{}, fmt.Errorf("failed to calculate total weight of state sync beacons for subnet %s: %w", ctx.SubnetID, err) } - syncAlpha = stateSyncingWeight/2 + 1 // must be > 50% + sampleK = int(math.Min(uint64(sampleK), stateSyncingWeight)) + alpha = stateSyncingWeight/2 + 1 // must be > 50% } return Config{ - Config: commonCfg, AllGetsServer: snowGetHandler, - SampleK: syncSampleK, - Alpha: syncAlpha, + Ctx: ctx, + StartupTracker: startupTracker, + Sender: sender, + SampleK: sampleK, + Alpha: alpha, StateSyncBeacons: stateSyncBeacons, VM: vm, }, nil diff --git a/snow/engine/snowman/syncer/state_syncer.go b/snow/engine/snowman/syncer/state_syncer.go index 87e6d1786173..bc549a0ce93a 100644 --- a/snow/engine/snowman/syncer/state_syncer.go +++ b/snow/engine/snowman/syncer/state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package syncer @@ -24,6 +24,10 @@ import ( safemath "github.com/ava-labs/avalanchego/utils/math" ) +// maxOutstandingBroadcastRequests is the maximum number of requests to have +// outstanding when broadcasting. +const maxOutstandingBroadcastRequests = 50 + var _ common.StateSyncer = (*stateSyncer)(nil) // summary content as received from network, along with accumulated weight. 
@@ -84,9 +88,6 @@ type stateSyncer struct { // we keep a list of deduplicated height ready for voting summariesHeights set.Set[uint64] uniqueSummariesHeights []uint64 - - // number of times the state sync has been attempted - attempts int } func New( @@ -108,6 +109,57 @@ func New( } } +func (ss *stateSyncer) Context() *snow.ConsensusContext { + return ss.Ctx +} + +func (ss *stateSyncer) Start(ctx context.Context, startReqID uint32) error { + ss.Ctx.Log.Info("starting state sync") + + ss.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.StateSyncing, + }) + if err := ss.VM.SetState(ctx, snow.StateSyncing); err != nil { + return fmt.Errorf("failed to notify VM that state syncing has started: %w", err) + } + + ss.requestID = startReqID + + return ss.tryStartSyncing(ctx) +} + +func (ss *stateSyncer) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + if err := ss.VM.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + + if err := ss.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + + return ss.tryStartSyncing(ctx) +} + +func (ss *stateSyncer) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := ss.VM.Disconnected(ctx, nodeID); err != nil { + return err + } + + return ss.StartupTracker.Disconnected(ctx, nodeID) +} + +// tryStartSyncing will start syncing the first time it is called while the +// startupTracker is reporting that the protocol should start. +func (ss *stateSyncer) tryStartSyncing(ctx context.Context) error { + if ss.started || !ss.StartupTracker.ShouldStart() { + return nil + } + + ss.started = true + return ss.startup(ctx) +} + func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryBytes []byte) error { // ignores any late responses if requestID != ss.requestID { @@ -207,15 +259,11 @@ func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { frontierStake := frontiersTotalWeight - failedBeaconWeight if float64(frontierStake) < frontierAlpha { - ss.Ctx.Log.Debug("didn't receive enough frontiers", + ss.Ctx.Log.Debug("restarting state sync", + zap.String("reason", "didn't receive enough frontiers"), zap.Int("numFailedValidators", ss.failedSeeders.Len()), - zap.Int("numStateSyncAttempts", ss.attempts), ) - - if ss.Config.RetryBootstrap { - ss.Ctx.Log.Debug("restarting state sync") - return ss.restart(ctx) - } + return ss.startup(ctx) } ss.requestID++ @@ -223,7 +271,7 @@ func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { return nil } -func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync AcceptedStateSummary message", @@ -248,10 +296,10 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node ss.Ctx.Log.Debug("adding weight to summaries", zap.Stringer("nodeID", nodeID), zap.Stringer("subnetID", ss.Ctx.SubnetID), - zap.Stringers("summaryIDs", summaryIDs), + zap.Reflect("summaryIDs", summaryIDs), zap.Uint64("nodeWeight", nodeWeight), ) - for _, summaryID := range summaryIDs { + for summaryID := range summaryIDs { ws, ok := ss.weightedSummaries[summaryID] if !ok { ss.Ctx.Log.Debug("skipping 
summary", @@ -326,14 +374,13 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node return fmt.Errorf("failed to get total weight of state sync beacons for subnet %s: %w", ss.Ctx.SubnetID, err) } votingStakes := beaconsTotalWeight - failedVotersWeight - if ss.Config.RetryBootstrap && votingStakes < ss.Alpha { + if votingStakes < ss.Alpha { ss.Ctx.Log.Debug("restarting state sync", zap.String("reason", "not enough votes received"), zap.Int("numBeacons", ss.StateSyncBeacons.Count(ss.Ctx.SubnetID)), zap.Int("numFailedSyncers", ss.failedVoters.Len()), - zap.Int("numAttempts", ss.attempts), ) - return ss.restart(ctx) + return ss.startup(ctx) } ss.Ctx.Log.Info("skipping state sync", @@ -422,27 +469,6 @@ func (ss *stateSyncer) GetAcceptedStateSummaryFailed(ctx context.Context, nodeID return ss.AcceptedStateSummary(ctx, nodeID, requestID, nil) } -func (ss *stateSyncer) Start(ctx context.Context, startReqID uint32) error { - ss.Ctx.Log.Info("starting state sync") - - ss.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.StateSyncing, - }) - if err := ss.VM.SetState(ctx, snow.StateSyncing); err != nil { - return fmt.Errorf("failed to notify VM that state syncing has started: %w", err) - } - - ss.requestID = startReqID - - if !ss.StartupTracker.ShouldStart() { - return nil - } - - ss.started = true - return ss.startup(ctx) -} - // startup do start the whole state sync process by // sampling frontier seeders, listing state syncers to request votes to // and reaching out frontier seeders if any. Otherwise, it moves immediately @@ -484,9 +510,7 @@ func (ss *stateSyncer) startup(ctx context.Context) error { } // list all beacons, to reach them for voting on frontier - for _, nodeID := range ss.StateSyncBeacons.GetValidatorIDs(ss.Ctx.SubnetID) { - ss.targetVoters.Add(nodeID) - } + ss.targetVoters.Add(ss.StateSyncBeacons.GetValidatorIDs(ss.Ctx.SubnetID)...) // check if there is an ongoing state sync; if so add its state summary // to the frontier to request votes on @@ -509,7 +533,6 @@ func (ss *stateSyncer) startup(ctx context.Context) error { } // initiate messages exchange - ss.attempts++ if ss.targetSeeders.Len() == 0 { ss.Ctx.Log.Info("State syncing skipped due to no provided syncers") return ss.onDoneStateSyncing(ctx, ss.requestID) @@ -520,22 +543,12 @@ func (ss *stateSyncer) startup(ctx context.Context) error { return nil } -func (ss *stateSyncer) restart(ctx context.Context) error { - if ss.attempts > 0 && ss.attempts%ss.RetryBootstrapWarnFrequency == 0 { - ss.Ctx.Log.Debug("check internet connection", - zap.Int("numSyncAttempts", ss.attempts), - ) - } - - return ss.startup(ctx) -} - // Ask up to [common.MaxOutstandingBroadcastRequests] state sync validators at a time // to send their accepted state summary. It is called again until there are // no more seeders to be reached in the pending set func (ss *stateSyncer) sendGetStateSummaryFrontiers(ctx context.Context) { vdrs := set.NewSet[ids.NodeID](1) - for ss.targetSeeders.Len() > 0 && ss.pendingSeeders.Len() < common.MaxOutstandingBroadcastRequests { + for ss.targetSeeders.Len() > 0 && ss.pendingSeeders.Len() < maxOutstandingBroadcastRequests { vdr, _ := ss.targetSeeders.Pop() vdrs.Add(vdr) ss.pendingSeeders.Add(vdr) @@ -551,7 +564,7 @@ func (ss *stateSyncer) sendGetStateSummaryFrontiers(ctx context.Context) { // no more voters to be reached in the pending set. 
func (ss *stateSyncer) sendGetAcceptedStateSummaries(ctx context.Context) { vdrs := set.NewSet[ids.NodeID](1) - for ss.targetVoters.Len() > 0 && ss.pendingVoters.Len() < common.MaxOutstandingBroadcastRequests { + for ss.targetVoters.Len() > 0 && ss.pendingVoters.Len() < maxOutstandingBroadcastRequests { vdr, _ := ss.targetVoters.Pop() vdrs.Add(vdr) ss.pendingVoters.Add(vdr) @@ -578,31 +591,6 @@ func (ss *stateSyncer) Notify(ctx context.Context, msg common.Message) error { return ss.onDoneStateSyncing(ctx, ss.requestID) } -func (ss *stateSyncer) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := ss.VM.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if err := ss.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if ss.started || !ss.StartupTracker.ShouldStart() { - return nil - } - - ss.started = true - return ss.startup(ctx) -} - -func (ss *stateSyncer) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - if err := ss.VM.Disconnected(ctx, nodeID); err != nil { - return err - } - - return ss.StartupTracker.Disconnected(ctx, nodeID) -} - func (*stateSyncer) Gossip(context.Context) error { return nil } @@ -634,10 +622,6 @@ func (ss *stateSyncer) HealthCheck(ctx context.Context) (interface{}, error) { return intf, vmErr } -func (ss *stateSyncer) GetVM() common.VM { - return ss.VM -} - func (ss *stateSyncer) IsEnabled(ctx context.Context) (bool, error) { if ss.stateSyncVM == nil { // state sync is not implemented diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index 47a00e744471..11faeae69f67 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
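// Editor's sketch (not part of the diff): the flattened syncer.NewConfig signature
// exercised by the rewritten tests below, with the embedded common.Config replaced
// by explicit arguments. The wrapper and its argument choices (SampleK equal to the
// full beacon count, no manually supplied state-sync beacons) are illustrative only.
package example

import (
	"github.com/ava-labs/avalanchego/snow"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/snow/engine/common/tracker"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/syncer"
	"github.com/ava-labs/avalanchego/snow/validators"
)

func newSyncerConfig(
	getHandler common.AllGetsServer,
	ctx *snow.ConsensusContext,
	startup tracker.Startup,
	sender common.Sender,
	beacons validators.Manager,
	alpha uint64,
	vm block.ChainVM,
) (syncer.Config, error) {
	return syncer.NewConfig(
		getHandler,
		ctx,
		startup,
		sender,
		beacons,
		beacons.Count(ctx.SubnetID), // SampleK: sample from every beacon
		alpha,
		nil, // no operator-provided state syncer IDs
		vm,
	)
}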
package syncer @@ -9,16 +9,20 @@ import ( "errors" "math" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -35,20 +39,25 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require := require.New(t) // Build state syncer + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) sender := &common.SenderTest{T: t} - commonCfg := &common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Sender: sender, - } // Non state syncableVM case nonStateSyncableVM := &block.TestVM{ TestVM: common.TestVM{T: t}, } - dummyGetter, err := getter.New(nonStateSyncableVM, *commonCfg) + dummyGetter, err := getter.New( + nonStateSyncableVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(err) - cfg, err := NewConfig(*commonCfg, nil, dummyGetter, nonStateSyncableVM) + cfg, err := NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, nonStateSyncableVM) require.NoError(err) syncer := New(cfg, func(context.Context, uint32) error { return nil @@ -59,8 +68,6 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require.False(enabled) // State syncableVM case - commonCfg.Ctx = snow.DefaultConsensusContextTest() // reset metrics - fullVM := &fullVM{ TestVM: &block.TestVM{ TestVM: common.TestVM{T: t}, @@ -69,10 +76,16 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { T: t, }, } - dummyGetter, err = getter.New(fullVM, *commonCfg) + dummyGetter, err = getter.New( + fullVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry()) require.NoError(err) - cfg, err = NewConfig(*commonCfg, nil, dummyGetter, fullVM) + cfg, err = NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, fullVM) require.NoError(err) syncer = New(cfg, func(context.Context, uint32) error { return nil @@ -97,73 +110,61 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - alpha, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + alpha, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := alpha peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: alpha, - StartupTracker: startup, - } - syncer, _, sender := buildTestsObjects(t, &commonCfg) + syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) sender.CantSendGetStateSummaryFrontier = true 
sender.SendGetStateSummaryFrontierF = func(context.Context, set.Set[ids.NodeID], uint32) {} startReqID := uint32(0) // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. - require.False(commonCfg.StartupTracker.ShouldStart()) + require.False(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(beacons.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) require.NoError(syncer.Connected(context.Background(), vdr0, version.CurrentApp)) - require.False(commonCfg.StartupTracker.ShouldStart()) + require.False(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. vdr := ids.GenerateTestNodeID() - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) + require.NoError(beacons.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) require.NoError(syncer.Connected(context.Background(), vdr, version.CurrentApp)) - require.True(commonCfg.StartupTracker.ShouldStart()) + require.True(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.True(syncer.started) } func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) + syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // mock VM to simulate a valid summary is returned localSummary := &block.TestStateSummary{ @@ -177,7 +178,7 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } @@ -190,24 +191,18 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) 
startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) + syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // mock VM to simulate a no summary returned fullVM.CantStateSyncGetOngoingSummary = true @@ -216,7 +211,7 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } @@ -227,24 +222,18 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, _, sender := buildTestsObjects(t, &commonCfg) + syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := set.NewSet[ids.NodeID](3) @@ -254,12 +243,12 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } // check that vdrs are reached out for frontiers - require.Len(contactedFrontiersProviders, safemath.Min(vdrs.Count(ctx.SubnetID), common.MaxOutstandingBroadcastRequests)) + require.Len(contactedFrontiersProviders, safemath.Min(beacons.Count(ctx.SubnetID), maxOutstandingBroadcastRequests)) for beaconID := range contactedFrontiersProviders { // check that beacon is duly marked as reached out require.Contains(syncer.pendingSeeders, beaconID) @@ -272,24 +261,18 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := 
tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -301,13 +284,13 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) require.Positive(initiallyReachedOutBeaconsSize) - require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true @@ -362,30 +345,24 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // other listed vdrs are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Count(ctx.SubnetID)) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) } func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -397,13 +374,13 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) require.Positive(initiallyReachedOutBeaconsSize) - require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // mock VM to simulate an invalid summary is returned summary := []byte{'s', 'u', 'm', 'm', 'a', 'r', 
'y'} @@ -437,30 +414,24 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Count(ctx.SubnetID)) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) } func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -472,13 +443,13 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) require.Positive(initiallyReachedOutBeaconsSize) - require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // pick one of the vdrs that have been reached out unresponsiveBeaconID := pickRandomFrom(contactedFrontiersProviders) @@ -505,7 +476,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Count(ctx.SubnetID)) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) // mock VM to simulate a valid but late summary is returned fullVM.CantParseStateSummary = true @@ -532,26 +503,18 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - 
SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, - RetryBootstrapWarnFrequency: 1, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -588,7 +551,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -629,24 +592,18 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -677,7 +634,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -700,30 +657,24 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) require.Positive(initiallyContactedVotersSize) - require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) } func TestUnRequestedVotesAreDropped(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: 
ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -753,7 +704,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -776,7 +727,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) require.Positive(initiallyContactedVotersSize) - require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -790,7 +741,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, math.MaxInt32, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveVoter still pending @@ -803,7 +754,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), unsolicitedVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) require.Zero(syncer.weightedSummaries[summaryID].weight) @@ -812,41 +763,35 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveBeacon not pending anymore require.NotContains(syncer.pendingSeeders, responsiveVoterID) - voterWeight := vdrs.GetWeight(ctx.SubnetID, responsiveVoterID) + voterWeight := beacons.GetWeight(ctx.SubnetID, responsiveVoterID) require.Equal(voterWeight, syncer.weightedSummaries[summaryID].weight) // other listed voters are reached out require.True( len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == vdrs.Count(ctx.SubnetID)) + len(contactedVoters) == beacons.Count(ctx.SubnetID)) } func TestVotesForUnknownSummariesAreDropped(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -876,7 +821,7 @@ func TestVotesForUnknownSummariesAreDropped(t 
*testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -899,7 +844,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) require.Positive(initiallyContactedVotersSize) - require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -913,7 +858,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{unknownSummaryID}, + set.Of(unknownSummaryID), )) _, found = syncer.weightedSummaries[unknownSummaryID] require.False(found) @@ -924,7 +869,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) require.Zero(syncer.weightedSummaries[summaryID].weight) @@ -932,30 +877,25 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // on unknown summary require.True( len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == vdrs.Count(ctx.SubnetID)) + len(contactedVoters) == beacons.Count(ctx.SubnetID)) } func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -1001,7 +941,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -1053,23 +993,23 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { reqID := contactedVoters[voterID] switch { - case cumulatedWeight < commonCfg.Alpha/2: + case cumulatedWeight < alpha/2: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{summaryID, minoritySummaryID}, + set.Of(summaryID, minoritySummaryID), )) - cumulatedWeight += 
vdrs.GetWeight(ctx.SubnetID, voterID) + cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID) - case cumulatedWeight < commonCfg.Alpha: + case cumulatedWeight < alpha: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) - cumulatedWeight += vdrs.GetWeight(ctx.SubnetID, voterID) + cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID) default: require.NoError(syncer.GetAcceptedStateSummaryFailed( @@ -1088,26 +1028,19 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, // this sets RetryStateSyncing too - RetryBootstrapWarnFrequency: 1, // this sets RetrySyncingWarnFrequency too - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -1139,7 +1072,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -1173,19 +1106,19 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { reqID := contactedVoters[voterID] // vdr carries the largest weight by far. 
Make sure it fails - if timedOutWeight <= commonCfg.Alpha { + if timedOutWeight <= alpha { require.NoError(syncer.GetAcceptedStateSummaryFailed( context.Background(), voterID, reqID, )) - timedOutWeight += vdrs.GetWeight(ctx.SubnetID, voterID) + timedOutWeight += beacons.GetWeight(ctx.SubnetID, voterID) } else { require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) } } @@ -1201,24 +1134,19 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -1264,7 +1192,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. } // Connect enough stake to start syncer - for _, nodeID := range vdrs.GetValidatorIDs(ctx.SubnetID) { + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -1323,23 +1251,23 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. reqID := contactedVoters[voterID] switch { - case votingWeightStake < commonCfg.Alpha/2: + case votingWeightStake < alpha/2: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{minoritySummary1.ID(), minoritySummary2.ID()}, + set.Of(minoritySummary1.ID(), minoritySummary2.ID()), )) - votingWeightStake += vdrs.GetWeight(ctx.SubnetID, voterID) + votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID) default: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}}, + set.Of(ids.ID{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}), )) - votingWeightStake += vdrs.GetWeight(ctx.SubnetID, voterID) + votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID) } } @@ -1352,27 +1280,18 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() - vdrs := buildTestPeers(t, ctx.SubnetID) - totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(ctx.SubnetID, startup) - - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), - Alpha: (totalWeight + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, // this sets RetryStateSyncing too - RetryBootstrapWarnFrequency: 1, // this sets RetrySyncingWarnFrequency too - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) - _ = fullVM + beacons.RegisterCallbackListener(ctx.SubnetID, startup) + + syncer, _, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) stateSyncFullyDone := false syncer.onDoneStateSyncing = func(context.Context, uint32) error { diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index f83a3006aaa1..a5217a4bf0dd 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package syncer @@ -6,12 +6,15 @@ package syncer import ( "context" "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/validators" @@ -55,26 +58,29 @@ type fullVM struct { } func buildTestPeers(t *testing.T, subnetID ids.ID) validators.Manager { - // we consider more than common.MaxOutstandingBroadcastRequests peers - // so to test the effect of cap on number of requests sent out + // We consider more than maxOutstandingBroadcastRequests peers to test + // capping the number of requests sent out. 
vdrs := validators.NewManager() - for idx := 0; idx < 2*common.MaxOutstandingBroadcastRequests; idx++ { + for idx := 0; idx < 2*maxOutstandingBroadcastRequests; idx++ { beaconID := ids.GenerateTestNodeID() require.NoError(t, vdrs.AddStaker(subnetID, beaconID, nil, ids.Empty, 1)) } return vdrs } -func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( +func buildTestsObjects( + t *testing.T, + ctx *snow.ConsensusContext, + startupTracker tracker.Startup, + beacons validators.Manager, + alpha uint64, +) ( *stateSyncer, *fullVM, *common.SenderTest, ) { require := require.New(t) - sender := &common.SenderTest{T: t} - commonCfg.Sender = sender - fullVM := &fullVM{ TestVM: &block.TestVM{ TestVM: common.TestVM{T: t}, @@ -83,10 +89,28 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( T: t, }, } - dummyGetter, err := getter.New(fullVM, *commonCfg) + sender := &common.SenderTest{T: t} + dummyGetter, err := getter.New( + fullVM, + sender, + ctx.Log, + time.Second, + 2000, + ctx.Registerer, + ) require.NoError(err) - cfg, err := NewConfig(*commonCfg, nil, dummyGetter, fullVM) + cfg, err := NewConfig( + dummyGetter, + ctx, + startupTracker, + sender, + beacons, + beacons.Count(ctx.SubnetID), + alpha, + nil, + fullVM, + ) require.NoError(err) commonSyncer := New(cfg, func(context.Context, uint32) error { return nil diff --git a/snow/engine/snowman/test_engine.go b/snow/engine/snowman/test_engine.go index ed6e1b1743c5..eada8463a041 100644 --- a/snow/engine/snowman/test_engine.go +++ b/snow/engine/snowman/test_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/engine/snowman/traced_engine.go b/snow/engine/snowman/traced_engine.go index 56b46de45d4e..f736dff48fbf 100644 --- a/snow/engine/snowman/traced_engine.go +++ b/snow/engine/snowman/traced_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 803c03237c96..6fe2b05351d6 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -7,6 +7,8 @@ import ( "context" "fmt" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" @@ -23,6 +25,7 @@ import ( "github.com/ava-labs/avalanchego/snow/event" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" @@ -31,7 +34,14 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -const nonVerifiedCacheSize = 64 * units.MiB +const ( + nonVerifiedCacheSize = 64 * units.MiB + + // putGossipPeriod specifies the number of times Gossip will be called per + // Put gossip. This is done to avoid splitting Gossip into multiple + // functions and to allow more frequent pull gossip than push gossip. 
+ putGossipPeriod = 10 +) var _ Engine = (*Transitive)(nil) @@ -58,13 +68,16 @@ type Transitive struct { common.AppHandler validators.Connector - RequestID uint32 + requestID uint32 + + gossipCounter int // track outstanding preference requests polls poll.Set // blocks that have we have sent get requests for but haven't yet received - blkReqs common.Requests + blkReqs *bimap.BiMap[common.Request, ids.ID] + blkReqSourceMetric map[common.Request]prometheus.Counter // blocks that are queued to be issued to consensus once missing dependencies are fetched // Block ID --> Block @@ -116,6 +129,16 @@ func newTransitive(config Config) (*Transitive, error) { config.Params.AlphaPreference, config.Params.AlphaConfidence, ) + polls, err := poll.NewSet( + factory, + config.Ctx.Log, + "", + config.Ctx.Registerer, + ) + if err != nil { + return nil, err + } + t := &Transitive{ Config: config, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), @@ -129,17 +152,80 @@ func newTransitive(config Config) (*Transitive, error) { nonVerifieds: ancestor.NewTree(), nonVerifiedCache: nonVerifiedCache, acceptedFrontiers: acceptedFrontiers, - polls: poll.NewSet( - factory, - config.Ctx.Log, - "", - config.Ctx.Registerer, - ), + polls: polls, + blkReqs: bimap.New[common.Request, ids.ID](), + blkReqSourceMetric: make(map[common.Request]prometheus.Counter), } return t, t.metrics.Initialize("", config.Ctx.Registerer) } +func (t *Transitive) Gossip(ctx context.Context) error { + lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() + if numProcessing := t.Consensus.NumProcessing(); numProcessing == 0 { + t.Ctx.Log.Verbo("sampling from validators", + zap.Stringer("validators", t.Validators), + ) + + // Uniform sampling is used here to reduce bandwidth requirements of + // nodes with a large amount of stake weight. 
+ vdrID, ok := t.ConnectedValidators.SampleValidator() + if !ok { + t.Ctx.Log.Error("skipping block gossip", + zap.String("reason", "no connected validators"), + ) + return nil + } + + nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1) + if err != nil { + t.Ctx.Log.Error("skipping block gossip", + zap.String("reason", "block height overflow"), + zap.Stringer("blkID", lastAcceptedID), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Error(err), + ) + return nil + } + + t.requestID++ + t.Sender.SendPullQuery( + ctx, + set.Of(vdrID), + t.requestID, + t.Consensus.Preference(), + nextHeightToAccept, + ) + } else { + t.Ctx.Log.Debug("skipping block gossip", + zap.String("reason", "blocks currently processing"), + zap.Int("numProcessing", numProcessing), + ) + } + + // TODO: Remove periodic push gossip after v1.11.x is activated + t.gossipCounter++ + t.gossipCounter %= putGossipPeriod + if t.gossipCounter > 0 { + return nil + } + + lastAccepted, err := t.GetBlock(ctx, lastAcceptedID) + if err != nil { + t.Ctx.Log.Warn("dropping gossip request", + zap.String("reason", "block couldn't be loaded"), + zap.Stringer("blkID", lastAcceptedID), + zap.Error(err), + ) + return nil + } + t.Ctx.Log.Verbo("gossiping accepted block to the network", + zap.Stringer("blkID", lastAcceptedID), + ) + t.Sender.SendGossip(ctx, lastAccepted.Bytes()) + return nil +} + func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { blk, err := t.VM.ParseBlock(ctx, blkBytes) if err != nil { @@ -163,20 +249,39 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 return t.GetFailed(ctx, nodeID, requestID) } - actualBlkID := blk.ID() - expectedBlkID, ok := t.blkReqs.Get(nodeID, requestID) - // If the provided block is not the requested block, we need to explicitly - // mark the request as failed to avoid having a dangling dependency. - if ok && actualBlkID != expectedBlkID { - t.Ctx.Log.Debug("incorrect block returned in Put", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("blkID", actualBlkID), - zap.Stringer("expectedBlkID", expectedBlkID), - ) - // We assume that [blk] is useless because it doesn't match what we - // expected. - return t.GetFailed(ctx, nodeID, requestID) + var ( + req = common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + issuedMetric prometheus.Counter + ) + switch expectedBlkID, ok := t.blkReqs.GetValue(req); { + case ok: + actualBlkID := blk.ID() + if actualBlkID != expectedBlkID { + t.Ctx.Log.Debug("incorrect block returned in Put", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("blkID", actualBlkID), + zap.Stringer("expectedBlkID", expectedBlkID), + ) + // We assume that [blk] is useless because it doesn't match what we + // expected. + return t.GetFailed(ctx, nodeID, requestID) + } + + issuedMetric = t.blkReqSourceMetric[req] + case requestID == constants.GossipMsgRequestID: + issuedMetric = t.metrics.issued.WithLabelValues(putGossipSource) + default: + // This can happen if this block was provided to this engine while a Get + // request was outstanding. For example, the block may have been locally + // built or the node may have received a PushQuery with this block. + // + // Note: It is still possible this block will be issued here, because + // the block may have previously failed verification. 
+ issuedMetric = t.metrics.issued.WithLabelValues(unknownSource) } if t.wasIssued(blk) { @@ -188,7 +293,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. - if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } return t.buildBlocks(ctx) @@ -196,8 +301,13 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // We don't assume that this function is called after a failed Get message. - // Check to see if we have an outstanding request and also get what the request was for if it exists. - blkID, ok := t.blkReqs.Remove(nodeID, requestID) + // Check to see if we have an outstanding request and also get what the + // request was for if it exists. + req := common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + blkID, ok := t.blkReqs.DeleteKey(req) if !ok { t.Ctx.Log.Debug("unexpected GetFailed", zap.Stringer("nodeID", nodeID), @@ -205,6 +315,7 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID ) return nil } + delete(t.blkReqSourceMetric, req) // Because the get request was dropped, we no longer expect blkID to be issued. t.blocked.Abandon(ctx, blkID) @@ -216,9 +327,11 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID, requestedHeight uint64) error { t.sendChits(ctx, nodeID, requestID, requestedHeight) + issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource) + // Try to issue [blkID] to consensus. // If we're missing an ancestor, request it from [vdr] - if _, err := t.issueFromByID(ctx, nodeID, blkID); err != nil { + if _, err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { return err } @@ -252,12 +365,14 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID t.metrics.numUselessPushQueryBytes.Add(float64(len(blkBytes))) } + issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource) + // issue the block into consensus. If the block has already been issued, // this will be a noop. If this block has missing dependencies, nodeID will // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. 
- if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } @@ -275,7 +390,9 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin zap.Stringer("acceptedID", acceptedID), ) - addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID) + issuedMetric := t.metrics.issued.WithLabelValues(pullGossipSource) + + addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric) if err != nil { return err } @@ -289,7 +406,7 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin responseOptions = []ids.ID{preferredID} ) if preferredID != preferredIDAtHeight { - addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight) + addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric) if err != nil { return err } @@ -341,28 +458,6 @@ func (*Transitive) Timeout(context.Context) error { return nil } -func (t *Transitive) Gossip(ctx context.Context) error { - blkID, err := t.VM.LastAccepted(ctx) - if err != nil { - return err - } - - blk, err := t.GetBlock(ctx, blkID) - if err != nil { - t.Ctx.Log.Warn("dropping gossip request", - zap.String("reason", "block couldn't be loaded"), - zap.Stringer("blkID", blkID), - zap.Error(err), - ) - return nil - } - t.Ctx.Log.Verbo("gossiping accepted block to the network", - zap.Stringer("blkID", blkID), - ) - t.Sender.SendGossip(ctx, blk.Bytes()) - return nil -} - func (*Transitive) Halt(context.Context) {} func (t *Transitive) Shutdown(ctx context.Context) error { @@ -396,7 +491,7 @@ func (t *Transitive) Context() *snow.ConsensusContext { } func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { - t.RequestID = startReqID + t.requestID = startReqID lastAcceptedID, err := t.VM.LastAccepted(ctx) if err != nil { return err @@ -429,9 +524,10 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { case err != nil: return err default: + issuedMetric := t.metrics.issued.WithLabelValues(builtSource) for _, blk := range options { // note that deliver will set the VM's preference - if err := t.deliver(ctx, blk, false); err != nil { + if err := t.deliver(ctx, t.Ctx.NodeID, blk, false, issuedMetric); err != nil { return err } } @@ -460,6 +556,15 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { t.Ctx.Lock.Lock() defer t.Ctx.Lock.Unlock() + t.Ctx.Log.Verbo("running health check", + zap.Uint32("requestID", t.requestID), + zap.Int("gossipCounter", t.gossipCounter), + zap.Stringer("polls", t.polls), + zap.Reflect("outstandingBlockRequests", t.blkReqs), + zap.Stringer("blockedJobs", &t.blocked), + zap.Int("pendingBuildBlocks", t.pendingBuildBlocks), + ) + consensusIntf, consensusErr := t.Consensus.HealthCheck(ctx) vmIntf, vmErr := t.VM.HealthCheck(ctx) intf := map[string]interface{}{ @@ -475,10 +580,6 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) } -func (t *Transitive) GetVM() common.VM { - return t.VM -} - func (t *Transitive) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if blk, ok := t.pending[blkID]; ok { return blk, nil @@ -595,7 +696,8 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { ) } - added, err := t.issueWithAncestors(ctx, blk) + issuedMetric := t.metrics.issued.WithLabelValues(builtSource) + added, err := t.issueWithAncestors(ctx, blk, 
issuedMetric) if err != nil { return err } @@ -625,23 +727,33 @@ func (t *Transitive) repoll(ctx context.Context) { // issueFromByID attempts to issue the branch ending with a block [blkID] into consensus. // If we do not have [blkID], request it. // Returns true if the block is processing in consensus or is decided. -func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) (bool, error) { +func (t *Transitive) issueFromByID( + ctx context.Context, + nodeID ids.NodeID, + blkID ids.ID, + issuedMetric prometheus.Counter, +) (bool, error) { blk, err := t.GetBlock(ctx, blkID) if err != nil { - t.sendRequest(ctx, nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID, issuedMetric) return false, nil } - return t.issueFrom(ctx, nodeID, blk) + return t.issueFrom(ctx, nodeID, blk, issuedMetric) } // issueFrom attempts to issue the branch ending with block [blkID] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing, request it from [vdr]. -func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowman.Block) (bool, error) { +func (t *Transitive) issueFrom( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { // issue [blk] and its ancestors to consensus. blkID := blk.ID() for !t.wasIssued(blk) { - if err := t.issue(ctx, blk, false); err != nil { + if err := t.issue(ctx, nodeID, blk, false, issuedMetric); err != nil { return false, err } @@ -651,13 +763,15 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm // If we don't have this ancestor, request it from [vdr] if err != nil || !blk.Status().Fetched() { - t.sendRequest(ctx, nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID, issuedMetric) return false, nil } } // Remove any outstanding requests for this block - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } issued := t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) if issued { @@ -676,12 +790,16 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm // issueWithAncestors attempts to issue the branch ending with [blk] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned. -func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) (bool, error) { +func (t *Transitive) issueWithAncestors( + ctx context.Context, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { blkID := blk.ID() // issue [blk] and its ancestors into consensus status := blk.Status() for status.Fetched() && !t.wasIssued(blk) { - err := t.issue(ctx, blk, true) + err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric) if err != nil { return false, err } @@ -701,7 +819,7 @@ func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) // There's an outstanding request for this block. // We can just wait for that request to succeed or fail. - if t.blkReqs.Contains(blkID) { + if t.blkReqs.HasValue(blkID) { return false, nil } @@ -723,20 +841,30 @@ func (t *Transitive) wasIssued(blk snowman.Block) bool { // Issue [blk] to consensus once its ancestors have been issued. // If [push] is true, a push query will be used. Otherwise, a pull query will be // used. 
-func (t *Transitive) issue(ctx context.Context, blk snowman.Block, push bool) error { +func (t *Transitive) issue( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + push bool, + issuedMetric prometheus.Counter, +) error { blkID := blk.ID() // mark that the block is queued to be added to consensus once its ancestors have been t.pending[blkID] = blk // Remove any outstanding requests for this block - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } // Will add [blk] to consensus once its ancestors have been i := &issuer{ - t: t, - blk: blk, - push: push, + t: t, + nodeID: nodeID, + blk: blk, + issuedMetric: issuedMetric, + push: push, } // block on the parent if needed @@ -759,20 +887,31 @@ func (t *Transitive) issue(ctx context.Context, blk snowman.Block, push bool) er } // Request that [vdr] send us block [blkID] -func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) { +func (t *Transitive) sendRequest( + ctx context.Context, + nodeID ids.NodeID, + blkID ids.ID, + issuedMetric prometheus.Counter, +) { // There is already an outstanding request for this block - if t.blkReqs.Contains(blkID) { + if t.blkReqs.HasValue(blkID) { return } - t.RequestID++ - t.blkReqs.Add(nodeID, t.RequestID, blkID) + t.requestID++ + req := common.Request{ + NodeID: nodeID, + RequestID: t.requestID, + } + t.blkReqs.Put(req, blkID) + t.blkReqSourceMetric[req] = issuedMetric + t.Ctx.Log.Verbo("sending Get request", zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", t.RequestID), + zap.Uint32("requestID", t.requestID), zap.Stringer("blkID", blkID), ) - t.Sender.SendGet(ctx, nodeID, t.RequestID, blkID) + t.Sender.SendGet(ctx, nodeID, t.requestID, blkID) // Tracks performance statistics t.metrics.numRequests.Set(float64(t.blkReqs.Len())) @@ -796,6 +935,7 @@ func (t *Transitive) sendQuery( t.Ctx.Log.Error("dropped query for block", zap.String("reason", "insufficient number of validators"), zap.Stringer("blkID", blkID), + zap.Int("size", t.Params.K), ) return } @@ -813,28 +953,34 @@ func (t *Transitive) sendQuery( } vdrBag := bag.Of(vdrIDs...) - t.RequestID++ - if !t.polls.Add(t.RequestID, vdrBag) { + t.requestID++ + if !t.polls.Add(t.requestID, vdrBag) { t.Ctx.Log.Error("dropped query for block", zap.String("reason", "failed to add poll"), zap.Stringer("blkID", blkID), - zap.Uint32("requestID", t.RequestID), + zap.Uint32("requestID", t.requestID), ) return } vdrSet := set.Of(vdrIDs...) if push { - t.Sender.SendPushQuery(ctx, vdrSet, t.RequestID, blkBytes, nextHeightToAccept) + t.Sender.SendPushQuery(ctx, vdrSet, t.requestID, blkBytes, nextHeightToAccept) } else { - t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, blkID, nextHeightToAccept) + t.Sender.SendPullQuery(ctx, vdrSet, t.requestID, blkID, nextHeightToAccept) } } // issue [blk] to consensus // If [push] is true, a push query will be used. Otherwise, a pull query will be // used. -func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool) error { +func (t *Transitive) deliver( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + push bool, + issuedMetric prometheus.Counter, +) error { blkID := blk.ID() if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { return nil @@ -860,7 +1006,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool) // By ensuring that the parent is either processing or accepted, it is // guaranteed that the parent was successfully verified. 
This means that // calling Verify on this block is allowed. - blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric) if err != nil { return err } @@ -884,7 +1030,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool) } for _, blk := range options { - blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric) if err != nil { return err } @@ -916,13 +1062,17 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool) t.removeFromPending(blk) t.blocked.Fulfill(ctx, blkID) - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } } for _, blk := range dropped { blkID := blk.ID() t.removeFromPending(blk) t.blocked.Abandon(ctx, blkID) - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } } // If we should issue multiple queries at the same time, we need to repoll @@ -964,13 +1114,21 @@ func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // addUnverifiedBlockToConsensus returns whether the block was added and an // error if one occurred while adding it to consensus. -func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snowman.Block) (bool, error) { +func (t *Transitive) addUnverifiedBlockToConsensus( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { blkID := blk.ID() + blkHeight := blk.Height() // make sure this block is valid if err := blk.Verify(ctx); err != nil { t.Ctx.Log.Debug("block verification failed", + zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), zap.Error(err), ) @@ -979,11 +1137,15 @@ func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snow return false, nil } + issuedMetric.Inc() t.nonVerifieds.Remove(blkID) t.nonVerifiedCache.Evict(blkID) t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) + t.metrics.issuerStake.Observe(float64(t.Validators.GetWeight(t.Ctx.SubnetID, nodeID))) t.Ctx.Log.Verbo("adding block to consensus", + zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), ) return true, t.Consensus.Add(ctx, &memoryBlock{ Block: blk, diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 8993a4e90f9b..a6b96ced21bd 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
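The rewritten Gossip above always drives consensus with a pull query and only falls back to the legacy push gossip of the last accepted block once every putGossipPeriod calls. Below is a minimal, self-contained sketch of that counter pattern; the gossiper type and the print statements are illustrative stand-ins for the engine's sender calls, not its real API.

package main

import "fmt"

const putGossipPeriod = 10

type gossiper struct {
	counter int
}

func (g *gossiper) gossip() {
	// Pull gossip happens on every invocation.
	fmt.Println("pull query for the current preference")

	// Push gossip is rate limited to one out of every putGossipPeriod calls.
	g.counter++
	g.counter %= putGossipPeriod
	if g.counter > 0 {
		return
	}
	fmt.Println("push gossip of the last accepted block")
}

func main() {
	g := &gossiper{}
	for i := 0; i < 20; i++ {
		g.gossip() // the push line prints twice across 20 iterations
	}
}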
package snowman @@ -8,6 +8,7 @@ import ( "context" "errors" "testing" + "time" "github.com/stretchr/testify/require" @@ -21,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -32,25 +34,34 @@ var ( Genesis = ids.GenerateTestID() ) -func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { +func setup(t *testing.T, engCfg Config) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { require := require.New(t) vals := validators.NewManager() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - require.NoError(vals.AddStaker(commonCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + require.NoError(engCfg.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) + + vals.RegisterCallbackListener(engCfg.Ctx.SubnetID, engCfg.ConnectedValidators) sender := &common.SenderTest{T: t} engCfg.Sender = sender - commonCfg.Sender = sender sender.Default(true) vm := &block.TestVM{} vm.T = t engCfg.VM = vm - snowGetHandler, err := getter.New(vm, commonCfg) + snowGetHandler, err := getter.New( + vm, + sender, + engCfg.Ctx.Log, + time.Second, + 2000, + engCfg.Ctx.Registerer, + ) require.NoError(err) engCfg.AllGetsServer = snowGetHandler @@ -87,9 +98,8 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va } func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { - commonCfg := common.DefaultConfigTest() - engCfg := DefaultConfigs() - return setup(t, commonCfg, engCfg) + engCfg := DefaultConfig(t) + return setup(t, engCfg) } func TestEngineShutdown(t *testing.T) { @@ -318,7 +328,7 @@ func TestEngineQuery(t *testing.T) { func TestEngineMultipleQuery(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ K: 3, AlphaPreference: 2, @@ -406,7 +416,13 @@ func TestEngineMultipleQuery(t *testing.T) { } } - require.NoError(te.issue(context.Background(), blk0, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk0, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) blk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -516,10 +532,22 @@ func TestEngineBlockedIssue(t *testing.T) { } } - require.NoError(te.issue(context.Background(), blk1, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk1, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) blk0.StatusV = choices.Processing - require.NoError(te.issue(context.Background(), blk0, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk0, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) require.Equal(blk1.ID(), te.Consensus.Preference()) } @@ -552,7 +580,13 @@ func TestEngineAbandonResponse(t *testing.T) { return nil, errUnknownBlock } - require.NoError(te.issue(context.Background(), blk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) require.NoError(te.QueryFailed(context.Background(), vdr, 1)) require.Empty(te.blocked) @@ -713,7 
+747,7 @@ func TestEngineRepoll(t *testing.T) { func TestVoteCanceling(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ K: 3, AlphaPreference: 2, @@ -791,7 +825,13 @@ func TestVoteCanceling(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue(context.Background(), blk, true)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) require.Equal(1, te.polls.Len()) @@ -811,7 +851,7 @@ func TestVoteCanceling(t *testing.T) { func TestEngineNoQuery(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -852,13 +892,19 @@ func TestEngineNoQuery(t *testing.T) { BytesV: []byte{1}, } - require.NoError(te.issue(context.Background(), blk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) } func TestEngineNoRepollQuery(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -955,7 +1001,13 @@ func TestEngineAbandonChit(t *testing.T) { reqID = requestID } - require.NoError(te.issue(context.Background(), blk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -1010,7 +1062,13 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { reqID = requestID } - require.NoError(te.issue(context.Background(), blk, true)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -1093,7 +1151,13 @@ func TestEngineBlockingChitRequest(t *testing.T) { return blockingBlk, nil } - require.NoError(te.issue(context.Background(), parentBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + parentBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) sender.CantSendChits = false @@ -1104,7 +1168,13 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - require.NoError(te.issue(context.Background(), missingBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + missingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) require.Empty(te.blocked) } @@ -1157,7 +1227,13 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - require.NoError(te.issue(context.Background(), blockingBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blockingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) queryRequestID := new(uint32) sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { @@ -1168,7 +1244,13 @@ func TestEngineBlockingChitResponse(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue(context.Background(), issuedBlk, false)) + require.NoError(te.issue( + context.Background(), + 
te.Ctx.NodeID, + issuedBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) sender.SendPushQueryF = nil sender.CantSendPushQuery = false @@ -1179,7 +1261,13 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - require.NoError(te.issue(context.Background(), missingBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + missingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) } func TestEngineRetryFetch(t *testing.T) { @@ -1275,9 +1363,21 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { return nil, errUnknownBlock } } - require.NoError(te.issue(context.Background(), validBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + validBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) sender.SendPushQueryF = nil - require.NoError(te.issue(context.Background(), invalidBlk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + invalidBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) require.NoError(te.Chits(context.Background(), vdr, *reqID, invalidBlkID, invalidBlkID, invalidBlkID)) require.Equal(choices.Accepted, validBlk.Status()) @@ -1286,7 +1386,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { func TestEngineGossip(t *testing.T) { require := require.New(t) - _, _, sender, vm, te, gBlk := setupDefaultConfig(t) + nodeID, _, sender, vm, te, gBlk := setupDefaultConfig(t) vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return gBlk.ID(), nil @@ -1296,15 +1396,15 @@ func TestEngineGossip(t *testing.T) { return gBlk, nil } - called := new(bool) - sender.SendGossipF = func(_ context.Context, blkBytes []byte) { - *called = true - require.Equal(gBlk.Bytes(), blkBytes) + var calledSendPullQuery bool + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ ids.ID, _ uint64) { + calledSendPullQuery = true + require.Equal(set.Of(nodeID), nodeIDs) } require.NoError(te.Gossip(context.Background())) - require.True(*called) + require.True(calledSendPullQuery) } func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { @@ -1496,7 +1596,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { func TestEngineAggressivePolling(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params.ConcurrentRepolls = 2 vals := validators.NewManager() @@ -1584,7 +1684,7 @@ func TestEngineAggressivePolling(t *testing.T) { func TestEngineDoubleChit(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ K: 2, AlphaPreference: 2, @@ -1660,7 +1760,13 @@ func TestEngineDoubleChit(t *testing.T) { require.Equal(blk.ID(), blkID) require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue(context.Background(), blk, false)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { @@ -1688,7 +1794,7 @@ func TestEngineDoubleChit(t *testing.T) { func TestEngineBuildBlockLimit(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params.K = 1 engCfg.Params.AlphaPreference = 1 engCfg.Params.AlphaConfidence = 1 @@ -2710,7 +2816,7 @@ func 
TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ K: 1, AlphaPreference: 1, @@ -2779,7 +2885,13 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue(context.Background(), blk, true)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go index 7d267b2efbcf..0a029e870ec2 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/snow/event/blockable.go b/snow/event/blockable.go index 05521dc2fe16..404e95c2aee3 100644 --- a/snow/event/blockable.go +++ b/snow/event/blockable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package event diff --git a/snow/event/blocker.go b/snow/event/blocker.go index 6f8e76b2d476..9c15ffb50604 100644 --- a/snow/event/blocker.go +++ b/snow/event/blocker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package event diff --git a/snow/event/blocker_test.go b/snow/event/blocker_test.go index 838a4f69d24b..d7620bfebe1a 100644 --- a/snow/event/blocker_test.go +++ b/snow/event/blocker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package event diff --git a/snow/networking/benchlist/benchable.go b/snow/networking/benchlist/benchable.go index e7ed46c678f9..f1cc85d9fe05 100644 --- a/snow/networking/benchlist/benchable.go +++ b/snow/networking/benchlist/benchable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/snow/networking/benchlist/benchlist.go b/snow/networking/benchlist/benchlist.go index 394899a1f37a..08f7e7d8d65e 100644 --- a/snow/networking/benchlist/benchlist.go +++ b/snow/networking/benchlist/benchlist.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
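The test changes above thread an issuedMetric (te.metrics.issued.WithLabelValues(unknownSource)) into issue so the engine can attribute each issued block to the message source that delivered it. The following is a rough sketch of that labelled-counter pattern with the Prometheus client, reusing the source label values referenced in the diff; the metric name and help text are invented for the example.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	pullGossipSource = "pull_gossip"
	pushGossipSource = "push_gossip"
	putGossipSource  = "put_gossip"
	builtSource      = "built"
	unknownSource    = "unknown"
)

func main() {
	registry := prometheus.NewRegistry()

	// One counter vector, labelled by the source that caused a block to be issued.
	issued := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "blks_issued",
			Help: "number of blocks issued, by source",
		},
		[]string{"source"},
	)
	if err := registry.Register(issued); err != nil {
		panic(err)
	}

	// Callers pick the label once and pass the resulting counter around,
	// mirroring issued.WithLabelValues(...) in the engine code above.
	pullIssued := issued.WithLabelValues(pullGossipSource)
	pullIssued.Inc()

	fmt.Println("recorded one block issued via", pullGossipSource)
}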
package benchlist @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" safemath "github.com/ava-labs/avalanchego/utils/math" @@ -54,9 +53,8 @@ type benchlist struct { ctx *snow.ConsensusContext metrics metrics - // Fires when the next validator should leave the bench - // Calls [update] when it fires - timer *timer.Timer + // Used to notify the timer that it should recalculate when it should fire + resetTimer chan struct{} // Tells the time. Can be faked for testing. clock mockable.Clock @@ -105,8 +103,10 @@ func NewBenchlist( if maxPortion < 0 || maxPortion >= 1 { return nil, fmt.Errorf("max portion of benched stake must be in [0,1) but got %f", maxPortion) } + benchlist := &benchlist{ ctx: ctx, + resetTimer: make(chan struct{}, 1), failureStreaks: make(map[ids.NodeID]failureStreak), benchlistSet: set.Set[ids.NodeID]{}, benchable: benchable, @@ -117,38 +117,77 @@ func NewBenchlist( duration: duration, maxPortion: maxPortion, } - benchlist.timer = timer.NewTimer(benchlist.update) - go benchlist.timer.Dispatch() - return benchlist, benchlist.metrics.Initialize(ctx.Registerer) + if err := benchlist.metrics.Initialize(ctx.Registerer); err != nil { + return nil, err + } + + go benchlist.run() + return benchlist, nil +} + +// TODO: Close this goroutine during node shutdown +func (b *benchlist) run() { + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // Invariant: The [timer] is not stopped. + select { + case <-timer.C: + case <-b.resetTimer: + if !timer.Stop() { + <-timer.C + } + } + + b.waitForBenchedNodes() + + b.removedExpiredNodes() + + // Note: If there are no nodes to remove, [duration] will be 0 and we + // will immediately wait until there are benched nodes. + duration := b.durationToSleep() + timer.Reset(duration) + } } -// Update removes benched validators whose time on the bench is over -func (b *benchlist) update() { +func (b *benchlist) waitForBenchedNodes() { + for { + b.lock.RLock() + _, _, ok := b.benchedHeap.Peek() + b.lock.RUnlock() + if ok { + return + } + + // Invariant: Whenever a new node is benched we ensure that resetTimer + // has a pending message while the write lock is held. 
+ <-b.resetTimer + } +} + +func (b *benchlist) removedExpiredNodes() { b.lock.Lock() defer b.lock.Unlock() now := b.clock.Time() for { - if !b.canUnbench(now) { + _, next, ok := b.benchedHeap.Peek() + if !ok { + break + } + if now.Before(next) { break } - b.remove() - } - // Set next time update will be called - b.setNextLeaveTime() -} -// Removes the next node from the benchlist -// Assumes [b.lock] is held -func (b *benchlist) remove() { - nodeID, _, _ := b.benchedHeap.Pop() - b.ctx.Log.Debug("removing node from benchlist", - zap.Stringer("nodeID", nodeID), - ) - b.benchlistSet.Remove(nodeID) - b.benchable.Unbenched(b.ctx.ChainID, nodeID) + nodeID, _, _ := b.benchedHeap.Pop() + b.ctx.Log.Debug("removing node from benchlist", + zap.Stringer("nodeID", nodeID), + ) + b.benchlistSet.Remove(nodeID) + b.benchable.Unbenched(b.ctx.ChainID, nodeID) + } - // Update metrics b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) benchedStake, err := b.vdrs.SubsetWeight(b.ctx.SubnetID, b.benchlistSet) if err != nil { @@ -161,56 +200,37 @@ func (b *benchlist) remove() { b.metrics.weightBenched.Set(float64(benchedStake)) } -// Returns if a validator should leave the bench at time [now]. -// False if no validator should. -// Assumes [b.lock] is held -func (b *benchlist) canUnbench(now time.Time) bool { - _, next, ok := b.benchedHeap.Peek() - if !ok { - return false - } - return now.After(next) -} +func (b *benchlist) durationToSleep() time.Duration { + b.lock.RLock() + defer b.lock.RUnlock() -// Set [b.timer] to fire when the next validator should leave the bench -// Assumes [b.lock] is held -func (b *benchlist) setNextLeaveTime() { _, next, ok := b.benchedHeap.Peek() if !ok { - b.timer.Cancel() - return + return 0 } + now := b.clock.Time() - nextLeave := next.Sub(now) - b.timer.SetTimeoutIn(nextLeave) + return next.Sub(now) } -// IsBenched returns true if messages to [nodeID] -// should not be sent over the network and should immediately fail. +// IsBenched returns true if messages to [nodeID] should not be sent over the +// network and should immediately fail. func (b *benchlist) IsBenched(nodeID ids.NodeID) bool { b.lock.RLock() defer b.lock.RUnlock() - return b.isBenched(nodeID) -} -// isBenched checks if [nodeID] is currently benched -// and calls cleanup if its benching period has elapsed -// Assumes [b.lock] is held. 
-func (b *benchlist) isBenched(nodeID ids.NodeID) bool { - if _, ok := b.benchlistSet[nodeID]; ok { - return true - } - return false + return b.benchlistSet.Contains(nodeID) } -// RegisterResponse notes that we received a response from validator [validatorID] +// RegisterResponse notes that we received a response from [nodeID] func (b *benchlist) RegisterResponse(nodeID ids.NodeID) { b.streaklock.Lock() defer b.streaklock.Unlock() + delete(b.failureStreaks, nodeID) } -// RegisterResponse notes that a request to validator [validatorID] timed out +// RegisterResponse notes that a request to [nodeID] timed out func (b *benchlist) RegisterFailure(nodeID ids.NodeID) { b.lock.Lock() defer b.lock.Unlock() @@ -295,6 +315,12 @@ func (b *benchlist) bench(nodeID ids.NodeID) { diff := maxBenchedUntil.Sub(minBenchedUntil) benchedUntil := minBenchedUntil.Add(time.Duration(rand.Float64() * float64(diff))) // #nosec G404 + b.ctx.Log.Debug("benching validator after consecutive failed queries", + zap.Stringer("nodeID", nodeID), + zap.Duration("benchDuration", benchedUntil.Sub(now)), + zap.Int("numFailedQueries", b.threshold), + ) + // Add to benchlist times with randomized delay b.benchlistSet.Add(nodeID) b.benchable.Benched(b.ctx.ChainID, nodeID) @@ -304,14 +330,12 @@ func (b *benchlist) bench(nodeID ids.NodeID) { b.streaklock.Unlock() b.benchedHeap.Push(nodeID, benchedUntil) - b.ctx.Log.Debug("benching validator after consecutive failed queries", - zap.Stringer("nodeID", nodeID), - zap.Duration("benchDuration", benchedUntil.Sub(now)), - zap.Int("numFailedQueries", b.threshold), - ) - // Set [b.timer] to fire when next validator should leave bench - b.setNextLeaveTime() + // Update the timer to account for the newly benched node. + select { + case b.resetTimer <- struct{}{}: + default: + } // Update metrics b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index 75df4f454292..45568392297e 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ b/snow/networking/benchlist/benchlist_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
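The benchlist refactor above drops the resettable timer utility in favour of a background run loop woken through a one-element resetTimer channel; bench performs a non-blocking send so callers never stall and repeated pokes coalesce. A minimal sketch of that wake-up pattern follows (it omits the real loop's time.Timer bookkeeping and its invariant that the timer is never left stopped).

package main

import (
	"fmt"
	"time"
)

type waker struct {
	resetTimer chan struct{}
}

func newWaker() *waker {
	// Buffered with capacity one: at most a single wake-up is ever pending.
	return &waker{resetTimer: make(chan struct{}, 1)}
}

// notify is safe to call from any goroutine, any number of times.
func (w *waker) notify() {
	select {
	case w.resetTimer <- struct{}{}:
	default: // a wake-up is already pending; nothing to do
	}
}

func (w *waker) run(stop <-chan struct{}) {
	for {
		select {
		case <-w.resetTimer:
			fmt.Println("recomputing when the next node leaves the bench")
		case <-stop:
			return
		}
	}
}

func main() {
	w := newWaker()
	stop := make(chan struct{})
	go w.run(stop)

	w.notify()
	w.notify() // coalesces with the first poke if it has not been consumed yet
	time.Sleep(100 * time.Millisecond)
	close(stop)
}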
package benchlist @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -20,7 +20,8 @@ var minimumFailingDuration = 5 * time.Minute func TestBenchlistAdd(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -51,20 +52,14 @@ func TestBenchlistAdd(t *testing.T) { ) require.NoError(err) b := benchIntf.(*benchlist) - defer b.timer.Stop() now := time.Now() b.clock.Set(now) // Nobody should be benched at the start b.lock.Lock() - require.False(b.isBenched(vdrID0)) - require.False(b.isBenched(vdrID1)) - require.False(b.isBenched(vdrID2)) - require.False(b.isBenched(vdrID3)) - require.False(b.isBenched(vdrID4)) + require.Empty(b.benchlistSet) require.Empty(b.failureStreaks) require.Zero(b.benchedHeap.Len()) - require.Empty(b.benchlistSet) b.lock.Unlock() // Register [threshold - 1] failures in a row for vdr0 @@ -73,9 +68,8 @@ func TestBenchlistAdd(t *testing.T) { } // Still shouldn't be benched due to not enough consecutive failure - require.False(b.isBenched(vdrID0)) - require.Zero(b.benchedHeap.Len()) require.Empty(b.benchlistSet) + require.Zero(b.benchedHeap.Len()) require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID0] require.Equal(threshold-1, fs.consecutive) @@ -87,9 +81,8 @@ func TestBenchlistAdd(t *testing.T) { // Still shouldn't be benched because not enough time (any in this case) // has passed since the first failure b.lock.Lock() - require.False(b.isBenched(vdrID0)) - require.Zero(b.benchedHeap.Len()) require.Empty(b.benchlistSet) + require.Zero(b.benchedHeap.Len()) b.lock.Unlock() // Move the time up @@ -108,9 +101,9 @@ func TestBenchlistAdd(t *testing.T) { // Now this validator should be benched b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.Equal(b.benchedHeap.Len(), 1) - require.Equal(b.benchlistSet.Len(), 1) + require.Contains(b.benchlistSet, vdrID0) + require.Equal(1, b.benchedHeap.Len()) + require.Equal(1, b.benchlistSet.Len()) nodeID, benchedUntil, ok := b.benchedHeap.Peek() require.True(ok) @@ -133,10 +126,9 @@ func TestBenchlistAdd(t *testing.T) { // vdr1 shouldn't be benched // The response should have cleared its consecutive failures b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.False(b.isBenched(vdrID1)) - require.Equal(b.benchedHeap.Len(), 1) - require.Equal(b.benchlistSet.Len(), 1) + require.Contains(b.benchlistSet, vdrID0) + require.Equal(1, b.benchedHeap.Len()) + require.Equal(1, b.benchlistSet.Len()) require.Empty(b.failureStreaks) b.lock.Unlock() @@ -153,7 +145,8 @@ func TestBenchlistAdd(t *testing.T) { func TestBenchlistMaxStake(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -183,7 +176,6 @@ func TestBenchlistMaxStake(t *testing.T) { ) require.NoError(err) b := benchIntf.(*benchlist) - defer b.timer.Stop() now := time.Now() b.clock.Set(now) @@ -209,11 +201,10 @@ func TestBenchlistMaxStake(t *testing.T) { // Benching vdr2 (weight 1000) would cause the amount benched // to 
exceed the maximum b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.True(b.isBenched(vdrID1)) - require.False(b.isBenched(vdrID2)) - require.Equal(b.benchedHeap.Len(), 2) - require.Equal(b.benchlistSet.Len(), 2) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Equal(2, b.benchedHeap.Len()) + require.Equal(2, b.benchlistSet.Len()) require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID2] fs.consecutive = threshold @@ -236,9 +227,9 @@ func TestBenchlistMaxStake(t *testing.T) { // vdr4 should be benched now b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.True(b.isBenched(vdrID1)) - require.True(b.isBenched(vdrID4)) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) require.Equal(3, b.benchedHeap.Len()) require.Equal(3, b.benchlistSet.Len()) require.Contains(b.benchlistSet, vdrID0) @@ -254,10 +245,9 @@ func TestBenchlistMaxStake(t *testing.T) { } b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.True(b.isBenched(vdrID1)) - require.True(b.isBenched(vdrID4)) - require.False(b.isBenched(vdrID2)) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) require.Equal(3, b.benchedHeap.Len()) require.Equal(3, b.benchlistSet.Len()) require.Len(b.failureStreaks, 1) @@ -269,7 +259,8 @@ func TestBenchlistMaxStake(t *testing.T) { func TestBenchlistRemove(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -307,7 +298,6 @@ func TestBenchlistRemove(t *testing.T) { ) require.NoError(err) b := benchIntf.(*benchlist) - defer b.timer.Stop() now := time.Now() b.lock.Lock() b.clock.Set(now) @@ -332,9 +322,9 @@ func TestBenchlistRemove(t *testing.T) { // All 3 should be benched b.lock.Lock() - require.True(b.isBenched(vdrID0)) - require.True(b.isBenched(vdrID1)) - require.True(b.isBenched(vdrID2)) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID2) require.Equal(3, b.benchedHeap.Len()) require.Equal(3, b.benchlistSet.Len()) require.Empty(b.failureStreaks) diff --git a/snow/networking/benchlist/manager.go b/snow/networking/benchlist/manager.go index 7a42e8245267..e6ac45da4400 100644 --- a/snow/networking/benchlist/manager.go +++ b/snow/networking/benchlist/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/snow/networking/benchlist/metrics.go b/snow/networking/benchlist/metrics.go index 12da52d396a0..25f9e50f7da8 100644 --- a/snow/networking/benchlist/metrics.go +++ b/snow/networking/benchlist/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist diff --git a/snow/networking/benchlist/test_benchable.go b/snow/networking/benchlist/test_benchable.go index 5e179763d2d1..dabfab564829 100644 --- a/snow/networking/benchlist/test_benchable.go +++ b/snow/networking/benchlist/test_benchable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/snow/networking/handler/engine.go b/snow/networking/handler/engine.go index 94ae54ff08c6..e3de84ac8989 100644 --- a/snow/networking/handler/engine.go +++ b/snow/networking/handler/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler diff --git a/snow/networking/handler/engine_test.go b/snow/networking/handler/engine_test.go index 142441cfda6d..e9b2b8ae0162 100644 --- a/snow/networking/handler/engine_test.go +++ b/snow/networking/handler/engine_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 1a9a1d89b6ae..35dc40f57f98 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -31,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" @@ -270,8 +271,8 @@ func (h *handler) Start(ctx context.Context, recoverPanic bool) { // Push the message onto the handler's queue func (h *handler) Push(ctx context.Context, msg Message) { switch msg.Op() { - case message.AppRequestOp, message.AppRequestFailedOp, message.AppResponseOp, message.AppGossipOp, - message.CrossChainAppRequestOp, message.CrossChainAppRequestFailedOp, message.CrossChainAppResponseOp: + case message.AppRequestOp, message.AppErrorOp, message.AppResponseOp, message.AppGossipOp, + message.CrossChainAppRequestOp, message.CrossChainAppErrorOp, message.CrossChainAppResponseOp: h.asyncMessageQueue.Push(ctx, msg) default: h.syncMessageQueue.Push(ctx, msg) @@ -454,7 +455,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { h.ctx.Log.Verbo("forwarding sync message to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding sync message to consensus", @@ -487,7 +488,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() @@ -556,23 +557,11 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.GetStateSummaryFrontierFailed(ctx, nodeID, msg.RequestID) case 
*p2p.GetAcceptedStateSummary: - // TODO: Enforce that the numbers are sorted to make this verification - // more efficient. - if !utils.IsUnique(msg.Heights) { - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), - zap.Uint32("requestID", msg.RequestId), - zap.String("field", "Heights"), - ) - return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestId) - } - return engine.GetAcceptedStateSummary( ctx, nodeID, msg.RequestId, - msg.Heights, + set.Of(msg.Heights...), ) case *p2p.AcceptedStateSummary: @@ -804,7 +793,7 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { h.ctx.Log.Verbo("forwarding async message to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding async message to consensus", @@ -853,8 +842,18 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { case *p2p.AppResponse: return engine.AppResponse(ctx, nodeID, m.RequestId, m.AppBytes) - case *message.AppRequestFailed: - return engine.AppRequestFailed(ctx, nodeID, m.RequestID) + case *p2p.AppError: + err := &common.AppError{ + Code: m.ErrorCode, + Message: m.ErrorMessage, + } + + return engine.AppRequestFailed( + ctx, + nodeID, + m.RequestId, + err, + ) case *p2p.AppGossip: return engine.AppGossip(ctx, nodeID, m.AppBytes) @@ -877,10 +876,16 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { ) case *message.CrossChainAppRequestFailed: + err := &common.AppError{ + Code: m.ErrorCode, + Message: m.ErrorMessage, + } + return engine.CrossChainAppRequestFailed( ctx, m.SourceChainID, m.RequestID, + err, ) default: @@ -904,7 +909,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { if h.ctx.Log.Enabled(logging.Verbo) { h.ctx.Log.Verbo("forwarding chan message to consensus", zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding chan message to consensus", @@ -933,7 +938,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { zap.Duration("processingTime", processingTime), zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index c28da4bc8b71..1f51aa4f1d23 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
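The GetAcceptedStateSummary hunk above drops the explicit uniqueness check because set.Of already collapses duplicate heights before they reach the engine. A small illustrative sketch, assuming nothing beyond the utils/set API used in the hunk; package and variable names are mine:

package example

import (
    "fmt"

    "github.com/ava-labs/avalanchego/utils/set"
)

func main() {
    // Duplicate heights from an untrusted peer collapse into a single entry,
    // so no separate IsUnique validation is needed before handing them on.
    heights := []uint64{100, 100, 250}
    unique := set.Of(heights...)
    fmt.Println(unique.Len()) // 2
}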
package handler @@ -22,10 +22,12 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) @@ -39,7 +41,8 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { called := make(chan struct{}) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdr0 := ids.GenerateTestNodeID() @@ -67,9 +70,6 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { handler := handlerIntf.(*handler) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -78,11 +78,11 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error { require.FailNow("GetAcceptedFrontier message should have timed out") return nil } - bootstrapper.GetAcceptedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + bootstrapper.GetAcceptedF = func(context.Context, ids.NodeID, uint32, set.Set[ids.ID]) error { called <- struct{}{} return nil } @@ -137,7 +137,8 @@ func TestHandlerClosesOnError(t *testing.T) { require := require.New(t) closed := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -169,9 +170,6 @@ func TestHandlerClosesOnError(t *testing.T) { }) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -231,7 +229,8 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { require := require.New(t) closed := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -259,9 +258,6 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { handler.clock.Set(time.Now()) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -311,7 +307,8 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { func TestHandlerDispatchInternal(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) msgFromVMChan := make(chan common.Message) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -337,9 +334,6 @@ func 
TestHandlerDispatchInternal(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -383,7 +377,8 @@ func TestHandlerDispatchInternal(t *testing.T) { func TestHandlerSubnetConnector(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -414,9 +409,6 @@ func TestHandlerSubnetConnector(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -561,7 +553,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { require := require.New(t) messageReceived := make(chan struct{}) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -586,9 +579,6 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -637,7 +627,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { func TestHandlerStartError(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, diff --git a/snow/networking/handler/health.go b/snow/networking/handler/health.go index b68ead089639..3f4af4299d1c 100644 --- a/snow/networking/handler/health.go +++ b/snow/networking/handler/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler diff --git a/snow/networking/handler/health_test.go b/snow/networking/handler/health_test.go index 9767859a4abf..f3fe456fa023 100644 --- a/snow/networking/handler/health_test.go +++ b/snow/networking/handler/health_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
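The test files in this diff repeatedly swap snow.DefaultConsensusContextTest for the snowtest helpers; a minimal sketch of the new two-step setup, with an illustrative test name:

package example

import (
    "testing"

    "github.com/ava-labs/avalanchego/snow/snowtest"
)

func TestWithConsensusContext(t *testing.T) {
    // snowtest.Context builds a test snow.Context for the given chain ID and
    // snowtest.ConsensusContext wraps it for handler/engine tests.
    snowCtx := snowtest.Context(t, snowtest.CChainID)
    ctx := snowtest.ConsensusContext(snowCtx)
    _ = ctx.SubnetID // fields such as SubnetID and ChainID come pre-populated
}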
package handler @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/math/meter" @@ -47,7 +48,8 @@ func TestHealthCheckSubnet(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() @@ -82,9 +84,6 @@ func TestHealthCheckSubnet(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, diff --git a/snow/networking/handler/message_queue.go b/snow/networking/handler/message_queue.go index 6fe4137b940e..58e4f2b3b29e 100644 --- a/snow/networking/handler/message_queue.go +++ b/snow/networking/handler/message_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -69,7 +70,7 @@ type messageQueue struct { // Node ID --> Messages this node has in [msgs] nodeToUnprocessedMsgs map[ids.NodeID]int // Unprocessed messages - msgAndCtxs []*msgAndContext + msgAndCtxs buffer.Deque[*msgAndContext] } func NewMessageQueue( @@ -85,6 +86,7 @@ func NewMessageQueue( cpuTracker: cpuTracker, cond: sync.NewCond(&sync.Mutex{}), nodeToUnprocessedMsgs: make(map[ids.NodeID]int), + msgAndCtxs: buffer.NewUnboundedDeque[*msgAndContext](1 /*=initSize*/), } return m, m.metrics.initialize(metricsNamespace, ctx.Registerer, ops) } @@ -99,7 +101,7 @@ func (m *messageQueue) Push(ctx context.Context, msg Message) { } // Add the message to the queue - m.msgAndCtxs = append(m.msgAndCtxs, &msgAndContext{ + m.msgAndCtxs.PushRight(&msgAndContext{ msg: msg, ctx: ctx, }) @@ -124,13 +126,13 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { if m.closed { return nil, Message{}, false } - if len(m.msgAndCtxs) != 0 { + if m.msgAndCtxs.Len() != 0 { break } m.cond.Wait() } - n := len(m.msgAndCtxs) + n := m.msgAndCtxs.Len() // note that n > 0 i := 0 for { if i == n { @@ -140,20 +142,14 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { } var ( - msgAndCtx = m.msgAndCtxs[0] - msg = msgAndCtx.msg - ctx = msgAndCtx.ctx - nodeID = msg.NodeID() + msgAndCtx, _ = m.msgAndCtxs.PopLeft() + msg = msgAndCtx.msg + ctx = msgAndCtx.ctx + nodeID = msg.NodeID() ) - m.msgAndCtxs[0] = nil // See if it's OK to process [msg] next if m.canPop(msg) || i == n { // i should never == n but handle anyway as a fail-safe - if cap(m.msgAndCtxs) == 1 { - m.msgAndCtxs = nil // Give back memory if possible - } else { - m.msgAndCtxs = m.msgAndCtxs[1:] - } m.nodeToUnprocessedMsgs[nodeID]-- if m.nodeToUnprocessedMsgs[nodeID] == 0 { delete(m.nodeToUnprocessedMsgs, nodeID) @@ -165,8 +161,7 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { } // [msg.nodeID] is causing 
excessive CPU usage. // Push [msg] to back of [m.msgs] and handle it later. - m.msgAndCtxs = append(m.msgAndCtxs, msgAndCtx) - m.msgAndCtxs = m.msgAndCtxs[1:] + m.msgAndCtxs.PushRight(msgAndCtx) i++ m.metrics.numExcessiveCPU.Inc() } @@ -176,7 +171,7 @@ func (m *messageQueue) Len() int { m.cond.L.Lock() defer m.cond.L.Unlock() - return len(m.msgAndCtxs) + return m.msgAndCtxs.Len() } func (m *messageQueue) Shutdown() { @@ -184,10 +179,10 @@ func (m *messageQueue) Shutdown() { defer m.cond.L.Unlock() // Remove all the current messages from the queue - for _, msg := range m.msgAndCtxs { - msg.msg.OnFinishedHandling() + for m.msgAndCtxs.Len() > 0 { + msgAndCtx, _ := m.msgAndCtxs.PopLeft() + msgAndCtx.msg.OnFinishedHandling() } - m.msgAndCtxs = nil m.nodeToUnprocessedMsgs = nil // Update metrics diff --git a/snow/networking/handler/message_queue_metrics.go b/snow/networking/handler/message_queue_metrics.go index ce28769a41ca..20bc4c7766f9 100644 --- a/snow/networking/handler/message_queue_metrics.go +++ b/snow/networking/handler/message_queue_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -9,6 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -24,7 +25,7 @@ func (m *messageQueueMetrics) initialize( metricsRegisterer prometheus.Registerer, ops []message.Op, ) error { - namespace := fmt.Sprintf("%s_%s", metricsNamespace, "unprocessed_msgs") + namespace := metric.AppendNamespace(metricsNamespace, "unprocessed_msgs") m.len = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "len", diff --git a/snow/networking/handler/message_queue_test.go b/snow/networking/handler/message_queue_test.go index 1eabfd96c410..457ba86ceda1 100644 --- a/snow/networking/handler/message_queue_test.go +++ b/snow/networking/handler/message_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -26,7 +26,8 @@ func TestQueue(t *testing.T) { ctrl := gomock.NewController(t) require := require.New(t) cpuTracker := tracker.NewMockTracker(ctrl) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr1ID, nil, ids.Empty, 1)) diff --git a/snow/networking/handler/metrics.go b/snow/networking/handler/metrics.go index a8776b30832e..3fe9f2d9a2b3 100644 --- a/snow/networking/handler/metrics.go +++ b/snow/networking/handler/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
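For the message_queue.go hunk above, a short sketch of the utils/buffer deque that replaces the hand-rolled slice queue; the string payloads are illustrative:

package example

import (
    "fmt"

    "github.com/ava-labs/avalanchego/utils/buffer"
)

func main() {
    // An unbounded deque gives amortized O(1) PushRight/PopLeft without the
    // manual re-slicing and nil-ing the old slice-based queue needed.
    q := buffer.NewUnboundedDeque[string](1 /*=initSize*/)
    q.PushRight("first")
    q.PushRight("second")

    msg, ok := q.PopLeft()
    fmt.Println(msg, ok, q.Len()) // first true 1
}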
package handler diff --git a/snow/networking/handler/mock_handler.go b/snow/networking/handler/mock_handler.go index dd231641e8ac..517fbcd85537 100644 --- a/snow/networking/handler/mock_handler.go +++ b/snow/networking/handler/mock_handler.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/handler (interfaces: Handler) +// +// Generated by this command: +// +// mockgen -package=handler -destination=snow/networking/handler/mock_handler.go github.com/ava-labs/avalanchego/snow/networking/handler Handler +// // Package handler is a generated GoMock package. package handler @@ -50,7 +52,7 @@ func (m *MockHandler) AwaitStopped(arg0 context.Context) (time.Duration, error) } // AwaitStopped indicates an expected call of AwaitStopped. -func (mr *MockHandlerMockRecorder) AwaitStopped(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) AwaitStopped(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AwaitStopped", reflect.TypeOf((*MockHandler)(nil).AwaitStopped), arg0) } @@ -84,16 +86,16 @@ func (mr *MockHandlerMockRecorder) GetEngineManager() *gomock.Call { } // HealthCheck mocks base method. -func (m *MockHandler) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockHandler) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockHandlerMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockHandler)(nil).HealthCheck), arg0) } @@ -119,7 +121,7 @@ func (m *MockHandler) Push(arg0 context.Context, arg1 Message) { } // Push indicates an expected call of Push. -func (mr *MockHandlerMockRecorder) Push(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Push(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockHandler)(nil).Push), arg0, arg1) } @@ -131,7 +133,7 @@ func (m *MockHandler) RegisterTimeout(arg0 time.Duration) { } // RegisterTimeout indicates an expected call of RegisterTimeout. -func (mr *MockHandlerMockRecorder) RegisterTimeout(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) RegisterTimeout(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterTimeout", reflect.TypeOf((*MockHandler)(nil).RegisterTimeout), arg0) } @@ -143,7 +145,7 @@ func (m *MockHandler) SetEngineManager(arg0 *EngineManager) { } // SetEngineManager indicates an expected call of SetEngineManager. -func (mr *MockHandlerMockRecorder) SetEngineManager(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) SetEngineManager(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEngineManager", reflect.TypeOf((*MockHandler)(nil).SetEngineManager), arg0) } @@ -155,7 +157,7 @@ func (m *MockHandler) SetOnStopped(arg0 func()) { } // SetOnStopped indicates an expected call of SetOnStopped. 
-func (mr *MockHandlerMockRecorder) SetOnStopped(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) SetOnStopped(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOnStopped", reflect.TypeOf((*MockHandler)(nil).SetOnStopped), arg0) } @@ -169,7 +171,7 @@ func (m *MockHandler) ShouldHandle(arg0 ids.NodeID) bool { } // ShouldHandle indicates an expected call of ShouldHandle. -func (mr *MockHandlerMockRecorder) ShouldHandle(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) ShouldHandle(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldHandle", reflect.TypeOf((*MockHandler)(nil).ShouldHandle), arg0) } @@ -181,7 +183,7 @@ func (m *MockHandler) Start(arg0 context.Context, arg1 bool) { } // Start indicates an expected call of Start. -func (mr *MockHandlerMockRecorder) Start(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Start(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockHandler)(nil).Start), arg0, arg1) } @@ -193,7 +195,7 @@ func (m *MockHandler) Stop(arg0 context.Context) { } // Stop indicates an expected call of Stop. -func (mr *MockHandlerMockRecorder) Stop(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Stop(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockHandler)(nil).Stop), arg0) } @@ -205,7 +207,7 @@ func (m *MockHandler) StopWithError(arg0 context.Context, arg1 error) { } // StopWithError indicates an expected call of StopWithError. -func (mr *MockHandlerMockRecorder) StopWithError(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) StopWithError(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopWithError", reflect.TypeOf((*MockHandler)(nil).StopWithError), arg0, arg1) } diff --git a/snow/networking/handler/parser.go b/snow/networking/handler/parser.go index 9349b073fbb6..4dc954e4e9f2 100644 --- a/snow/networking/handler/parser.go +++ b/snow/networking/handler/parser.go @@ -1,30 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler import ( - "errors" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedID = errors.New("inbound message contains duplicated ID") - -func getIDs(idsBytes [][]byte) ([]ids.ID, error) { - res := make([]ids.ID, len(idsBytes)) - idSet := set.NewSet[ids.ID](len(idsBytes)) - for i, bytes := range idsBytes { +func getIDs(idsBytes [][]byte) (set.Set[ids.ID], error) { + var res set.Set[ids.ID] + for _, bytes := range idsBytes { id, err := ids.ToID(bytes) if err != nil { return nil, err } - if idSet.Contains(id) { - return nil, errDuplicatedID - } - res[i] = id - idSet.Add(id) + res.Add(id) } return res, nil } diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index b55e77f66f41..f2c6d11775dd 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
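The parser.go change above makes getIDs deduplicate repeated IDs instead of rejecting them with errDuplicatedID. A self-contained sketch mirroring that behavior; the helper body matches the new code, while the main wrapper and package name are illustrative:

package example

import (
    "fmt"

    "github.com/ava-labs/avalanchego/ids"
    "github.com/ava-labs/avalanchego/utils/set"
)

// getIDs mirrors the reworked parser helper: duplicates merge into the set.
func getIDs(idsBytes [][]byte) (set.Set[ids.ID], error) {
    var res set.Set[ids.ID]
    for _, bytes := range idsBytes {
        id, err := ids.ToID(bytes)
        if err != nil {
            return nil, err
        }
        res.Add(id)
    }
    return res, nil
}

func main() {
    id := ids.GenerateTestID()
    parsed, err := getIDs([][]byte{id[:], id[:]}) // same ID twice
    fmt.Println(parsed.Len(), err)                // 1 <nil>
}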
package router diff --git a/snow/networking/router/chain_router_metrics.go b/snow/networking/router/chain_router_metrics.go index 58440377ba82..bc8f26223586 100644 --- a/snow/networking/router/chain_router_metrics.go +++ b/snow/networking/router/chain_router_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index d4f71828799f..1897aae89bc2 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router @@ -15,7 +15,6 @@ import ( "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" @@ -25,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/handler" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" @@ -43,10 +43,13 @@ const ( testThreadPoolSize = 2 ) +// TODO refactor tests in this file + func TestShutdown(t *testing.T) { require := require.New(t) - chainCtx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + chainCtx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(chainCtx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() @@ -105,9 +108,6 @@ func TestShutdown(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -184,7 +184,8 @@ func TestShutdown(t *testing.T) { func TestShutdownTimesOut(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) nodeID := ids.EmptyNodeID vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -246,9 +247,6 @@ func TestShutdownTimesOut(t *testing.T) { bootstrapFinished := make(chan struct{}, 1) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -381,7 +379,8 @@ func TestRouterTimeout(t *testing.T) { wg = sync.WaitGroup{} ) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -407,9 +406,6 @@ func TestRouterTimeout(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -460,12 +456,12 @@ func TestRouterTimeout(t *testing.T) { calledQueryFailed = true return 
nil } - bootstrapper.AppRequestFailedF = func(context.Context, ids.NodeID, uint32) error { + bootstrapper.AppRequestFailedF = func(context.Context, ids.NodeID, uint32, *common.AppError) error { defer wg.Done() calledAppRequestFailed = true return nil } - bootstrapper.CrossChainAppRequestFailedF = func(context.Context, ids.ID, uint32) error { + bootstrapper.CrossChainAppRequestFailedF = func(context.Context, ids.ID, uint32, *common.AppError) error { defer wg.Done() calledCrossChainAppRequestFailed = true return nil @@ -643,10 +639,12 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, message.AppResponseOp, - message.InternalAppRequestFailed( + message.InboundAppError( nodeID, ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ), p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) @@ -662,11 +660,13 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( + message.InternalCrossChainAppError( nodeID, ctx.ChainID, ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ), p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) @@ -729,7 +729,8 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { h := handler.NewMockHandler(ctrl) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) h.EXPECT().Context().Return(ctx).AnyTimes() h.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() h.EXPECT().Stop(gomock.Any()).AnyTimes() @@ -823,294 +824,91 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { } func TestRouterClearTimeouts(t *testing.T) { - require := require.New(t) - - // Create a timeout manager - tm, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 3 * time.Second, - MinimumTimeout: 3 * time.Second, - MaximumTimeout: 5 * time.Minute, - TimeoutCoefficient: 1, - TimeoutHalflife: 5 * time.Minute, + requestID := uint32(123) + + tests := []struct { + name string + responseOp message.Op + responseMsg message.InboundMessage + timeoutMsg message.InboundMessage + }{ + { + name: "StateSummaryFrontier", + responseOp: message.StateSummaryFrontierOp, + responseMsg: message.InboundStateSummaryFrontier(ids.Empty, requestID, []byte("summary"), ids.EmptyNodeID), + timeoutMsg: message.InternalGetStateSummaryFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID), }, - benchlist.NewNoBenchlist(), - "", - prometheus.NewRegistry(), - ) - require.NoError(err) - - go tm.Dispatch() - defer tm.Stop() - - // Create a router - chainRouter := ChainRouter{} - require.NoError(chainRouter.Initialize( - ids.EmptyNodeID, - logging.NoLog{}, - tm, - time.Millisecond, - set.Set[ids.ID]{}, - true, - set.Set[ids.ID]{}, - nil, - HealthConfig{}, - "", - prometheus.NewRegistry(), - )) - defer chainRouter.Shutdown(context.Background()) - - // Create bootstrapper, engine and handler - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - - resourceTracker, err := tracker.NewResourceTracker( - prometheus.NewRegistry(), - resource.NoUsage, - meter.ContinuousFactory{}, - time.Second, - ) - require.NoError(err) - h, err := handler.New( - ctx, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - subnets.New(ctx.NodeID, subnets.Config{}), - commontracker.NewPeers(), - ) - require.NoError(err) - - bootstrapper := &common.BootstrapperTest{ 
- BootstrapableTest: common.BootstrapableTest{ - T: t, + { + name: "AcceptedStateSummary", + responseOp: message.AcceptedStateSummaryOp, + responseMsg: message.InboundAcceptedStateSummary(ids.Empty, requestID, []ids.ID{ids.GenerateTestID()}, ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedStateSummaryFailed(ids.EmptyNodeID, ids.Empty, requestID), }, - EngineTest: common.EngineTest{ - T: t, + { + name: "AcceptedFrontierOp", + responseOp: message.AcceptedFrontierOp, + responseMsg: message.InboundAcceptedFrontier(ids.Empty, requestID, ids.GenerateTestID(), ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), }, - } - bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { - return ctx - } - - engine := &common.EngineTest{T: t} - engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { - return ctx - } - h.SetEngineManager(&handler.EngineManager{ - Avalanche: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: bootstrapper, - Consensus: engine, + { + name: "Accepted", + responseOp: message.AcceptedOp, + responseMsg: message.InboundAccepted(ids.Empty, requestID, []ids.ID{ids.GenerateTestID()}, ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), }, - Snowman: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: bootstrapper, - Consensus: engine, + { + name: "Chits", + responseOp: message.ChitsOp, + responseMsg: message.InboundChits(ids.Empty, requestID, ids.GenerateTestID(), ids.GenerateTestID(), ids.GenerateTestID(), ids.EmptyNodeID), + timeoutMsg: message.InternalQueryFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + }, + { + name: "AppResponse", + responseOp: message.AppResponseOp, + responseMsg: message.InboundAppResponse(ids.Empty, requestID, []byte("responseMsg"), ids.EmptyNodeID), + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 123, "error"), + }, + { + name: "AppError", + responseOp: message.AppResponseOp, + responseMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 1234, "custom error"), + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 123, "error"), + }, + { + name: "CrossChainAppResponse", + responseOp: message.CrossChainAppResponseOp, + responseMsg: message.InternalCrossChainAppResponse(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, []byte("responseMsg")), + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 123, "error"), + }, + { + name: "CrossChainAppError", + responseOp: message.CrossChainAppResponseOp, + responseMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 1234, "custom error"), + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 123, "error"), }, - }) - ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.NormalOp, // assumed bootstrapping is done - }) - - chainRouter.AddChain(context.Background(), h) - - bootstrapper.StartF = func(context.Context, uint32) error { - return nil - } - h.Start(context.Background(), false) - - nodeID := ids.GenerateTestNodeID() - requestID := uint32(0) - { - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.StateSummaryFrontierOp, - message.InternalGetStateSummaryFrontierFailed( - nodeID, - ctx.ChainID, - requestID, - ), - 
engineType, - ) - msg := message.InboundStateSummaryFrontier( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) } - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedStateSummaryOp, - message.InternalGetAcceptedStateSummaryFailed( - nodeID, - ctx.ChainID, - requestID, - ), - engineType, - ) - msg := message.InboundAcceptedStateSummary( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedFrontierOp, - message.InternalGetAcceptedFrontierFailed( - nodeID, - ctx.ChainID, - requestID, - engineType, - ), - engineType, - ) - msg := message.InboundAcceptedFrontier( - ctx.ChainID, - requestID, - ids.Empty, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + chainRouter, _ := newChainRouterTest(t) - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedOp, - message.InternalGetAcceptedFailed( - nodeID, - ctx.ChainID, + chainRouter.RegisterRequest( + context.Background(), + ids.EmptyNodeID, + ids.Empty, + ids.Empty, requestID, + tt.responseOp, + tt.timeoutMsg, engineType, - ), - engineType, - ) - msg := message.InboundAccepted( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + ) - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.ChitsOp, - message.InternalQueryFailed( - nodeID, - ctx.ChainID, - requestID, - engineType, - ), - engineType, - ) - msg := message.InboundChits( - ctx.ChainID, - requestID, - ids.Empty, - ids.Empty, - ids.Empty, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AppResponseOp, - message.InternalAppRequestFailed( - nodeID, - ctx.ChainID, - requestID, - ), - engineType, - ) - msg := message.InboundAppResponse( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - ), - p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, - ) - msg := message.InternalCrossChainAppResponse( - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - nil, - ) - chainRouter.HandleInbound(context.Background(), msg) + chainRouter.HandleInbound(context.Background(), tt.responseMsg) + require.Zero(chainRouter.timedRequests.Len()) + }) } - - require.Zero(chainRouter.timedRequests.Len()) } func TestValidatorOnlyMessageDrops(t *testing.T) { @@ -1156,7 +954,8 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { calledF := false wg := sync.WaitGroup{} - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) vdrs := 
validators.NewManager() vID := ids.GenerateTestNodeID() @@ -1182,9 +981,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -1268,179 +1064,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { require.True(calledF) // should be called since this is a validator request } -func TestRouterCrossChainMessages(t *testing.T) { - require := require.New(t) - - tm, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 3 * time.Second, - MinimumTimeout: 3 * time.Second, - MaximumTimeout: 5 * time.Minute, - TimeoutCoefficient: 1, - TimeoutHalflife: 5 * time.Minute, - }, - benchlist.NewNoBenchlist(), - "timeoutManager", - prometheus.NewRegistry(), - ) - require.NoError(err) - - go tm.Dispatch() - defer tm.Stop() - - // Create chain router - nodeID := ids.GenerateTestNodeID() - chainRouter := ChainRouter{} - require.NoError(chainRouter.Initialize( - nodeID, - logging.NoLog{}, - tm, - time.Millisecond, - set.Set[ids.ID]{}, - true, - set.Set[ids.ID]{}, - nil, - HealthConfig{}, - "", - prometheus.NewRegistry(), - )) - defer chainRouter.Shutdown(context.Background()) - - requester := snow.DefaultConsensusContextTest() - requester.ChainID = ids.GenerateTestID() - requester.Registerer = prometheus.NewRegistry() - requester.Metrics = metrics.NewOptionalGatherer() - requester.Executing.Set(false) - - // Set up validators - vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(requester.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - - // Create bootstrapper, engine and handler - resourceTracker, err := tracker.NewResourceTracker( - prometheus.NewRegistry(), - resource.NoUsage, - meter.ContinuousFactory{}, - time.Second, - ) - require.NoError(err) - - requesterHandler, err := handler.New( - requester, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - subnets.New(requester.NodeID, subnets.Config{}), - commontracker.NewPeers(), - ) - require.NoError(err) - requesterHandler.SetEngineManager(&handler.EngineManager{ - Avalanche: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: &common.BootstrapperTest{}, - Consensus: &common.EngineTest{}, - }, - Snowman: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: &common.BootstrapperTest{}, - Consensus: &common.EngineTest{}, - }, - }) - - responder := snow.DefaultConsensusContextTest() - responder.ChainID = ids.GenerateTestID() - responder.Registerer = prometheus.NewRegistry() - responder.Metrics = metrics.NewOptionalGatherer() - responder.Executing.Set(false) - - responderHandler, err := handler.New( - responder, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - subnets.New(responder.NodeID, subnets.Config{}), - commontracker.NewPeers(), - ) - require.NoError(err) - responderHandler.SetEngineManager(&handler.EngineManager{ - Avalanche: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: &common.BootstrapperTest{}, - Consensus: &common.EngineTest{}, - }, - Snowman: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: &common.BootstrapperTest{}, - Consensus: &common.EngineTest{}, - }, - }) - - // assumed bootstrapping is done - responder.State.Set(snow.EngineState{ - Type: engineType, - State: snow.NormalOp, - }) - requester.State.Set(snow.EngineState{ - Type: engineType, - State: snow.NormalOp, - }) - - // router tracks two chains - 
one will send a message to the other - chainRouter.AddChain(context.Background(), requesterHandler) - chainRouter.AddChain(context.Background(), responderHandler) - - // Each chain should start off with a connected message - require.Equal(1, chainRouter.chainHandlers[requester.ChainID].Len()) - require.Equal(1, chainRouter.chainHandlers[responder.ChainID].Len()) - - // Requester sends a request to the responder - msgBytes := []byte("foobar") - msg := message.InternalCrossChainAppRequest( - requester.NodeID, - requester.ChainID, - responder.ChainID, - uint32(1), - time.Minute, - msgBytes, - ) - chainRouter.HandleInbound(context.Background(), msg) - require.Equal(2, chainRouter.chainHandlers[responder.ChainID].Len()) - - // We register the cross-chain response on the requester-side so we don't - // drop it. - chainRouter.RegisterRequest( - context.Background(), - nodeID, - requester.ChainID, - responder.ChainID, - uint32(1), - message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( - nodeID, - responder.ChainID, - requester.ChainID, - uint32(1), - ), - p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, - ) - // Responder sends a response back to the requester. - msg = message.InternalCrossChainAppResponse( - nodeID, - responder.ChainID, - requester.ChainID, - uint32(1), - msgBytes, - ) - chainRouter.HandleInbound(context.Background(), msg) - require.Equal(2, chainRouter.chainHandlers[requester.ChainID].Len()) -} - func TestConnectedSubnet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) @@ -1484,13 +1107,10 @@ func TestConnectedSubnet(t *testing.T) { )) // Create bootstrapper, engine and handler - platform := snow.DefaultConsensusContextTest() - platform.ChainID = constants.PlatformChainID - platform.SubnetID = constants.PrimaryNetworkID - platform.Registerer = prometheus.NewRegistry() - platform.Metrics = metrics.NewOptionalGatherer() - platform.Executing.Set(false) - platform.State.Set(snow.EngineState{ + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) + ctx.Executing.Set(false) + ctx.State.Set(snow.EngineState{ Type: engineType, State: snow.NormalOp, }) @@ -1509,7 +1129,7 @@ func TestConnectedSubnet(t *testing.T) { } platformHandler := handler.NewMockHandler(ctrl) - platformHandler.EXPECT().Context().Return(platform).AnyTimes() + platformHandler.EXPECT().Context().Return(ctx).AnyTimes() platformHandler.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) @@ -1603,7 +1223,8 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { calledF := false wg := sync.WaitGroup{} - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) allowedID := ids.GenerateTestNodeID() allowedSet := set.Of(allowedID) sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true, AllowedNodes: allowedSet}) @@ -1634,9 +1255,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -1731,3 +1349,275 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { wg.Wait() require.True(calledF) // should be called since this is a validator request } + +// Tests that a response, peer error, or a timeout clears the timeout and calls +// 
the handler +func TestAppRequest(t *testing.T) { + wantRequestID := uint32(123) + wantResponse := []byte("response") + + errFoo := common.AppError{ + Code: 456, + Message: "foo", + } + + tests := []struct { + name string + responseOp message.Op + timeoutMsg message.InboundMessage + inboundMsg message.InboundMessage + }{ + { + name: "AppRequest - chain response", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InboundAppResponse(ids.Empty, wantRequestID, wantResponse, ids.EmptyNodeID), + }, + { + name: "AppRequest - chain error", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + { + name: "AppRequest - timeout", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + wg := &sync.WaitGroup{} + chainRouter, engine := newChainRouterTest(t) + + wg.Add(1) + if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.AppErrorOp { + engine.AppRequestFailedF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.EmptyNodeID, nodeID) + require.Equal(wantRequestID, requestID) + require.Equal(errFoo.Code, appErr.Code) + require.Equal(errFoo.Message, appErr.Message) + + return nil + } + } else if tt.inboundMsg.Op() == message.AppResponseOp { + engine.AppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.EmptyNodeID, nodeID) + require.Equal(wantRequestID, requestID) + require.Equal(wantResponse, msg) + + return nil + } + } + + ctx := context.Background() + chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + require.Equal(1, chainRouter.timedRequests.Len()) + + if tt.inboundMsg != nil { + chainRouter.HandleInbound(ctx, tt.inboundMsg) + } + + wg.Wait() + }) + } +} + +// Tests that a response, peer error, or a timeout clears the timeout and calls +// the handler +func TestCrossChainAppRequest(t *testing.T) { + wantRequestID := uint32(123) + wantResponse := []byte("response") + + errFoo := common.AppError{ + Code: 456, + Message: "foo", + } + + tests := []struct { + name string + responseOp message.Op + timeoutMsg message.InboundMessage + inboundMsg message.InboundMessage + }{ + { + name: "CrossChainAppRequest - chain response", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InternalCrossChainAppResponse(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, wantResponse), + }, + { + name: "CrossChainAppRequest - chain error", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, 
wantRequestID, errFoo.Code, errFoo.Message), + }, + { + name: "CrossChainAppRequest - timeout", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + wg := &sync.WaitGroup{} + chainRouter, engine := newChainRouterTest(t) + + wg.Add(1) + if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.CrossChainAppErrorOp { + engine.CrossChainAppRequestFailedF = func(_ context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.Empty, chainID) + require.Equal(wantRequestID, requestID) + require.Equal(errFoo.Code, appErr.Code) + require.Equal(errFoo.Message, appErr.Message) + + return nil + } + } else if tt.inboundMsg.Op() == message.CrossChainAppResponseOp { + engine.CrossChainAppResponseF = func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.Empty, chainID) + require.Equal(wantRequestID, requestID) + require.Equal(wantResponse, msg) + + return nil + } + } + + ctx := context.Background() + chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + require.Equal(1, chainRouter.timedRequests.Len()) + + if tt.inboundMsg != nil { + chainRouter.HandleInbound(ctx, tt.inboundMsg) + } + + wg.Wait() + }) + } +} + +func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { + // Create a timeout manager + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 3 * time.Second, + MinimumTimeout: 3 * time.Second, + MaximumTimeout: 5 * time.Minute, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, + }, + benchlist.NewNoBenchlist(), + "", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + go tm.Dispatch() + + // Create a router + chainRouter := &ChainRouter{} + require.NoError(t, chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + )) + + // Create bootstrapper, engine and handler + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(t, err) + h, err := handler.New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), + ) + require.NoError(t, err) + + bootstrapper := &common.BootstrapperTest{ + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + + engine := &common.EngineTest{T: t} + engine.Default(false) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: 
engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrapping is done + }) + + chainRouter.AddChain(context.Background(), h) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + h.Start(context.Background(), false) + + t.Cleanup(func() { + tm.Stop() + chainRouter.Shutdown(context.Background()) + }) + + return chainRouter, engine +} diff --git a/snow/networking/router/health.go b/snow/networking/router/health.go index d678f0f19aa1..3968f981d084 100644 --- a/snow/networking/router/health.go +++ b/snow/networking/router/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/router/inbound_handler.go b/snow/networking/router/inbound_handler.go index cfd6d5fa222f..81d2d9b810be 100644 --- a/snow/networking/router/inbound_handler.go +++ b/snow/networking/router/inbound_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/router/main_test.go b/snow/networking/router/main_test.go index afc1dddb173e..4398ad2eefeb 100644 --- a/snow/networking/router/main_test.go +++ b/snow/networking/router/main_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/router/mock_router.go b/snow/networking/router/mock_router.go index e644edd2d6b2..c9146a777138 100644 --- a/snow/networking/router/mock_router.go +++ b/snow/networking/router/mock_router.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/networking/router (interfaces: Router) +// Source: snow/networking/router/router.go +// +// Generated by this command: +// +// mockgen -source=snow/networking/router/router.go -destination=snow/networking/router/mock_router.go -package=router -exclude_interfaces=InternalHandler +// // Package router is a generated GoMock package. package router @@ -20,8 +22,8 @@ import ( logging "github.com/ava-labs/avalanchego/utils/logging" set "github.com/ava-labs/avalanchego/utils/set" version "github.com/ava-labs/avalanchego/version" - gomock "go.uber.org/mock/gomock" prometheus "github.com/prometheus/client_golang/prometheus" + gomock "go.uber.org/mock/gomock" ) // MockRouter is a mock of Router interface. @@ -48,51 +50,51 @@ func (m *MockRouter) EXPECT() *MockRouterMockRecorder { } // AddChain mocks base method. -func (m *MockRouter) AddChain(arg0 context.Context, arg1 handler.Handler) { +func (m *MockRouter) AddChain(ctx context.Context, chain handler.Handler) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0, arg1) + m.ctrl.Call(m, "AddChain", ctx, chain) } // AddChain indicates an expected call of AddChain. 
-func (mr *MockRouterMockRecorder) AddChain(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) AddChain(ctx, chain any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockRouter)(nil).AddChain), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockRouter)(nil).AddChain), ctx, chain) } // Benched mocks base method. -func (m *MockRouter) Benched(arg0 ids.ID, arg1 ids.NodeID) { +func (m *MockRouter) Benched(chainID ids.ID, validatorID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Benched", arg0, arg1) + m.ctrl.Call(m, "Benched", chainID, validatorID) } // Benched indicates an expected call of Benched. -func (mr *MockRouterMockRecorder) Benched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Benched(chainID, validatorID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Benched", reflect.TypeOf((*MockRouter)(nil).Benched), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Benched", reflect.TypeOf((*MockRouter)(nil).Benched), chainID, validatorID) } // Connected mocks base method. -func (m *MockRouter) Connected(arg0 ids.NodeID, arg1 *version.Application, arg2 ids.ID) { +func (m *MockRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Connected", arg0, arg1, arg2) + m.ctrl.Call(m, "Connected", nodeID, nodeVersion, subnetID) } // Connected indicates an expected call of Connected. -func (mr *MockRouterMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Connected(nodeID, nodeVersion, subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockRouter)(nil).Connected), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockRouter)(nil).Connected), nodeID, nodeVersion, subnetID) } // Disconnected mocks base method. -func (m *MockRouter) Disconnected(arg0 ids.NodeID) { +func (m *MockRouter) Disconnected(nodeID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Disconnected", arg0) + m.ctrl.Call(m, "Disconnected", nodeID) } // Disconnected indicates an expected call of Disconnected. -func (mr *MockRouterMockRecorder) Disconnected(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Disconnected(nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockRouter)(nil).Disconnected), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockRouter)(nil).Disconnected), nodeID) } // HandleInbound mocks base method. @@ -102,50 +104,50 @@ func (m *MockRouter) HandleInbound(arg0 context.Context, arg1 message.InboundMes } // HandleInbound indicates an expected call of HandleInbound. -func (mr *MockRouterMockRecorder) HandleInbound(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) HandleInbound(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInbound", reflect.TypeOf((*MockRouter)(nil).HandleInbound), arg0, arg1) } // HealthCheck mocks base method. 
-func (m *MockRouter) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockRouter) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockRouterMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockRouter)(nil).HealthCheck), arg0) } // Initialize mocks base method. -func (m *MockRouter) Initialize(arg0 ids.NodeID, arg1 logging.Logger, arg2 timeout.Manager, arg3 time.Duration, arg4 set.Set[ids.ID], arg5 bool, arg6 set.Set[ids.ID], arg7 func(int), arg8 HealthConfig, arg9 string, arg10 prometheus.Registerer) error { +func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. -func (mr *MockRouterMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) } // RegisterRequest mocks base method. -func (m *MockRouter) RegisterRequest(arg0 context.Context, arg1 ids.NodeID, arg2, arg3 ids.ID, arg4 uint32, arg5 message.Op, arg6 message.InboundMessage, arg7 p2p.EngineType) { +func (m *MockRouter) RegisterRequest(ctx context.Context, nodeID ids.NodeID, sourceChainID, destinationChainID ids.ID, requestID uint32, op message.Op, failedMsg message.InboundMessage, engineType p2p.EngineType) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterRequest", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + m.ctrl.Call(m, "RegisterRequest", ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType) } // RegisterRequest indicates an expected call of RegisterRequest. 
-func (mr *MockRouterMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) RegisterRequest(ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockRouter)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockRouter)(nil).RegisterRequest), ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType) } // Shutdown mocks base method. @@ -155,19 +157,19 @@ func (m *MockRouter) Shutdown(arg0 context.Context) { } // Shutdown indicates an expected call of Shutdown. -func (mr *MockRouterMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockRouter)(nil).Shutdown), arg0) } // Unbenched mocks base method. -func (m *MockRouter) Unbenched(arg0 ids.ID, arg1 ids.NodeID) { +func (m *MockRouter) Unbenched(chainID ids.ID, validatorID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Unbenched", arg0, arg1) + m.ctrl.Call(m, "Unbenched", chainID, validatorID) } // Unbenched indicates an expected call of Unbenched. -func (mr *MockRouterMockRecorder) Unbenched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Unbenched(chainID, validatorID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unbenched", reflect.TypeOf((*MockRouter)(nil).Unbenched), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unbenched", reflect.TypeOf((*MockRouter)(nil).Unbenched), chainID, validatorID) } diff --git a/snow/networking/router/router.go b/snow/networking/router/router.go index bba00eb7ae06..4df5614c25fb 100644 --- a/snow/networking/router/router.go +++ b/snow/networking/router/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/router/traced_router.go b/snow/networking/router/traced_router.go index fe493e6717a8..955ccb43bbed 100644 --- a/snow/networking/router/traced_router.go +++ b/snow/networking/router/traced_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/snow/networking/sender/external_sender.go b/snow/networking/sender/external_sender.go index 72d9539d41e5..7d279889e3af 100644 --- a/snow/networking/sender/external_sender.go +++ b/snow/networking/sender/external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender diff --git a/snow/networking/sender/mock_external_sender.go b/snow/networking/sender/mock_external_sender.go index d3d4717b2959..9dc0a50d1af9 100644 --- a/snow/networking/sender/mock_external_sender.go +++ b/snow/networking/sender/mock_external_sender.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/networking/sender (interfaces: ExternalSender) +// Source: snow/networking/sender/external_sender.go +// +// Generated by this command: +// +// mockgen -source=snow/networking/sender/external_sender.go -destination=snow/networking/sender/mock_external_sender.go -package=sender -exclude_interfaces= +// // Package sender is a generated GoMock package. package sender @@ -41,29 +43,29 @@ func (m *MockExternalSender) EXPECT() *MockExternalSenderMockRecorder { } // Gossip mocks base method. -func (m *MockExternalSender) Gossip(arg0 message.OutboundMessage, arg1 ids.ID, arg2, arg3, arg4 int, arg5 subnets.Allower) set.Set[ids.NodeID] { +func (m *MockExternalSender) Gossip(msg message.OutboundMessage, subnetID ids.ID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int, allower subnets.Allower) set.Set[ids.NodeID] { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Gossip", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "Gossip", msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) ret0, _ := ret[0].(set.Set[ids.NodeID]) return ret0 } // Gossip indicates an expected call of Gossip. -func (mr *MockExternalSenderMockRecorder) Gossip(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockExternalSenderMockRecorder) Gossip(msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) } // Send mocks base method. -func (m *MockExternalSender) Send(arg0 message.OutboundMessage, arg1 set.Set[ids.NodeID], arg2 ids.ID, arg3 subnets.Allower) set.Set[ids.NodeID] { +func (m *MockExternalSender) Send(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Send", msg, nodeIDs, subnetID, allower) ret0, _ := ret[0].(set.Set[ids.NodeID]) return ret0 } // Send indicates an expected call of Send. -func (mr *MockExternalSenderMockRecorder) Send(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockExternalSenderMockRecorder) Send(msg, nodeIDs, subnetID, allower any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), msg, nodeIDs, subnetID, allower) } diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index b30e267a19bf..08a05305029a 100644 --- a/snow/networking/sender/sender.go +++ b/snow/networking/sender/sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
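A quick usage sketch (not part of the diff) for the regenerated sender mock above: the recorder arguments are now named after the interface parameters and typed as `any`, but expectations are written exactly as before. The test name and placement are illustrative only.

```go
package sender

import (
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/set"
)

func TestExternalSenderMockSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	externalSender := NewMockExternalSender(ctrl)

	nodeID := ids.GenerateTestNodeID()
	subnetID := ids.GenerateTestID()

	// Expect a single Send to one node on the given subnet; the outbound
	// message and the subnets.Allower are matched loosely.
	externalSender.EXPECT().Send(
		gomock.Any(),   // outbound message
		set.Of(nodeID), // destination node IDs
		subnetID,       // subnet ID
		gomock.Any(),   // subnets.Allower
	).Return(set.Of(nodeID))

	// Exercise the expectation; the mock reports the node the message was
	// "sent" to, as configured in Return above.
	sentTo := externalSender.Send(nil, set.Of(nodeID), subnetID, nil)
	if !sentTo.Contains(nodeID) {
		t.Fatal("expected nodeID to be in the sentTo set")
	}
}
```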
package sender @@ -1210,11 +1210,13 @@ func (s *sender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, r ctx = utils.Detach(ctx) // The failed message is treated as if it was sent by the requested chain - failedMsg := message.InternalCrossChainAppRequestFailed( + failedMsg := message.InternalCrossChainAppError( s.ctx.NodeID, chainID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) s.router.RegisterRequest( ctx, @@ -1262,10 +1264,12 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) s.router.RegisterRequest( ctx, @@ -1308,10 +1312,12 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // Immediately register a failure. Do so asynchronously to avoid // deadlock. - inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1366,10 +1372,12 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // Register failures for nodes we didn't send a request to. s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) go s.router.HandleInbound(ctx, inMsg) } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 6355834dcf78..5ad019731857 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
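For context on the change above (not in the diff itself): the locally registered timeout failures are now built as AppError messages carrying `common.ErrTimeout`'s code and message, and the engine callbacks receive a `*common.AppError`, as the updated test below shows. A hedged sketch of a handler that distinguishes the locally generated timeout from a remote application error, assuming the usual `snow/engine/common` import path and the `Code` field used elsewhere in this diff:

```go
package sender

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
)

// appRequestFailedSketch matches the new AppRequestFailed callback shape.
func appRequestFailedSketch(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error {
	if appErr.Code == common.ErrTimeout.Code {
		// The sender registered this failure itself: the request to nodeID
		// timed out or could never be delivered.
		_ = requestID
		return nil
	}
	// Any other code/message pair was produced by the remote application.
	_ = nodeID
	return nil
}
```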
package sender @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" @@ -53,7 +54,8 @@ var defaultSubnetConfig = subnets.Config{ func TestTimeout(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() @@ -112,7 +114,7 @@ func TestTimeout(t *testing.T) { ) require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, @@ -134,9 +136,6 @@ func TestTimeout(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -204,8 +203,18 @@ func TestTimeout(t *testing.T) { bootstrapper.GetAncestorsFailedF = failed bootstrapper.GetFailedF = failed bootstrapper.QueryFailedF = failed - bootstrapper.AppRequestFailedF = failed - bootstrapper.CrossChainAppRequestFailedF = func(ctx context.Context, chainID ids.ID, _ uint32) error { + bootstrapper.AppRequestFailedF = func(ctx context.Context, nodeID ids.NodeID, _ uint32, appErr *common.AppError) error { + require.NoError(ctx.Err()) + + failedLock.Lock() + defer failedLock.Unlock() + + failedVDRs.Add(nodeID) + wg.Done() + return nil + } + + bootstrapper.CrossChainAppRequestFailedF = func(ctx context.Context, chainID ids.ID, _ uint32, _ *common.AppError) error { require.NoError(ctx.Err()) failedLock.Lock() @@ -310,9 +319,10 @@ func TestTimeout(t *testing.T) { func TestReliableMessages(t *testing.T) { require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.NodeID{1}, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.BuildTestNodeID([]byte{1}), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -370,7 +380,7 @@ func TestReliableMessages(t *testing.T) { ) require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, @@ -392,9 +402,6 @@ func TestReliableMessages(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -443,7 +450,7 @@ func TestReliableMessages(t *testing.T) { go func() { for i := 0; i < queriesToSend; i++ { - vdrIDs := set.Of(ids.NodeID{1}) + vdrIDs := set.Of(ids.BuildTestNodeID([]byte{1})) sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 @@ -459,7 +466,8 @@ func TestReliableMessagesToMyself(t *testing.T) { require 
:= require.New(t) benchlist := benchlist.NewNoBenchlist() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) tm, err := timeout.NewManager( @@ -518,7 +526,7 @@ func TestReliableMessagesToMyself(t *testing.T) { ) require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, @@ -540,9 +548,6 @@ func TestReliableMessagesToMyself(t *testing.T) { require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -605,26 +610,16 @@ func TestReliableMessagesToMyself(t *testing.T) { func TestSender_Bootstrap_Requests(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() successNodeID = ids.GenerateTestNodeID() failedNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() heights = []uint64{1, 2, 3} containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name string @@ -643,21 +638,21 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetStateSummaryFrontierFailed( nodeID, - chainID, + ctx.ChainID, requestID, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.GetStateSummaryFrontier{}, msg.Message()) innerMsg := msg.Message().(*p2p.GetStateSummaryFrontier) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) }, expectedResponseOp: message.StateSummaryFrontierOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetStateSummaryFrontier( - chainID, + ctx.ChainID, requestID, deadline, ).Return(nil, nil) @@ -667,7 +662,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { gomock.Any(), // Outbound message // Note [myNodeID] is not in this set set.Of(successNodeID, failedNodeID), - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) }, @@ -684,14 +679,14 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedStateSummaryFailed( nodeID, - chainID, + ctx.ChainID, requestID, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.GetAcceptedStateSummary{}, msg.Message()) innerMsg := msg.Message().(*p2p.GetAcceptedStateSummary) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) 
require.Equal(heights, innerMsg.Heights) @@ -699,7 +694,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedStateSummaryOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAcceptedStateSummary( - chainID, + ctx.ChainID, requestID, deadline, heights, @@ -710,7 +705,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { gomock.Any(), // Outbound message // Note [myNodeID] is not in this set set.Of(successNodeID, failedNodeID), - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) }, @@ -723,7 +718,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedFrontierFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) @@ -731,7 +726,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.GetAcceptedFrontier{}, msg.Message()) innerMsg := msg.Message().(*p2p.GetAcceptedFrontier) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) require.Equal(engineType, innerMsg.EngineType) @@ -739,7 +734,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedFrontierOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAcceptedFrontier( - chainID, + ctx.ChainID, requestID, deadline, engineType, @@ -750,7 +745,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { gomock.Any(), // Outbound message // Note [myNodeID] is not in this set set.Of(successNodeID, failedNodeID), - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) }, @@ -764,7 +759,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) @@ -772,7 +767,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.GetAccepted{}, msg.Message()) innerMsg := msg.Message().(*p2p.GetAccepted) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) require.Equal(engineType, innerMsg.EngineType) @@ -780,7 +775,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAccepted( - chainID, + ctx.ChainID, requestID, deadline, containerIDs, @@ -792,7 +787,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { gomock.Any(), // Outbound message // Note [myNodeID] is not in this set set.Of(successNodeID, failedNodeID), - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) }, @@ -813,14 +808,17 @@ func TestSender_Bootstrap_Requests(t *testing.T) { externalSender = NewMockExternalSender(ctrl) timeoutManager = timeout.NewMockManager(ctrl) router = router.NewMockRouter(ctrl) - nodeIDs = set.Of(successNodeID, failedNodeID, myNodeID) + nodeIDs = set.Of(successNodeID, failedNodeID, ctx.NodeID) nodeIDsCopy 
set.Set[ids.NodeID] ) nodeIDsCopy.Union(nodeIDs) - snowCtx.Registerer = prometheus.NewRegistry() + + // Instantiate new registerers to avoid duplicate metrics + // registration + ctx.Registerer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -839,8 +837,8 @@ func TestSender_Bootstrap_Requests(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context nodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -877,25 +875,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { func TestSender_Bootstrap_Responses(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() destinationNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} summary = []byte{1, 2, 3} engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name string @@ -910,7 +898,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "StateSummaryFrontier", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().StateSummaryFrontier( - chainID, + ctx.ChainID, requestID, summary, ).Return(nil, nil) // Don't care about the message @@ -918,7 +906,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.StateSummaryFrontier{}, msg.Message()) innerMsg := msg.Message().(*p2p.StateSummaryFrontier) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summary, innerMsg.Summary) }, @@ -926,7 +914,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -938,7 +926,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "AcceptedStateSummary", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().AcceptedStateSummary( - chainID, + ctx.ChainID, requestID, summaryIDs, ).Return(nil, nil) // Don't care about the message @@ -946,7 +934,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.AcceptedStateSummary{}, msg.Message()) innerMsg := msg.Message().(*p2p.AcceptedStateSummary) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { require.Equal(summaryID[:], innerMsg.SummaryIds[i]) @@ -956,7 +944,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, // 
Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -968,7 +956,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "AcceptedFrontier", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().AcceptedFrontier( - chainID, + ctx.ChainID, requestID, summaryIDs[0], ).Return(nil, nil) // Don't care about the message @@ -976,7 +964,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.AcceptedFrontier{}, msg.Message()) innerMsg := msg.Message().(*p2p.AcceptedFrontier) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summaryIDs[0][:], innerMsg.ContainerId) }, @@ -984,7 +972,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -996,7 +984,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "Accepted", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().Accepted( - chainID, + ctx.ChainID, requestID, summaryIDs, ).Return(nil, nil) // Don't care about the message @@ -1004,7 +992,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&p2p.Accepted{}, msg.Message()) innerMsg := msg.Message().(*p2p.Accepted) - require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { require.Equal(summaryID[:], innerMsg.ContainerIds[i]) @@ -1014,7 +1002,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, // Subnet ID + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -1038,11 +1026,11 @@ func TestSender_Bootstrap_Responses(t *testing.T) { // Instantiate new registerers to avoid duplicate metrics // registration - snowCtx.Registerer = prometheus.NewRegistry() - snowCtx.AvalancheRegisterer = prometheus.NewRegistry() + ctx.Registerer = prometheus.NewRegistry() + ctx.AvalancheRegisterer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -1066,7 +1054,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { close(calledHandleInbound) }, ) - tt.sendF(require, sender, myNodeID) + tt.sendF(require, sender, ctx.NodeID) <-calledHandleInbound } @@ -1085,24 +1073,14 @@ func TestSender_Bootstrap_Responses(t *testing.T) { func TestSender_Single_Request(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() destinationNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() containerID = ids.GenerateTestID() engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name 
string @@ -1120,7 +1098,7 @@ func TestSender_Single_Request(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAncestorsFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) @@ -1128,14 +1106,14 @@ func TestSender_Single_Request(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&message.GetAncestorsFailed{}, msg.Message()) innerMsg := msg.Message().(*message.GetAncestorsFailed) - require.Equal(chainID, innerMsg.ChainID) + require.Equal(ctx.ChainID, innerMsg.ChainID) require.Equal(requestID, innerMsg.RequestID) require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.AncestorsOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAncestors( - chainID, + ctx.ChainID, requestID, deadline, containerID, @@ -1146,7 +1124,7 @@ func TestSender_Single_Request(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, + ctx.SubnetID, gomock.Any(), ).Return(sentTo) }, @@ -1159,7 +1137,7 @@ func TestSender_Single_Request(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) @@ -1167,14 +1145,14 @@ func TestSender_Single_Request(t *testing.T) { assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&message.GetFailed{}, msg.Message()) innerMsg := msg.Message().(*message.GetFailed) - require.Equal(chainID, innerMsg.ChainID) + require.Equal(ctx.ChainID, innerMsg.ChainID) require.Equal(requestID, innerMsg.RequestID) require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.PutOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().Get( - chainID, + ctx.ChainID, requestID, deadline, containerID, @@ -1185,7 +1163,7 @@ func TestSender_Single_Request(t *testing.T) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message set.Of(destinationNodeID), // Node IDs - subnetID, + ctx.SubnetID, gomock.Any(), ).Return(sentTo) }, @@ -1206,10 +1184,13 @@ func TestSender_Single_Request(t *testing.T) { timeoutManager = timeout.NewMockManager(ctrl) router = router.NewMockRouter(ctrl) ) - snowCtx.Registerer = prometheus.NewRegistry() + + // Instantiate new registerers to avoid duplicate metrics + // registration + ctx.Registerer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -1225,12 +1206,12 @@ func TestSender_Single_Request(t *testing.T) { // Case: sending to myself { // Make sure we register requests with the router - expectedFailedMsg := tt.failedMsgF(myNodeID) + expectedFailedMsg := tt.failedMsgF(ctx.NodeID) router.EXPECT().RegisterRequest( gomock.Any(), // Context - myNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.NodeID, // Node ID + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -1249,14 +1230,14 @@ func TestSender_Single_Request(t *testing.T) { }, ) - tt.sendF(require, sender, myNodeID) + tt.sendF(require, sender, ctx.NodeID) <-calledHandleInbound } // Case: Node is benched { - timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(true) + timeoutManager.EXPECT().IsBenched(destinationNodeID, 
ctx.ChainID).Return(true) timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() @@ -1265,8 +1246,8 @@ func TestSender_Single_Request(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context destinationNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -1292,7 +1273,7 @@ func TestSender_Single_Request(t *testing.T) { // Case: Node is not myself, not benched and send fails { - timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(false) + timeoutManager.EXPECT().IsBenched(destinationNodeID, ctx.ChainID).Return(false) timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() @@ -1301,8 +1282,8 @@ func TestSender_Single_Request(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context destinationNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message diff --git a/snow/networking/sender/test_external_sender.go b/snow/networking/sender/test_external_sender.go index 7b8bef90e8eb..ae06187216bf 100644 --- a/snow/networking/sender/test_external_sender.go +++ b/snow/networking/sender/test_external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender diff --git a/snow/networking/sender/traced_sender.go b/snow/networking/sender/traced_sender.go index 2011cdc38fb1..c5fdf6dcbc54 100644 --- a/snow/networking/sender/traced_sender.go +++ b/snow/networking/sender/traced_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender diff --git a/snow/networking/timeout/main_test.go b/snow/networking/timeout/main_test.go index f3bee130e58b..c8a597fa91b1 100644 --- a/snow/networking/timeout/main_test.go +++ b/snow/networking/timeout/main_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout diff --git a/snow/networking/timeout/manager.go b/snow/networking/timeout/manager.go index d94c34a1f663..95a3be25e166 100644 --- a/snow/networking/timeout/manager.go +++ b/snow/networking/timeout/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout @@ -163,7 +163,5 @@ func (m *manager) RegisterRequestToUnreachableValidator() { } func (m *manager) Stop() { - m.stopOnce.Do(func() { - m.tm.Stop() - }) + m.stopOnce.Do(m.tm.Stop) } diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index ce412150b1b6..73313322a81a 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
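One small cleanup above replaces the closure passed to `stopOnce.Do` with the bound method value `m.tm.Stop`. A standalone sketch of the equivalence (the type name here is illustrative, not the real adaptive timeout manager):

```go
package main

import (
	"fmt"
	"sync"
)

type fakeTimeoutManager struct{}

func (fakeTimeoutManager) Stop() { fmt.Println("timeout manager stopped") }

func main() {
	var stopOnce sync.Once
	tm := fakeTimeoutManager{}

	// Equivalent to stopOnce.Do(func() { tm.Stop() }): a bound method value
	// already has type func(), and sync.Once still runs it at most once.
	stopOnce.Do(tm.Stop)
	stopOnce.Do(tm.Stop) // no-op on the second call
}
```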
package timeout @@ -39,7 +39,7 @@ func TestManagerFire(t *testing.T) { wg.Add(1) manager.RegisterRequest( - ids.NodeID{}, + ids.EmptyNodeID, ids.ID{}, true, ids.RequestID{}, diff --git a/snow/networking/timeout/metrics.go b/snow/networking/timeout/metrics.go index 6be45fd2a97e..def073b56558 100644 --- a/snow/networking/timeout/metrics.go +++ b/snow/networking/timeout/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout diff --git a/snow/networking/timeout/mock_manager.go b/snow/networking/timeout/mock_manager.go index 5a1bda7cb0b6..8eeac4c6f8dc 100644 --- a/snow/networking/timeout/mock_manager.go +++ b/snow/networking/timeout/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/timeout (interfaces: Manager) +// +// Generated by this command: +// +// mockgen -package=timeout -destination=snow/networking/timeout/mock_manager.go github.com/ava-labs/avalanchego/snow/networking/timeout Manager +// // Package timeout is a generated GoMock package. package timeout @@ -61,7 +63,7 @@ func (m *MockManager) IsBenched(arg0 ids.NodeID, arg1 ids.ID) bool { } // IsBenched indicates an expected call of IsBenched. -func (mr *MockManagerMockRecorder) IsBenched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) IsBenched(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBenched", reflect.TypeOf((*MockManager)(nil).IsBenched), arg0, arg1) } @@ -75,7 +77,7 @@ func (m *MockManager) RegisterChain(arg0 *snow.ConsensusContext) error { } // RegisterChain indicates an expected call of RegisterChain. -func (mr *MockManagerMockRecorder) RegisterChain(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterChain(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockManager)(nil).RegisterChain), arg0) } @@ -87,7 +89,7 @@ func (m *MockManager) RegisterRequest(arg0 ids.NodeID, arg1 ids.ID, arg2 bool, a } // RegisterRequest indicates an expected call of RegisterRequest. -func (mr *MockManagerMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockManager)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4) } @@ -111,7 +113,7 @@ func (m *MockManager) RegisterResponse(arg0 ids.NodeID, arg1 ids.ID, arg2 ids.Re } // RegisterResponse indicates an expected call of RegisterResponse. -func (mr *MockManagerMockRecorder) RegisterResponse(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterResponse(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterResponse", reflect.TypeOf((*MockManager)(nil).RegisterResponse), arg0, arg1, arg2, arg3, arg4) } @@ -123,7 +125,7 @@ func (m *MockManager) RemoveRequest(arg0 ids.RequestID) { } // RemoveRequest indicates an expected call of RemoveRequest. 
-func (mr *MockManagerMockRecorder) RemoveRequest(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRequest", reflect.TypeOf((*MockManager)(nil).RemoveRequest), arg0) } diff --git a/snow/networking/tracker/mock_resource_tracker.go b/snow/networking/tracker/mock_resource_tracker.go index 4ba16ec98997..438bd44d9b4f 100644 --- a/snow/networking/tracker/mock_resource_tracker.go +++ b/snow/networking/tracker/mock_resource_tracker.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Tracker) +// +// Generated by this command: +// +// mockgen -package=tracker -destination=snow/networking/tracker/mock_resource_tracker.go github.com/ava-labs/avalanchego/snow/networking/tracker Tracker +// // Package tracker is a generated GoMock package. package tracker @@ -47,7 +49,7 @@ func (m *MockTracker) TimeUntilUsage(arg0 ids.NodeID, arg1 time.Time, arg2 float } // TimeUntilUsage indicates an expected call of TimeUntilUsage. -func (mr *MockTrackerMockRecorder) TimeUntilUsage(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) TimeUntilUsage(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeUntilUsage", reflect.TypeOf((*MockTracker)(nil).TimeUntilUsage), arg0, arg1, arg2) } @@ -75,7 +77,7 @@ func (m *MockTracker) Usage(arg0 ids.NodeID, arg1 time.Time) float64 { } // Usage indicates an expected call of Usage. -func (mr *MockTrackerMockRecorder) Usage(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) Usage(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Usage", reflect.TypeOf((*MockTracker)(nil).Usage), arg0, arg1) } diff --git a/snow/networking/tracker/mock_targeter.go b/snow/networking/tracker/mock_targeter.go index d6fe7b540c8f..7e260fe69d6d 100644 --- a/snow/networking/tracker/mock_targeter.go +++ b/snow/networking/tracker/mock_targeter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Targeter) +// +// Generated by this command: +// +// mockgen -package=tracker -destination=snow/networking/tracker/mock_targeter.go github.com/ava-labs/avalanchego/snow/networking/tracker Targeter +// // Package tracker is a generated GoMock package. package tracker @@ -46,7 +48,7 @@ func (m *MockTargeter) TargetUsage(arg0 ids.NodeID) float64 { } // TargetUsage indicates an expected call of TargetUsage. -func (mr *MockTargeterMockRecorder) TargetUsage(arg0 interface{}) *gomock.Call { +func (mr *MockTargeterMockRecorder) TargetUsage(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TargetUsage", reflect.TypeOf((*MockTargeter)(nil).TargetUsage), arg0) } diff --git a/snow/networking/tracker/resource_tracker.go b/snow/networking/tracker/resource_tracker.go index 7910c2fff475..b4b14a7561cf 100644 --- a/snow/networking/tracker/resource_tracker.go +++ b/snow/networking/tracker/resource_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/networking/tracker/resource_tracker_test.go b/snow/networking/tracker/resource_tracker_test.go index 4bc78eb4827a..a87958708cb1 100644 --- a/snow/networking/tracker/resource_tracker_test.go +++ b/snow/networking/tracker/resource_tracker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -48,8 +48,8 @@ func TestCPUTracker(t *testing.T) { tracker, err := NewResourceTracker(prometheus.NewRegistry(), mockUser, meter.ContinuousFactory{}, time.Second) require.NoError(err) - node1 := ids.NodeID{1} - node2 := ids.NodeID{2} + node1 := ids.BuildTestNodeID([]byte{1}) + node2 := ids.BuildTestNodeID([]byte{2}) // Note that all the durations between start and end are [halflife]. startTime1 := time.Now() diff --git a/snow/networking/tracker/targeter.go b/snow/networking/tracker/targeter.go index 4c69ab9508c1..39a7398e391c 100644 --- a/snow/networking/tracker/targeter.go +++ b/snow/networking/tracker/targeter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/snow/networking/tracker/targeter_test.go b/snow/networking/tracker/targeter_test.go index 23096adbed28..cc533791cf91 100644 --- a/snow/networking/tracker/targeter_test.go +++ b/snow/networking/tracker/targeter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -46,10 +46,10 @@ func TestNewTargeter(t *testing.T) { func TestTarget(t *testing.T) { ctrl := gomock.NewController(t) - vdr := ids.NodeID{1} + vdr := ids.BuildTestNodeID([]byte{1}) vdrWeight := uint64(1) totalVdrWeight := uint64(10) - nonVdr := ids.NodeID{2} + nonVdr := ids.BuildTestNodeID([]byte{2}) vdrs := validators.NewManager() require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, vdr, nil, ids.Empty, 1)) require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight)) diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go new file mode 100644 index 000000000000..83fd98925d90 --- /dev/null +++ b/snow/snowtest/snowtest.go @@ -0,0 +1,99 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowtest + +import ( + "context" + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var ( + XChainID = ids.GenerateTestID() + CChainID = ids.GenerateTestID() + PChainID = constants.PlatformChainID + AVAXAssetID = ids.GenerateTestID() + + errMissing = errors.New("missing") + + _ snow.Acceptor = noOpAcceptor{} +) + +type noOpAcceptor struct{} + +func (noOpAcceptor) Accept(*snow.ConsensusContext, ids.ID, []byte) error { + return nil +} + +func ConsensusContext(ctx *snow.Context) *snow.ConsensusContext { + return &snow.ConsensusContext{ + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + BlockAcceptor: noOpAcceptor{}, + TxAcceptor: noOpAcceptor{}, + VertexAcceptor: noOpAcceptor{}, + } +} + +func Context(tb testing.TB, chainID ids.ID) *snow.Context { + require := require.New(tb) + + secretKey, err := bls.NewSecretKey() + require.NoError(err) + publicKey := bls.PublicFromSecretKey(secretKey) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) + require.NoError(aliaser.Alias(XChainID, "X")) + require.NoError(aliaser.Alias(XChainID, XChainID.String())) + require.NoError(aliaser.Alias(CChainID, "C")) + require.NoError(aliaser.Alias(CChainID, CChainID.String())) + + validatorState := &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + XChainID: constants.PrimaryNetworkID, + CChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil + }, + } + + return &snow.Context{ + NetworkID: constants.UnitTestID, + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + NodeID: ids.EmptyNodeID, + PublicKey: publicKey, + + XChainID: XChainID, + CChainID: CChainID, + AVAXAssetID: AVAXAssetID, + + Log: logging.NoLog{}, + BCLookup: aliaser, + Metrics: metrics.NewOptionalGatherer(), + + ValidatorState: validatorState, + ChainDataDir: "", + } +} diff --git a/snow/state.go b/snow/state.go index bb671f26e672..091cd31f50f1 100644 --- a/snow/state.go +++ b/snow/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow diff --git a/snow/uptime/locked_calculator.go b/snow/uptime/locked_calculator.go index 687b5f5905f5..884878ab24f6 100644 --- a/snow/uptime/locked_calculator.go +++ b/snow/uptime/locked_calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
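A minimal usage sketch for the new snowtest helpers introduced above, mirroring how the updated sender and router tests construct their contexts (the test and package names are illustrative):

```go
package mypkg_test

import (
	"testing"

	"github.com/ava-labs/avalanchego/snow/snowtest"
)

func TestSnowtestContextSketch(t *testing.T) {
	// Build a snow.Context for one of the predefined test chains, then wrap
	// it in a ConsensusContext with fresh registries and no-op acceptors,
	// exactly as the updated tests do in place of the removed
	// snow.DefaultConsensusContextTest helper.
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)

	if ctx.ChainID != snowtest.CChainID {
		t.Fatalf("unexpected chain ID %s", ctx.ChainID)
	}
}
```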
package uptime diff --git a/snow/uptime/locked_calculator_test.go b/snow/uptime/locked_calculator_test.go index 254497f62d02..3b073726e654 100644 --- a/snow/uptime/locked_calculator_test.go +++ b/snow/uptime/locked_calculator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 2fc2e1605298..a64b71ca62de 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/uptime/manager_test.go b/snow/uptime/manager_test.go index de2c038086d5..e04fcc3a9fbe 100644 --- a/snow/uptime/manager_test.go +++ b/snow/uptime/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/uptime/mock_calculator.go b/snow/uptime/mock_calculator.go index 389c029e68fe..cc5b5942e639 100644 --- a/snow/uptime/mock_calculator.go +++ b/snow/uptime/mock_calculator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/uptime (interfaces: Calculator) +// +// Generated by this command: +// +// mockgen -package=uptime -destination=snow/uptime/mock_calculator.go github.com/ava-labs/avalanchego/snow/uptime Calculator +// // Package uptime is a generated GoMock package. package uptime @@ -49,7 +51,7 @@ func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Dur } // CalculateUptime indicates an expected call of CalculateUptime. -func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptime", reflect.TypeOf((*MockCalculator)(nil).CalculateUptime), arg0, arg1) } @@ -64,7 +66,7 @@ func (m *MockCalculator) CalculateUptimePercent(arg0 ids.NodeID, arg1 ids.ID) (f } // CalculateUptimePercent indicates an expected call of CalculateUptimePercent. -func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercent", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercent), arg0, arg1) } @@ -79,7 +81,7 @@ func (m *MockCalculator) CalculateUptimePercentFrom(arg0 ids.NodeID, arg1 ids.ID } // CalculateUptimePercentFrom indicates an expected call of CalculateUptimePercentFrom. 
-func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercentFrom", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercentFrom), arg0, arg1, arg2) } diff --git a/snow/uptime/no_op_calculator.go b/snow/uptime/no_op_calculator.go index 44c688e31be7..fb308f4f6030 100644 --- a/snow/uptime/no_op_calculator.go +++ b/snow/uptime/no_op_calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/uptime/state.go b/snow/uptime/state.go index 5b2592acc70d..f9edeb76a3ee 100644 --- a/snow/uptime/state.go +++ b/snow/uptime/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/uptime/test_state.go b/snow/uptime/test_state.go index 58687e1671b8..23879b5cb3a9 100644 --- a/snow/uptime/test_state.go +++ b/snow/uptime/test_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/snow/validators/connector.go b/snow/validators/connector.go index abb28d084c20..e3e7e1f94ed4 100644 --- a/snow/validators/connector.go +++ b/snow/validators/connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/gvalidators/validator_state_client.go b/snow/validators/gvalidators/validator_state_client.go index 51e68592c001..49fa1e641417 100644 --- a/snow/validators/gvalidators/validator_state_client.go +++ b/snow/validators/gvalidators/validator_state_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gvalidators diff --git a/snow/validators/gvalidators/validator_state_server.go b/snow/validators/gvalidators/validator_state_server.go index 5f0dbc7f46c4..5476dca4db99 100644 --- a/snow/validators/gvalidators/validator_state_server.go +++ b/snow/validators/gvalidators/validator_state_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gvalidators @@ -66,7 +66,7 @@ func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetReq i := 0 for _, vdr := range vdrs { vdrPB := &pb.Validator{ - NodeId: vdr.NodeID[:], + NodeId: vdr.NodeID.Bytes(), Weight: vdr.Weight, } if vdr.PublicKey != nil { diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go index da8a66570f0b..0dbf9ebe8952 100644 --- a/snow/validators/gvalidators/validator_state_test.go +++ b/snow/validators/gvalidators/validator_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gvalidators @@ -24,9 +24,8 @@ import ( var errCustom = errors.New("custom") type testState struct { - client *Client - server *validators.MockState - closeFn func() + client *Client + server *validators.MockState } func setupState(t testing.TB, ctrl *gomock.Controller) *testState { @@ -52,11 +51,13 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { require.NoError(err) state.client = NewClient(pb.NewValidatorStateClient(conn)) - state.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return state } @@ -65,7 +66,6 @@ func TestGetMinimumHeight(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -88,7 +88,6 @@ func TestGetCurrentHeight(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -111,7 +110,6 @@ func TestGetSubnetID(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path chainID := ids.GenerateTestID() @@ -135,7 +133,6 @@ func TestGetValidatorSet(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path sk0, err := bls.NewSecretKey() @@ -209,9 +206,6 @@ func benchmarkGetValidatorSet(b *testing.B, vs map[ids.NodeID]*validators.GetVal require := require.New(b) ctrl := gomock.NewController(b) state := setupState(b, ctrl) - defer func() { - state.closeFn() - }() height := uint64(1337) subnetID := ids.GenerateTestID() diff --git a/snow/validators/logger.go b/snow/validators/logger.go index 124aef423fc4..40613b76b68d 100644 --- a/snow/validators/logger.go +++ b/snow/validators/logger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -7,7 +7,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -18,7 +17,6 @@ var _ SetCallbackListener = (*logger)(nil) type logger struct { log logging.Logger - enabled *utils.Atomic[bool] subnetID ids.ID nodeIDs set.Set[ids.NodeID] } @@ -27,14 +25,12 @@ type logger struct { // the specified validators func NewLogger( log logging.Logger, - enabled *utils.Atomic[bool], subnetID ids.ID, nodeIDs ...ids.NodeID, ) SetCallbackListener { nodeIDSet := set.Of(nodeIDs...) 
return &logger{ log: log, - enabled: enabled, subnetID: subnetID, nodeIDs: nodeIDSet, } @@ -46,7 +42,7 @@ func (l *logger) OnValidatorAdded( txID ids.ID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { var pkBytes []byte if pk != nil { pkBytes = bls.PublicKeyToBytes(pk) @@ -65,7 +61,7 @@ func (l *logger) OnValidatorRemoved( nodeID ids.NodeID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("node removed from validator set", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), @@ -79,7 +75,7 @@ func (l *logger) OnValidatorWeightChanged( oldWeight uint64, newWeight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("validator weight changed", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), diff --git a/snow/validators/manager.go b/snow/validators/manager.go index c42ea779d96b..5844c1e7f185 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 01a84201f91d..781d2e784e1d 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -216,30 +216,30 @@ func TestLen(t *testing.T) { m := NewManager() subnetID := ids.GenerateTestID() - len := m.Count(subnetID) - require.Zero(len) + count := m.Count(subnetID) + require.Zero(count) nodeID0 := ids.GenerateTestNodeID() require.NoError(m.AddStaker(subnetID, nodeID0, nil, ids.Empty, 1)) - len = m.Count(subnetID) - require.Equal(1, len) + count = m.Count(subnetID) + require.Equal(1, count) nodeID1 := ids.GenerateTestNodeID() require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1)) - len = m.Count(subnetID) - require.Equal(2, len) + count = m.Count(subnetID) + require.Equal(2, count) require.NoError(m.RemoveWeight(subnetID, nodeID1, 1)) - len = m.Count(subnetID) - require.Equal(1, len) + count = m.Count(subnetID) + require.Equal(1, count) require.NoError(m.RemoveWeight(subnetID, nodeID0, 1)) - len = m.Count(subnetID) - require.Zero(len) + count = m.Count(subnetID) + require.Zero(count) } func TestGetMap(t *testing.T) { @@ -324,9 +324,9 @@ func TestGetMap(t *testing.T) { func TestWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) m := NewManager() @@ -398,12 +398,12 @@ func TestString(t *testing.T) { require.NoError(m.AddStaker(subnetID0, nodeID1, nil, ids.Empty, math.MaxInt64-1)) require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 1)) - expected := "Validator Manager: (Size = 2)\n" + - " Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806\n" + - " 
Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1)\n" + - " Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1" + expected := `Validator Manager: (Size = 2) + Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806 + Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1) + Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1` result := m.String() require.Equal(expected, result) } @@ -411,7 +411,7 @@ func TestString(t *testing.T) { func TestAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -442,7 +442,7 @@ func TestAddCallback(t *testing.T) { func TestAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -480,7 +480,7 @@ func TestAddWeightCallback(t *testing.T) { func TestRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -518,7 +518,7 @@ func TestRemoveWeightCallback(t *testing.T) { func TestValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/snow/validators/mock_manager.go b/snow/validators/mock_manager.go index 2b99245710fb..b622ba11223a 100644 --- a/snow/validators/mock_manager.go +++ b/snow/validators/mock_manager.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Do not include this in mocks.mockgen.txt as bls package won't be available. // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: Manager) +// Source: snow/validators/manager.go +// +// Generated by this command: +// +// mockgen -source=snow/validators/manager.go -destination=snow/validators/mock_manager.go -package=validators -exclude_interfaces=SetCallbackListener +// // Package validators is a generated GoMock package. package validators @@ -41,158 +42,143 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // AddStaker mocks base method. -func (m *MockManager) AddStaker(arg0 ids.ID, arg1 ids.NodeID, arg2 *bls.PublicKey, arg3 ids.ID, arg4 uint64) error { +func (m *MockManager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddStaker", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "AddStaker", subnetID, nodeID, pk, txID, weight) ret0, _ := ret[0].(error) return ret0 } // AddStaker indicates an expected call of AddStaker. 
-func (mr *MockManagerMockRecorder) AddStaker(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) AddStaker(subnetID, nodeID, pk, txID, weight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStaker", reflect.TypeOf((*MockManager)(nil).AddStaker), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStaker", reflect.TypeOf((*MockManager)(nil).AddStaker), subnetID, nodeID, pk, txID, weight) } // AddWeight mocks base method. -func (m *MockManager) AddWeight(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { +func (m *MockManager) AddWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddWeight", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AddWeight", subnetID, nodeID, weight) ret0, _ := ret[0].(error) return ret0 } // AddWeight indicates an expected call of AddWeight. -func (mr *MockManagerMockRecorder) AddWeight(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) AddWeight(subnetID, nodeID, weight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockManager)(nil).AddWeight), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockManager)(nil).AddWeight), subnetID, nodeID, weight) } -// Contains mocks base method. -func (m *MockManager) Contains(arg0 ids.ID, arg1 ids.NodeID) bool { +// Count mocks base method. +func (m *MockManager) Count(subnetID ids.ID) int { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Contains", arg0, arg1) - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "Count", subnetID) + ret0, _ := ret[0].(int) return ret0 } -// Contains indicates an expected call of Contains. -func (mr *MockManagerMockRecorder) Contains(arg0, arg1 interface{}) *gomock.Call { +// Count indicates an expected call of Count. +func (mr *MockManagerMockRecorder) Count(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Contains", reflect.TypeOf((*MockManager)(nil).Contains), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockManager)(nil).Count), subnetID) } // GetMap mocks base method. -func (m *MockManager) GetMap(arg0 ids.ID) map[ids.NodeID]*GetValidatorOutput { +func (m *MockManager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMap", arg0) + ret := m.ctrl.Call(m, "GetMap", subnetID) ret0, _ := ret[0].(map[ids.NodeID]*GetValidatorOutput) return ret0 } // GetMap indicates an expected call of GetMap. -func (mr *MockManagerMockRecorder) GetMap(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetMap(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMap", reflect.TypeOf((*MockManager)(nil).GetMap), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMap", reflect.TypeOf((*MockManager)(nil).GetMap), subnetID) } // GetValidator mocks base method. 
-func (m *MockManager) GetValidator(arg0 ids.ID, arg1 ids.NodeID) (*Validator, bool) { +func (m *MockManager) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Validator, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidator", arg0, arg1) + ret := m.ctrl.Call(m, "GetValidator", subnetID, nodeID) ret0, _ := ret[0].(*Validator) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetValidator indicates an expected call of GetValidator. -func (mr *MockManagerMockRecorder) GetValidator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetValidator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidator", reflect.TypeOf((*MockManager)(nil).GetValidator), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidator", reflect.TypeOf((*MockManager)(nil).GetValidator), subnetID, nodeID) } // GetValidatorIDs mocks base method. -func (m *MockManager) GetValidatorIDs(arg0 ids.ID) ([]ids.NodeID, error) { +func (m *MockManager) GetValidatorIDs(subnetID ids.ID) []ids.NodeID { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorIDs", arg0) + ret := m.ctrl.Call(m, "GetValidatorIDs", subnetID) ret0, _ := ret[0].([]ids.NodeID) - ret1, _ := ret[1].(error) - return ret0, ret1 + return ret0 } // GetValidatorIDs indicates an expected call of GetValidatorIDs. -func (mr *MockManagerMockRecorder) GetValidatorIDs(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetValidatorIDs(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorIDs", reflect.TypeOf((*MockManager)(nil).GetValidatorIDs), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorIDs", reflect.TypeOf((*MockManager)(nil).GetValidatorIDs), subnetID) } // GetWeight mocks base method. -func (m *MockManager) GetWeight(arg0 ids.ID, arg1 ids.NodeID) uint64 { +func (m *MockManager) GetWeight(subnetID ids.ID, nodeID ids.NodeID) uint64 { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWeight", arg0, arg1) + ret := m.ctrl.Call(m, "GetWeight", subnetID, nodeID) ret0, _ := ret[0].(uint64) return ret0 } // GetWeight indicates an expected call of GetWeight. -func (mr *MockManagerMockRecorder) GetWeight(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockManager)(nil).GetWeight), arg0, arg1) -} - -// Len mocks base method. -func (m *MockManager) Len(arg0 ids.ID) int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Len", arg0) - ret0, _ := ret[0].(int) - return ret0 -} - -// Len indicates an expected call of Len. -func (mr *MockManagerMockRecorder) Len(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetWeight(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockManager)(nil).Len), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockManager)(nil).GetWeight), subnetID, nodeID) } // RegisterCallbackListener mocks base method. 
-func (m *MockManager) RegisterCallbackListener(arg0 ids.ID, arg1 SetCallbackListener) { +func (m *MockManager) RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterCallbackListener", arg0, arg1) + m.ctrl.Call(m, "RegisterCallbackListener", subnetID, listener) } // RegisterCallbackListener indicates an expected call of RegisterCallbackListener. -func (mr *MockManagerMockRecorder) RegisterCallbackListener(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterCallbackListener(subnetID, listener any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockManager)(nil).RegisterCallbackListener), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockManager)(nil).RegisterCallbackListener), subnetID, listener) } // RemoveWeight mocks base method. -func (m *MockManager) RemoveWeight(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { +func (m *MockManager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveWeight", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "RemoveWeight", subnetID, nodeID, weight) ret0, _ := ret[0].(error) return ret0 } // RemoveWeight indicates an expected call of RemoveWeight. -func (mr *MockManagerMockRecorder) RemoveWeight(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveWeight(subnetID, nodeID, weight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockManager)(nil).RemoveWeight), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockManager)(nil).RemoveWeight), subnetID, nodeID, weight) } // Sample mocks base method. -func (m *MockManager) Sample(arg0 ids.ID, arg1 int) ([]ids.NodeID, error) { +func (m *MockManager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sample", arg0, arg1) + ret := m.ctrl.Call(m, "Sample", subnetID, size) ret0, _ := ret[0].([]ids.NodeID) ret1, _ := ret[1].(error) return ret0, ret1 } // Sample indicates an expected call of Sample. -func (mr *MockManagerMockRecorder) Sample(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Sample(subnetID, size any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockManager)(nil).Sample), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockManager)(nil).Sample), subnetID, size) } // String mocks base method. @@ -210,31 +196,31 @@ func (mr *MockManagerMockRecorder) String() *gomock.Call { } // SubsetWeight mocks base method. -func (m *MockManager) SubsetWeight(arg0 ids.ID, arg1 set.Set[ids.NodeID]) (uint64, error) { +func (m *MockManager) SubsetWeight(subnetID ids.ID, validatorIDs set.Set[ids.NodeID]) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubsetWeight", arg0, arg1) + ret := m.ctrl.Call(m, "SubsetWeight", subnetID, validatorIDs) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // SubsetWeight indicates an expected call of SubsetWeight. 
-func (mr *MockManagerMockRecorder) SubsetWeight(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) SubsetWeight(subnetID, validatorIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockManager)(nil).SubsetWeight), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockManager)(nil).SubsetWeight), subnetID, validatorIDs) } // TotalWeight mocks base method. -func (m *MockManager) TotalWeight(arg0 ids.ID) (uint64, error) { +func (m *MockManager) TotalWeight(subnetID ids.ID) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TotalWeight", arg0) + ret := m.ctrl.Call(m, "TotalWeight", subnetID) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // TotalWeight indicates an expected call of TotalWeight. -func (mr *MockManagerMockRecorder) TotalWeight(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) TotalWeight(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalWeight", reflect.TypeOf((*MockManager)(nil).TotalWeight), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalWeight", reflect.TypeOf((*MockManager)(nil).TotalWeight), subnetID) } diff --git a/snow/validators/mock_state.go b/snow/validators/mock_state.go index a438b0eb46ef..6bed638becd8 100644 --- a/snow/validators/mock_state.go +++ b/snow/validators/mock_state.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: State) +// +// Generated by this command: +// +// mockgen -package=validators -destination=snow/validators/mock_state.go github.com/ava-labs/avalanchego/snow/validators State +// // Package validators is a generated GoMock package. package validators @@ -48,7 +50,7 @@ func (m *MockState) GetCurrentHeight(arg0 context.Context) (uint64, error) { } // GetCurrentHeight indicates an expected call of GetCurrentHeight. -func (mr *MockStateMockRecorder) GetCurrentHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetCurrentHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentHeight", reflect.TypeOf((*MockState)(nil).GetCurrentHeight), arg0) } @@ -63,7 +65,7 @@ func (m *MockState) GetMinimumHeight(arg0 context.Context) (uint64, error) { } // GetMinimumHeight indicates an expected call of GetMinimumHeight. -func (mr *MockStateMockRecorder) GetMinimumHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetMinimumHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinimumHeight", reflect.TypeOf((*MockState)(nil).GetMinimumHeight), arg0) } @@ -78,7 +80,7 @@ func (m *MockState) GetSubnetID(arg0 context.Context, arg1 ids.ID) (ids.ID, erro } // GetSubnetID indicates an expected call of GetSubnetID. 
-func (mr *MockStateMockRecorder) GetSubnetID(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetSubnetID(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetID", reflect.TypeOf((*MockState)(nil).GetSubnetID), arg0, arg1) } @@ -93,7 +95,7 @@ func (m *MockState) GetValidatorSet(arg0 context.Context, arg1 uint64, arg2 ids. } // GetValidatorSet indicates an expected call of GetValidatorSet. -func (mr *MockStateMockRecorder) GetValidatorSet(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetValidatorSet(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorSet", reflect.TypeOf((*MockState)(nil).GetValidatorSet), arg0, arg1, arg2) } diff --git a/snow/validators/mock_subnet_connector.go b/snow/validators/mock_subnet_connector.go index e5c985bd56cc..b9f3ee0519b8 100644 --- a/snow/validators/mock_subnet_connector.go +++ b/snow/validators/mock_subnet_connector.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: SubnetConnector) +// +// Generated by this command: +// +// mockgen -package=validators -destination=snow/validators/mock_subnet_connector.go github.com/ava-labs/avalanchego/snow/validators SubnetConnector +// // Package validators is a generated GoMock package. package validators @@ -47,7 +49,7 @@ func (m *MockSubnetConnector) ConnectedSubnet(arg0 context.Context, arg1 ids.Nod } // ConnectedSubnet indicates an expected call of ConnectedSubnet. -func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectedSubnet", reflect.TypeOf((*MockSubnetConnector)(nil).ConnectedSubnet), arg0, arg1, arg2) } diff --git a/snow/validators/set.go b/snow/validators/set.go index dfa294a70bbe..5e7c81a2310e 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 99651e7930e0..4554f930fa37 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators @@ -166,30 +166,30 @@ func TestSetLen(t *testing.T) { s := newSet() - len := s.Len() - require.Zero(len) + setLen := s.Len() + require.Zero(setLen) nodeID0 := ids.GenerateTestNodeID() require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) - len = s.Len() - require.Equal(1, len) + setLen = s.Len() + require.Equal(1, setLen) nodeID1 := ids.GenerateTestNodeID() require.NoError(s.Add(nodeID1, nil, ids.Empty, 1)) - len = s.Len() - require.Equal(2, len) + setLen = s.Len() + require.Equal(2, setLen) require.NoError(s.RemoveWeight(nodeID1, 1)) - len = s.Len() - require.Equal(1, len) + setLen = s.Len() + require.Equal(1, setLen) require.NoError(s.RemoveWeight(nodeID0, 1)) - len = s.Len() - require.Zero(len) + setLen = s.Len() + require.Zero(setLen) } func TestSetMap(t *testing.T) { @@ -273,9 +273,9 @@ func TestSetMap(t *testing.T) { func TestSetWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) s := newSet() @@ -332,19 +332,19 @@ func TestSetString(t *testing.T) { require := require.New(t) nodeID0 := ids.EmptyNodeID - nodeID1 := ids.NodeID{ + nodeID1 := ids.BuildTestNodeID([]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } + }) s := newSet() require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) require.NoError(s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1)) - expected := "Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806" + expected := `Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806` result := s.String() require.Equal(expected, result) } @@ -385,7 +385,7 @@ func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight func TestSetAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -413,7 +413,7 @@ func TestSetAddCallback(t *testing.T) { func TestSetAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -447,7 +447,7 @@ func TestSetAddWeightCallback(t *testing.T) { func TestSetRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -481,7 +481,7 @@ func TestSetRemoveWeightCallback(t *testing.T) { func TestSetValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/snow/validators/state.go b/snow/validators/state.go index fa9ef2783165..3f92df35231b 100644 --- a/snow/validators/state.go +++ b/snow/validators/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators diff --git a/snow/validators/subnet_connector.go b/snow/validators/subnet_connector.go index 6b4a24bd85e5..06b02ff90820 100644 --- a/snow/validators/subnet_connector.go +++ b/snow/validators/subnet_connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/test_state.go b/snow/validators/test_state.go index b27e6d972613..ee4102cf7194 100644 --- a/snow/validators/test_state.go +++ b/snow/validators/test_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -23,7 +23,7 @@ var ( var _ State = (*TestState)(nil) type TestState struct { - T *testing.T + T testing.TB CantGetMinimumHeight, CantGetCurrentHeight, diff --git a/snow/validators/traced_state.go b/snow/validators/traced_state.go index e1f5472001e2..126a2b009eb0 100644 --- a/snow/validators/traced_state.go +++ b/snow/validators/traced_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/unhandled_subnet_connector.go b/snow/validators/unhandled_subnet_connector.go index de7225aa2a80..08447c4582ad 100644 --- a/snow/validators/unhandled_subnet_connector.go +++ b/snow/validators/unhandled_subnet_connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/snow/validators/validator.go b/snow/validators/validator.go index 56664ddc00a1..499b5189e424 100644 --- a/snow/validators/validator.go +++ b/snow/validators/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/staking/asn1.go b/staking/asn1.go index 13579600eb3c..afd817a95cd6 100644 --- a/staking/asn1.go +++ b/staking/asn1.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking @@ -6,6 +6,7 @@ package staking import ( "crypto" "crypto/x509" + "encoding/asn1" "fmt" // Explicitly import for the crypto.RegisterHash init side-effects. 
@@ -14,11 +15,28 @@ import ( _ "crypto/sha256" ) -// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L326-L350 -var signatureAlgorithmVerificationDetails = map[x509.SignatureAlgorithm]x509.PublicKeyAlgorithm{ - x509.SHA256WithRSA: x509.RSA, - x509.ECDSAWithSHA256: x509.ECDSA, -} +var ( + // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L433-L452 + // + // RFC 3279, 2.3 Public Key Algorithms + // + // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // rsadsi(113549) pkcs(1) 1 } + // + // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters + // + // id-ecPublicKey OBJECT IDENTIFIER ::= { + // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + + // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L326-L350 + signatureAlgorithmVerificationDetails = map[x509.SignatureAlgorithm]x509.PublicKeyAlgorithm{ + x509.SHA256WithRSA: x509.RSA, + x509.ECDSAWithSHA256: x509.ECDSA, + } +) func init() { if !crypto.SHA256.Available() { diff --git a/staking/camino.go b/staking/camino.go new file mode 100644 index 000000000000..4e9514fe14b7 --- /dev/null +++ b/staking/camino.go @@ -0,0 +1,93 @@ +// Copyright (C) 2024, Chain4Travel AG. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "errors" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +var ( + errWrongCertificateVersion = errors.New("certificate version must be 3 (2 if versioning starts with 0)") + errNoCertificateExtensions = errors.New("certificate must have extensions") + errMissingNodePubKey = errors.New("certificate must have extension with node public key") +) + +func TLSCertToID(cert *x509.Certificate) (ids.NodeID, error) { + pubKeyBytes, err := secp256k1.RecoverSecp256PublicKey(cert) + if err != nil { + return ids.EmptyNodeID, err + } + return ids.ToNodeID(pubKeyBytes) +} + +func getNodeID(input cryptobyte.String, certVersion int, pubKey crypto.PublicKey) (ids.NodeID, error) { + if certVersion != 2 { + return ids.EmptyNodeID, errWrongCertificateVersion + } + var extensions cryptobyte.String + var hasExtensions bool + if !input.ReadOptionalASN1(&extensions, &hasExtensions, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) { + return ids.EmptyNodeID, errors.New("x509: malformed extensions") + } + if !hasExtensions { + return ids.EmptyNodeID, errNoCertificateExtensions + } + if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) { + return ids.EmptyNodeID, errors.New("x509: malformed extensions") + } + var secp256k1PubKeyBytes []byte +L: + for !extensions.Empty() { + var extension cryptobyte.String + if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { + return ids.EmptyNodeID, errors.New("x509: malformed extension") + } + ext, err := parseExtension(extension) + if err != nil { + return ids.EmptyNodeID, err + } + secp256k1PubKeyBytes, err = secp256k1.RecoverSecp256PublicKeyFromExtension(&ext, pubKey) + switch { + case err == secp256k1.ErrWrongExtensionType: + continue + case err == nil: + break L + default: + return ids.EmptyNodeID, err + } + } + + if secp256k1PubKeyBytes == 
nil { + return ids.EmptyNodeID, errMissingNodePubKey + } + + return ids.ToNodeID(secp256k1PubKeyBytes) +} + +func parseExtension(der cryptobyte.String) (pkix.Extension, error) { + var ext pkix.Extension + if !der.ReadASN1ObjectIdentifier(&ext.Id) { + return ext, errors.New("x509: malformed extension OID field") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&ext.Critical) { + return ext, errors.New("x509: malformed extension critical field") + } + } + var val cryptobyte.String + if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) { + return ext, errors.New("x509: malformed extension value field") + } + ext.Value = val + return ext, nil +} diff --git a/staking/camino_test.go b/staking/camino_test.go new file mode 100644 index 000000000000..2354e4da89a1 --- /dev/null +++ b/staking/camino_test.go @@ -0,0 +1,73 @@ +// Copyright (C) 2024, Chain4Travel AG. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + utilsSecp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Convenient way to run generateTestCertFile. Comment out SkipNow before running. +func TestGenerateTestCertFile(t *testing.T) { + t.SkipNow() + const certPath = "large_rsa_key.cert" + require.NoError(t, generateTestCertFile(certPath)) +} + +// Creates a cert file with a double-sized RSA key. This cert file is used by tests in this package. +func generateTestCertFile(certPath string) error { + // Create RSA key to sign cert with + rsaKey, err := rsa.GenerateKey(rand.Reader, 8192) // twice as many bits! + if err != nil { + return fmt.Errorf("couldn't generate rsa key: %w", err) + } + // Create SECP256K1 key to sign cert with + secpKey := utilsSecp256k1.RsaPrivateKeyToSecp256PrivateKey(rsaKey) + extension := utilsSecp256k1.SignRsaPublicKey(secpKey, &rsaKey.PublicKey) + + // Create self-signed staking cert + certTemplate := &x509.Certificate{ + SerialNumber: big.NewInt(0), + NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC), + NotAfter: time.Now().AddDate(100, 0, 0), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment, + ExtraExtensions: []pkix.Extension{*extension}, + BasicConstraintsValid: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &rsaKey.PublicKey, rsaKey) + if err != nil { + return fmt.Errorf("couldn't create certificate: %w", err) + } + + // Ensure the directory where the key/cert will live exists + if err := os.MkdirAll(filepath.Dir(certPath), perms.ReadWriteExecute); err != nil { + return fmt.Errorf("couldn't create path for cert: %w", err) + } + + // Write cert to disk + certFile, err := os.Create(certPath) + if err != nil { + return fmt.Errorf("couldn't create cert file: %w", err) + } + if _, err := certFile.Write(certBytes); err != nil { + return fmt.Errorf("couldn't write cert file: %w", err) + } + if err := certFile.Close(); err != nil { + return fmt.Errorf("couldn't close cert file: %w", err) + } + return nil +} diff --git a/staking/certificate.go b/staking/certificate.go index 9521f43abef0..fba0205819c6 100644 --- a/staking/certificate.go +++ b/staking/certificate.go @@ -1,13 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc.
All rights reserved. // See the file LICENSE for licensing terms. package staking -import "crypto/x509" +import ( + "crypto" + "crypto/x509" + + "github.com/ava-labs/avalanchego/ids" +) type Certificate struct { - Raw []byte - PublicKey any + Raw []byte + NodeID ids.NodeID + PublicKey crypto.PublicKey + // TODO: Remove after v1.11.x activates. SignatureAlgorithm x509.SignatureAlgorithm } @@ -15,10 +22,15 @@ type Certificate struct { // // Invariant: The provided certificate must be a parseable into a staking // certificate. -func CertificateFromX509(cert *x509.Certificate) *Certificate { +func CertificateFromX509(cert *x509.Certificate) (*Certificate, error) { + nodeID, err := TLSCertToID(cert) + if err != nil { + return nil, err + } return &Certificate{ Raw: cert.Raw, + NodeID: nodeID, PublicKey: cert.PublicKey, SignatureAlgorithm: cert.SignatureAlgorithm, - } + }, nil } diff --git a/staking/large_rsa_key.cert b/staking/large_rsa_key.cert index 45e60a6b7991..d4b2d69f6cf1 100644 Binary files a/staking/large_rsa_key.cert and b/staking/large_rsa_key.cert differ diff --git a/staking/large_rsa_key.sig b/staking/large_rsa_key.sig deleted file mode 100644 index 61000a9903cf..000000000000 Binary files a/staking/large_rsa_key.sig and /dev/null differ diff --git a/staking/parse.go b/staking/parse.go index 04b47aba5a32..ef94a1550941 100644 --- a/staking/parse.go +++ b/staking/parse.go @@ -1,15 +1,194 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking -import "crypto/x509" +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" + "math/big" + + "golang.org/x/crypto/cryptobyte" + + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/ava-labs/avalanchego/utils/units" +) + +const ( + MaxCertificateLen = 2 * units.KiB + + allowedRSASmallModulusLen = 2048 + allowedRSALargeModulusLen = 4096 + allowedRSAPublicExponentValue = 65537 +) + +var ( + ErrCertificateTooLarge = fmt.Errorf("staking: certificate length is greater than %d", MaxCertificateLen) + ErrMalformedCertificate = errors.New("staking: malformed certificate") + ErrMalformedTBSCertificate = errors.New("staking: malformed tbs certificate") + ErrMalformedVersion = errors.New("staking: malformed version") + ErrMalformedSerialNumber = errors.New("staking: malformed serial number") + ErrMalformedSignatureAlgorithmIdentifier = errors.New("staking: malformed signature algorithm identifier") + ErrMalformedIssuer = errors.New("staking: malformed issuer") + ErrMalformedValidity = errors.New("staking: malformed validity") + ErrMalformedSPKI = errors.New("staking: malformed spki") + ErrMalformedPublicKeyAlgorithmIdentifier = errors.New("staking: malformed public key algorithm identifier") + ErrMalformedSubjectPublicKey = errors.New("staking: malformed subject public key") + ErrMalformedOID = errors.New("staking: malformed oid") + ErrInvalidRSAPublicKey = errors.New("staking: invalid RSA public key") + ErrInvalidRSAModulus = errors.New("staking: invalid RSA modulus") + ErrInvalidRSAPublicExponent = errors.New("staking: invalid RSA public exponent") + ErrRSAModulusNotPositive = errors.New("staking: RSA modulus is not a positive number") + ErrUnsupportedRSAModulusBitLen = errors.New("staking: unsupported RSA modulus bitlen") + ErrRSAModulusIsEven = errors.New("staking: RSA modulus is an even number") + ErrUnsupportedRSAPublicExponent = 
errors.New("staking: unsupported RSA public exponent") + ErrFailedUnmarshallingEllipticCurvePoint = errors.New("staking: failed to unmarshal elliptic curve point") + ErrUnknownPublicKeyAlgorithm = errors.New("staking: unknown public key algorithm") +) // ParseCertificate parses a single certificate from the given ASN.1 DER data. +// +// TODO: Remove after v1.11.x activates. func ParseCertificate(der []byte) (*Certificate, error) { - cert, err := x509.ParseCertificate(der) + x509Cert, err := x509.ParseCertificate(der) if err != nil { return nil, err } - return CertificateFromX509(cert), nil + stakingCert, err := CertificateFromX509(x509Cert) + if err != nil { + return nil, err + } + return stakingCert, ValidateCertificate(stakingCert) +} + +// ParseCertificatePermissive parses a single certificate from the given ASN.1. +// +// This function does not validate that the certificate is valid to be used +// against normal TLS implementations. +// +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L789-L968 +func ParseCertificatePermissive(bytes []byte) (*Certificate, error) { + if len(bytes) > MaxCertificateLen { + return nil, ErrCertificateTooLarge + } + + input := cryptobyte.String(bytes) + // Consume the length and tag bytes. + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedCertificate + } + + // Read the "to be signed" certificate into input. + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedTBSCertificate + } + var certVersion int + if !input.ReadOptionalASN1Integer(&certVersion, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) { + return nil, ErrMalformedVersion + } + if !input.SkipASN1(cryptobyte_asn1.INTEGER) { + return nil, ErrMalformedSerialNumber + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedSignatureAlgorithmIdentifier + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedIssuer + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedValidity + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedIssuer + } + + // Read the "subject public key info" into spki. + var spki cryptobyte.String + if !input.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedSPKI + } + + // Read the public key algorithm identifier. + var pkAISeq cryptobyte.String + if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedPublicKeyAlgorithmIdentifier + } + var pkAI asn1.ObjectIdentifier + if !pkAISeq.ReadASN1ObjectIdentifier(&pkAI) { + return nil, ErrMalformedOID + } + + // Note: Unlike the x509 package, we require parsing the public key. 
+ + var spk asn1.BitString + if !spki.ReadASN1BitString(&spk) { + return nil, ErrMalformedSubjectPublicKey + } + + publicKey, signatureAlgorithm, err := parsePublicKey(pkAI, spk) + cert := &Certificate{ + Raw: bytes, + SignatureAlgorithm: signatureAlgorithm, + PublicKey: publicKey, + } + if err != nil { + return cert, err + } + + nodeID, err := getNodeID(input, certVersion, publicKey) + cert.NodeID = nodeID + return cert, err +} + +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L215-L306 +func parsePublicKey(oid asn1.ObjectIdentifier, publicKey asn1.BitString) (crypto.PublicKey, x509.SignatureAlgorithm, error) { + der := cryptobyte.String(publicKey.RightAlign()) + switch { + case oid.Equal(oidPublicKeyRSA): + pub := &rsa.PublicKey{N: new(big.Int)} + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, 0, ErrInvalidRSAPublicKey + } + if !der.ReadASN1Integer(pub.N) { + return nil, 0, ErrInvalidRSAModulus + } + if !der.ReadASN1Integer(&pub.E) { + return nil, 0, ErrInvalidRSAPublicExponent + } + + if pub.N.Sign() <= 0 { + return nil, 0, ErrRSAModulusNotPositive + } + + if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { + return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) + } + if pub.N.Bit(0) == 0 { + return nil, 0, ErrRSAModulusIsEven + } + if pub.E != allowedRSAPublicExponentValue { + return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) + } + return pub, x509.SHA256WithRSA, nil + case oid.Equal(oidPublicKeyECDSA): + namedCurve := elliptic.P256() + x, y := elliptic.Unmarshal(namedCurve, der) + if x == nil { + return nil, 0, ErrFailedUnmarshallingEllipticCurvePoint + } + return &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + }, x509.ECDSAWithSHA256, nil + default: + return nil, 0, ErrUnknownPublicKeyAlgorithm + } } diff --git a/staking/parse_test.go b/staking/parse_test.go new file mode 100644 index 000000000000..e9006e4ddcc0 --- /dev/null +++ b/staking/parse_test.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "testing" + + _ "embed" + + "github.com/stretchr/testify/require" +) + +var ( + //go:embed large_rsa_key.cert + largeRSAKeyCert []byte + + parsers = []struct { + name string + parse func([]byte) (*Certificate, error) + }{ + { + name: "ParseCertificate", + parse: ParseCertificate, + }, + { + name: "ParseCertificatePermissive", + parse: ParseCertificatePermissive, + }, + } +) + +func TestParseCheckLargeCert(t *testing.T) { + for _, parser := range parsers { + t.Run(parser.name, func(t *testing.T) { + _, err := parser.parse(largeRSAKeyCert) + require.ErrorIs(t, err, ErrCertificateTooLarge) + }) + } +} + +func BenchmarkParse(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + bytes := tlsCert.Leaf.Raw + for _, parser := range parsers { + b.Run(parser.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err = parser.parse(bytes) + require.NoError(b, err) + } + }) + } +} + +func FuzzParseCertificate(f *testing.F) { + tlsCert, err := NewTLSCert() + require.NoError(f, err) + + f.Add(tlsCert.Leaf.Raw) + f.Add(largeRSAKeyCert) + f.Fuzz(func(t *testing.T, certBytes []byte) { + require := require.New(t) + + // Verify that any certificate that can be parsed by ParseCertificate + // can also be parsed by ParseCertificatePermissive. 
+ { + strictCert, err := ParseCertificate(certBytes) + if err == nil { + permissiveCert, err := ParseCertificatePermissive(certBytes) + require.NoError(err) + require.Equal(strictCert, permissiveCert) + } + } + + // Verify that any certificate that can't be parsed by + // ParseCertificatePermissive also can't be parsed by ParseCertificate. + { + cert, err := ParseCertificatePermissive(certBytes) + if err == nil { + require.NoError(ValidateCertificate(cert)) + } else { + _, err = ParseCertificate(certBytes) + require.Error(err) //nolint:forbidigo + } + } + }) +} diff --git a/staking/tls.go b/staking/tls.go index 7e984df51795..7442c8464d90 100644 --- a/staking/tls.go +++ b/staking/tls.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking diff --git a/staking/tls_test.go b/staking/tls_test.go index 2282090b4c8e..6de376c2a538 100644 --- a/staking/tls_test.go +++ b/staking/tls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking diff --git a/staking/verify.go b/staking/verify.go index 8da442e8b998..dd4255455ff0 100644 --- a/staking/verify.go +++ b/staking/verify.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking @@ -11,28 +11,13 @@ import ( "crypto/x509" "errors" "fmt" - - "github.com/ava-labs/avalanchego/utils/units" -) - -// MaxRSAKeyBitLen is the maximum RSA key size in bits that we are willing to -// parse. 
-// -// https://github.com/golang/go/blob/go1.19.12/src/crypto/tls/handshake_client.go#L860-L862 -const ( - MaxCertificateLen = 16 * units.KiB - MaxRSAKeyByteLen = units.KiB - MaxRSAKeyBitLen = 8 * MaxRSAKeyByteLen ) var ( - ErrCertificateTooLarge = fmt.Errorf("staking: certificate length is greater than %d", MaxCertificateLen) - ErrUnsupportedAlgorithm = errors.New("staking: cannot verify signature: unsupported algorithm") - ErrPublicKeyAlgoMismatch = errors.New("staking: signature algorithm specified different public key type") - ErrInvalidRSAPublicKey = errors.New("staking: invalid RSA public key") - ErrInvalidECDSAPublicKey = errors.New("staking: invalid ECDSA public key") - ErrECDSAVerificationFailure = errors.New("staking: ECDSA verification failure") - ErrED25519VerificationFailure = errors.New("staking: Ed25519 verification failure") + ErrUnsupportedAlgorithm = errors.New("staking: cannot verify signature: unsupported algorithm") + ErrPublicKeyAlgoMismatch = errors.New("staking: signature algorithm specified different public key type") + ErrInvalidECDSAPublicKey = errors.New("staking: invalid ECDSA public key") + ErrECDSAVerificationFailure = errors.New("staking: ECDSA verification failure") ) // CheckSignature verifies that the signature is a valid signature over signed @@ -41,10 +26,6 @@ var ( // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L793-L797 // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L816-L879 func CheckSignature(cert *Certificate, msg []byte, signature []byte) error { - if err := ValidateCertificate(cert); err != nil { - return err - } - hasher := crypto.SHA256.New() _, err := hasher.Write(msg) if err != nil { @@ -67,6 +48,8 @@ func CheckSignature(cert *Certificate, msg []byte, signature []byte) error { // ValidateCertificate verifies that this certificate conforms to the required // staking format assuming that it was already able to be parsed. +// +// TODO: Remove after v1.11.x activates. func ValidateCertificate(cert *Certificate) error { if len(cert.Raw) > MaxCertificateLen { return ErrCertificateTooLarge @@ -82,8 +65,14 @@ func ValidateCertificate(cert *Certificate) error { if pubkeyAlgo != x509.RSA { return signaturePublicKeyAlgoMismatchError(pubkeyAlgo, pub) } - if bitLen := pub.N.BitLen(); bitLen > MaxRSAKeyBitLen { - return fmt.Errorf("%w: bitLen=%d > maxBitLen=%d", ErrInvalidRSAPublicKey, bitLen, MaxRSAKeyBitLen) + if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { + return fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) + } + if pub.N.Bit(0) == 0 { + return ErrRSAModulusIsEven + } + if pub.E != allowedRSAPublicExponentValue { + return fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) } return nil case *ecdsa.PublicKey: diff --git a/staking/verify_test.go b/staking/verify_test.go deleted file mode 100644 index e7cee91c1b43..000000000000 --- a/staking/verify_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package staking - -import ( - "testing" - - _ "embed" - - "github.com/stretchr/testify/require" -) - -var ( - //go:embed large_rsa_key.cert - largeRSAKeyCert []byte - //go:embed large_rsa_key.sig - largeRSAKeySig []byte -) - -func TestCheckSignatureLargePublicKey(t *testing.T) { - require := require.New(t) - - cert, err := ParseCertificate(largeRSAKeyCert) - require.NoError(err) - - msg := []byte("TODO: put something clever") - err = CheckSignature(cert, msg, largeRSAKeySig) - require.ErrorIs(err, ErrInvalidRSAPublicKey) -} diff --git a/subnets/config.go b/subnets/config.go index 3ccbab79c50e..9a12c550b833 100644 --- a/subnets/config.go +++ b/subnets/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/subnets/config_test.go b/subnets/config_test.go index 2294a1e5176a..fdb10c4e072a 100644 --- a/subnets/config_test.go +++ b/subnets/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/subnets/no_op_allower.go b/subnets/no_op_allower.go index 9d2d51ea26d3..9cb7115e910d 100644 --- a/subnets/no_op_allower.go +++ b/subnets/no_op_allower.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/subnets/subnet.go b/subnets/subnet.go index 31bc9dcb562b..95425ba30500 100644 --- a/subnets/subnet.go +++ b/subnets/subnet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/subnets/subnet_test.go b/subnets/subnet_test.go index 98a75dfd2813..3a816a158f04 100644 --- a/subnets/subnet_test.go +++ b/subnets/subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/tests/colors.go b/tests/colors.go index 3aa935a5fce9..6cfec4df3dc0 100644 --- a/tests/colors.go +++ b/tests/colors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tests diff --git a/tests/e2e/README.md b/tests/e2e/README.md index 48b77a7ad699..d6e5bfd96dca 100644 --- a/tests/e2e/README.md +++ b/tests/e2e/README.md @@ -28,7 +28,7 @@ primarily target the X-Chain: ```bash ./tests/e2e/e2e.test \ - --caminogo-path=./build/caminogo \ + --avalanchego-path=./build/avalanchego \ --ginkgo.label-filter=x ``` @@ -69,13 +69,13 @@ of the test under development. To create a temporary network for use across test runs: ```bash -# From the root of the caminogo repo +# From the root of the avalanchego repo # Build the tmpnetctl binary $ ./scripts/build_tmpnetctl.sh # Start a new network -$ ./build/tmpnetctl start-network --caminogo-path=/path/to/caminogo +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego ... 
Started network 1000 @ /home/me/.tmpnet/networks/1000 @@ -87,12 +87,12 @@ with one of the following statements: # Start a new test run using the existing network ginkgo -v ./tests/e2e -- \ - --caminogo-path=/path/to/caminogo \ + --avalanchego-path=/path/to/avalanchego \ --ginkgo.focus-file=[name of file containing test] \ --use-existing-network \ --network-dir=/path/to/network -# It is also possible to set the CAMINOGO_BIN_PATH env var instead of supplying --caminogo-path +# It is also possible to set the CAMINOGO_BIN_PATH env var instead of supplying --avalanchego-path # and to set TMPNET_NETWORK_DIR instead of supplying --network-dir. ``` diff --git a/tests/e2e/banff/suites.go b/tests/e2e/banff/suites.go index 6adeb1476cfa..009bad3494b3 100644 --- a/tests/e2e/banff/suites.go +++ b/tests/e2e/banff/suites.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements tests for the banff network upgrade. @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -25,7 +25,7 @@ var _ = ginkgo.Describe("[Banff]", func() { ginkgo.It("can send custom assets X->P and P->X", func() { keychain := e2e.Env.NewKeychain(1) - wallet := e2e.Env.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) + wallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) // Get the P-chain and the X-chain wallets pWallet := wallet.P() diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index edfbef2671a8..0978bddc91d2 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c @@ -19,8 +19,8 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) @@ -42,16 +42,16 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { privateNetwork := e2e.Env.NewPrivateNetwork() ginkgo.By("allocating a pre-funded key") - key := privateNetwork.GetConfig().FundedKeys[0] + key := privateNetwork.PreFundedKeys[0] ethAddress := evm.GetEthAddress(key) ginkgo.By("initializing a coreth client") - node := privateNetwork.GetNodes()[0] - nodeURI := testnet.NodeURI{ - NodeID: node.GetID(), - URI: node.GetProcessContext().URI, + node := privateNetwork.Nodes[0] + nodeURI := tmpnet.NodeURI{ + NodeID: node.NodeID, + URI: node.URI, } - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("initializing a transaction signer") cChainID, err := ethClient.ChainID(e2e.DefaultContext()) diff --git a/tests/e2e/c/hashing_contract.go b/tests/e2e/c/hashing_contract.go index af5e81eb9057..7bf1db76c7cf 100644 --- a/tests/e2e/c/hashing_contract.go +++ b/tests/e2e/c/hashing_contract.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // AUTOMATICALLY GENERATED. DO NOT EDIT! diff --git a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index 8bed85eb1bd9..0ce0ace59113 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -34,10 +34,10 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { // the wallet to avoid having to verify that all nodes are at // the same height before initializing the wallet. nodeURI := e2e.Env.GetRandomNodeURI() - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("allocating a pre-funded key to send from and a recipient key to deliver to") - senderKey := e2e.Env.AllocateFundedKey() + senderKey := e2e.Env.AllocatePreFundedKey() senderEthAddress := evm.GetEthAddress(senderKey) recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -79,7 +79,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { // matches on-chain state. ginkgo.By("initializing a keychain and associated wallet") keychain := secp256k1fx.NewKeychain(senderKey, recipientKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 2e9a86684df0..d363ff775086 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -1,32 +1,23 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package e2e_test import ( - "encoding/json" - "flag" - "fmt" - "os" "testing" ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" _ "github.com/ava-labs/avalanchego/tests/e2e/c" _ "github.com/ava-labs/avalanchego/tests/e2e/faultinjection" _ "github.com/ava-labs/avalanchego/tests/e2e/p" - _ "github.com/ava-labs/avalanchego/tests/e2e/static-handlers" _ "github.com/ava-labs/avalanchego/tests/e2e/x" _ "github.com/ava-labs/avalanchego/tests/e2e/x/transfer" ) @@ -36,75 +27,18 @@ func TestE2E(t *testing.T) { ginkgo.RunSpecs(t, "e2e test suites") } -var ( - avalancheGoExecPath string - persistentNetworkDir string - usePersistentNetwork bool -) +var flagVars *e2e.FlagVars func init() { - flag.StringVar( - &avalancheGoExecPath, - "avalanchego-path", - os.Getenv(local.AvalancheGoPathEnvName), - fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), - ) - flag.StringVar( - &persistentNetworkDir, - "network-dir", - "", - fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. Also possible to configure via the %s env variable.", local.NetworkDirEnvName), - ) - flag.BoolVar( - &usePersistentNetwork, - "use-persistent-network", - false, - "[optional] whether to target the persistent network identified by --network-dir.", - ) + flagVars = e2e.RegisterFlags() } var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process - - require := require.New(ginkgo.GinkgoT()) - - if usePersistentNetwork && len(persistentNetworkDir) == 0 { - persistentNetworkDir = os.Getenv(local.NetworkDirEnvName) - } - - // Load or create a test network - var network *local.LocalNetwork - if len(persistentNetworkDir) > 0 { - tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) - - var err error - network, err = local.ReadNetwork(persistentNetworkDir) - require.NoError(err) - } else { - network = e2e.StartLocalNetwork(avalancheGoExecPath, e2e.DefaultNetworkDir) - } - - uris := network.GetURIs() - require.NotEmpty(uris, "network contains no nodes") - tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) - - testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ - FundedKeys: network.FundedKeys, - }) - tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) - require.NoError(err) - - env := &e2e.TestEnvironment{ - NetworkDir: network.Dir, - URIs: uris, - TestDataServerURI: testDataServerURI, - } - bytes, err := json.Marshal(env) - require.NoError(err) - return bytes + return e2e.NewTestEnvironment(flagVars, &tmpnet.Network{}).Marshal() }, func(envBytes []byte) { // Run in every ginkgo process // Initialize the local test environment from the global state - e2e.InitTestEnvironment(envBytes) + e2e.InitSharedTestEnvironment(envBytes) }) diff --git a/tests/e2e/faultinjection/duplicate_node_id.go b/tests/e2e/faultinjection/duplicate_node_id.go index 
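For reference, a minimal sketch of how another Ginkgo suite entrypoint could wire the relocated fixture, mirroring the rewritten e2e_test.go above: register the flags once, build the shared environment in the first Ginkgo process, and rehydrate it in every process. The package name and suite name are placeholders.

```go
// Hypothetical suite entrypoint following the e2e_test.go pattern shown above.
package example_test

import (
	"testing"

	ginkgo "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	"github.com/ava-labs/avalanchego/tests/fixture/e2e"
	"github.com/ava-labs/avalanchego/tests/fixture/tmpnet"
)

var flagVars *e2e.FlagVars

func init() {
	// Registers --avalanchego-path, --plugin-dir, --network-dir and
	// --use-existing-network (see tests/fixture/e2e/flags.go below).
	flagVars = e2e.RegisterFlags()
}

func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "example test suite")
}

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Runs only in the first ginkgo process: start or reuse a network and
	// serialize the resulting environment for the other processes.
	return e2e.NewTestEnvironment(flagVars, &tmpnet.Network{}).Marshal()
}, func(envBytes []byte) {
	// Runs in every ginkgo process: initialize e2e.Env from the shared state.
	e2e.InitSharedTestEnvironment(envBytes)
})
```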
bcdd05cb35b0..d20ef1a28c0c 100644 --- a/tests/e2e/faultinjection/duplicate_node_id.go +++ b/tests/e2e/faultinjection/duplicate_node_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package faultinjection @@ -14,8 +14,8 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/set" ) @@ -24,18 +24,17 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { ginkgo.It("should ensure that a given Node ID (i.e. staking keypair) can be used at most once on a network", func() { network := e2e.Env.GetNetwork() - nodes := network.GetNodes() ginkgo.By("creating new node") - node1 := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + node1 := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) e2e.WaitForHealthy(node1) ginkgo.By("checking that the new node is connected to its peers") - checkConnectedPeers(nodes, node1) + checkConnectedPeers(network.Nodes, node1) ginkgo.By("creating a second new node with the same staking keypair as the first new node") - node1Flags := node1.GetConfig().Flags - node2Flags := testnet.FlagsMap{ + node1Flags := node1.Flags + node2Flags := tmpnet.FlagsMap{ config.StakingTLSKeyContentKey: node1Flags[config.StakingTLSKeyContentKey], config.StakingCertContentKey: node1Flags[config.StakingCertContentKey], // Construct a unique data dir to ensure the two nodes' data will be stored @@ -46,28 +45,28 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { node2 := e2e.AddEphemeralNode(network, node2Flags) ginkgo.By("checking that the second new node fails to become healthy before timeout") - err := testnet.WaitForHealthy(e2e.DefaultContext(), node2) + err := tmpnet.WaitForHealthy(e2e.DefaultContext(), node2) require.ErrorIs(err, context.DeadlineExceeded) ginkgo.By("stopping the first new node") - require.NoError(node1.Stop()) + require.NoError(node1.Stop(e2e.DefaultContext())) ginkgo.By("checking that the second new node becomes healthy within timeout") e2e.WaitForHealthy(node2) ginkgo.By("checking that the second new node is connected to its peers") - checkConnectedPeers(nodes, node2) + checkConnectedPeers(network.Nodes, node2) // A bootstrap check was already performed by the second node. 
}) }) // Check that a new node is connected to existing nodes and vice versa -func checkConnectedPeers(existingNodes []testnet.Node, newNode testnet.Node) { +func checkConnectedPeers(existingNodes []*tmpnet.Node, newNode *tmpnet.Node) { require := require.New(ginkgo.GinkgoT()) // Collect the node ids of the new node's peers - infoClient := info.NewClient(newNode.GetProcessContext().URI) + infoClient := info.NewClient(newNode.URI) peers, err := infoClient.Peers(e2e.DefaultContext()) require.NoError(err) peerIDs := set.NewSet[ids.NodeID](len(existingNodes)) @@ -75,18 +74,17 @@ func checkConnectedPeers(existingNodes []testnet.Node, newNode testnet.Node) { peerIDs.Add(peer.ID) } - newNodeID := newNode.GetID() for _, existingNode := range existingNodes { // Check that the existing node is a peer of the new node - require.True(peerIDs.Contains(existingNode.GetID())) + require.True(peerIDs.Contains(existingNode.NodeID)) // Check that the new node is a peer - infoClient := info.NewClient(existingNode.GetProcessContext().URI) + infoClient := info.NewClient(existingNode.URI) peers, err := infoClient.Peers(e2e.DefaultContext()) require.NoError(err) isPeer := false for _, peer := range peers { - if peer.ID == newNodeID { + if peer.ID == newNode.NodeID { isPeer = true break } diff --git a/tests/e2e/ignore.go b/tests/e2e/ignore.go new file mode 100644 index 000000000000..ddf89c5d1bcc --- /dev/null +++ b/tests/e2e/ignore.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +// This file is required by ginkgo to accurately report compilation errors in test packages. Without +// it, the following error will mask the actual errors: +// +// ``` +// Failed to compile e2e: +// +// github.com/ava-labs/avalanchego/tests/e2e: no non-test Go files in /path/to/avalanchego/tests/e2e +// ``` diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index 729418adbd97..8983c46adcbd 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -18,8 +18,8 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -43,8 +43,8 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL network := e2e.Env.GetNetwork() ginkgo.By("checking that the network has a compatible minimum stake duration", func() { - minStakeDuration := cast.ToDuration(network.GetConfig().DefaultFlags[config.MinStakeDurationKey]) - require.Equal(testnet.DefaultMinStakeDuration, minStakeDuration) + minStakeDuration := cast.ToDuration(network.DefaultFlags[config.MinStakeDurationKey]) + require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) }) ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") @@ -53,7 +53,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL keychain := e2e.Env.NewKeychain(1) keychain.Add(recipientKey) nodeURI := e2e.Env.GetRandomNodeURI() - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() @@ -87,21 +87,17 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL } ginkgo.By("adding new node and waiting for it to report healthy") - node := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + node := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) e2e.WaitForHealthy(node) ginkgo.By("retrieving new node's id and pop") - infoClient := info.NewClient(node.GetProcessContext().URI) + infoClient := info.NewClient(node.URI) nodeID, nodePOP, err := infoClient.GetNodeID(e2e.DefaultContext()) require.NoError(err) + // Adding a validator should not break interchain transfer. + endTime := time.Now().Add(30 * time.Second) ginkgo.By("adding the new node as a validator", func() { - startTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - // Validation duration doesn't actually matter to this - // test - it is only ensuring that adding a validator - // doesn't break interchain transfer. - endTime := startTime.Add(30 * time.Second) - rewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -114,7 +110,6 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, - Start: uint64(startTime.Unix()), End: uint64(endTime.Unix()), Wght: weight, }, @@ -136,13 +131,8 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL require.NoError(err) }) + // Adding a delegator should not break interchain transfer. ginkgo.By("adding a delegator to the new node", func() { - startTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - // Delegation duration doesn't actually matter to this - // test - it is only ensuring that adding a delegator - // doesn't break interchain transfer. 
- endTime := startTime.Add(15 * time.Second) - rewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -150,7 +140,6 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, - Start: uint64(startTime.Unix()), End: uint64(endTime.Unix()), Wght: weight, }, @@ -202,7 +191,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }) ginkgo.By("initializing a new eth client") - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("importing AVAX from the P-Chain to the C-Chain", func() { _, err := cWallet.IssueImportTx( @@ -220,7 +209,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL require.Positive(balance.Cmp(big.NewInt(0))) ginkgo.By("stopping validator node to free up resources for a bootstrap check") - require.NoError(node.Stop()) + require.NoError(node.Stop(e2e.DefaultContext())) e2e.CheckBootstrapIsPossible(network) }) diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index 1369685bf077..ebb9dc602e6c 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -32,7 +32,7 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { nodeURI := e2e.Env.GetRandomNodeURI() keychain := e2e.Env.NewKeychain(1) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() xWallet := baseWallet.X() @@ -134,14 +134,13 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { require.NoError(err) }) - validatorStartTime := time.Now().Add(time.Minute) + endTime := time.Now().Add(time.Minute) ginkgo.By("add permissionless validator", func() { _, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: validatorID, - Start: uint64(validatorStartTime.Unix()), - End: uint64(validatorStartTime.Add(5 * time.Second).Unix()), + End: uint64(endTime.Unix()), Wght: 25 * units.MegaAvax, }, Subnet: subnetID, @@ -156,14 +155,12 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { require.NoError(err) }) - delegatorStartTime := validatorStartTime ginkgo.By("add permissionless delegator", func() { _, err := pWallet.IssueAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: validatorID, - Start: uint64(delegatorStartTime.Unix()), - End: uint64(delegatorStartTime.Add(5 * time.Second).Unix()), + End: uint64(endTime.Unix()), Wght: 25 * units.MegaAvax, }, Subnet: subnetID, diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index aee486eb3e2d..91d4430c86de 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. 
// ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -29,8 +29,8 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" @@ -41,8 +41,8 @@ import ( ) const ( - delegationPeriod = 15 * time.Second - validationPeriod = 30 * time.Second + targetDelegationPeriod = 15 * time.Second + targetValidationPeriod = 30 * time.Second ) var _ = ginkgo.Describe("[Staking Rewards]", func() { @@ -52,14 +52,14 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { network := e2e.Env.GetNetwork() ginkgo.By("checking that the network has a compatible minimum stake duration", func() { - minStakeDuration := cast.ToDuration(network.GetConfig().DefaultFlags[config.MinStakeDurationKey]) - require.Equal(testnet.DefaultMinStakeDuration, minStakeDuration) + minStakeDuration := cast.ToDuration(network.DefaultFlags[config.MinStakeDurationKey]) + require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) }) ginkgo.By("adding alpha node, whose uptime should result in a staking reward") - alphaNode := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + alphaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) ginkgo.By("adding beta node, whose uptime should not result in a staking reward") - betaNode := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + betaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) // Wait to check health until both nodes have started to minimize the duration // required for both nodes to report healthy. @@ -68,6 +68,16 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { ginkgo.By("waiting until beta node is healthy") e2e.WaitForHealthy(betaNode) + ginkgo.By("retrieving alpha node id and pop") + alphaInfoClient := info.NewClient(alphaNode.URI) + alphaNodeID, alphaPOP, err := alphaInfoClient.GetNodeID(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("retrieving beta node id and pop") + betaInfoClient := info.NewClient(betaNode.URI) + betaNodeID, betaPOP, err := betaInfoClient.GetNodeID(e2e.DefaultContext()) + require.NoError(err) + ginkgo.By("generating reward keys") alphaValidationRewardKey, err := secp256k1.NewPrivateKey() @@ -97,39 +107,36 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { ginkgo.By("creating keychain and P-Chain wallet") keychain := secp256k1fx.NewKeychain(rewardKeys...) 
- fundedKey := e2e.Env.AllocateFundedKey() + fundedKey := e2e.Env.AllocatePreFundedKey() keychain.Add(fundedKey) - nodeURI := e2e.Env.GetRandomNodeURI() - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + nodeURI := tmpnet.NodeURI{ + NodeID: alphaNodeID, + URI: alphaNode.URI, + } + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() - ginkgo.By("retrieving alpha node id and pop") - alphaInfoClient := info.NewClient(alphaNode.GetProcessContext().URI) - alphaNodeID, alphaPOP, err := alphaInfoClient.GetNodeID(e2e.DefaultContext()) - require.NoError(err) - - ginkgo.By("retrieving beta node id and pop") - betaInfoClient := info.NewClient(betaNode.GetProcessContext().URI) - betaNodeID, betaPOP, err := betaInfoClient.GetNodeID(e2e.DefaultContext()) - require.NoError(err) - const ( delegationPercent = 0.10 // 10% delegationShare = reward.PercentDenominator * delegationPercent weight = 2_000 * units.Avax ) - alphaValidatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - alphaValidatorEndTime := alphaValidatorStartTime.Add(validationPeriod) - tests.Outf("alpha node validation period starting at: %v\n", alphaValidatorStartTime) + pvmClient := platformvm.NewClient(alphaNode.URI) + + ginkgo.By("retrieving supply before inserting validators") + supplyAtValidatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + + alphaValidatorsEndTime := time.Now().Add(targetValidationPeriod) + tests.Outf("alpha node validation period ending at: %v\n", alphaValidatorsEndTime) ginkgo.By("adding alpha node as a validator", func() { _, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: alphaNodeID, - Start: uint64(alphaValidatorStartTime.Unix()), - End: uint64(alphaValidatorEndTime.Unix()), + End: uint64(alphaValidatorsEndTime.Unix()), Wght: weight, }, Subnet: constants.PrimaryNetworkID, @@ -150,16 +157,14 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.NoError(err) }) - betaValidatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - betaValidatorEndTime := betaValidatorStartTime.Add(validationPeriod) - tests.Outf("beta node validation period starting at: %v\n", betaValidatorStartTime) + betaValidatorEndTime := time.Now().Add(targetValidationPeriod) + tests.Outf("beta node validation period ending at: %v\n", betaValidatorEndTime) ginkgo.By("adding beta node as a validator", func() { _, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: betaNodeID, - Start: uint64(betaValidatorStartTime.Unix()), End: uint64(betaValidatorEndTime.Unix()), Wght: weight, }, @@ -181,16 +186,19 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.NoError(err) }) - gammaDelegatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - tests.Outf("gamma delegation period starting at: %v\n", gammaDelegatorStartTime) + ginkgo.By("retrieving supply before inserting delegators") + supplyAtDelegatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + + gammaDelegatorEndTime := time.Now().Add(targetDelegationPeriod) + tests.Outf("gamma delegation period ending at: %v\n", gammaDelegatorEndTime) ginkgo.By("adding gamma as delegator to the alpha node", func() { _, err := pWallet.IssueAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: alphaNodeID, - Start: 
uint64(gammaDelegatorStartTime.Unix()), - End: uint64(gammaDelegatorStartTime.Add(delegationPeriod).Unix()), + End: uint64(gammaDelegatorEndTime.Unix()), Wght: weight, }, Subnet: constants.PrimaryNetworkID, @@ -205,16 +213,15 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.NoError(err) }) - deltaDelegatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) - tests.Outf("delta delegation period starting at: %v\n", deltaDelegatorStartTime) + deltaDelegatorEndTime := time.Now().Add(targetDelegationPeriod) + tests.Outf("delta delegation period ending at: %v\n", deltaDelegatorEndTime) ginkgo.By("adding delta as delegator to the beta node", func() { _, err := pWallet.IssueAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: betaNodeID, - Start: uint64(deltaDelegatorStartTime.Unix()), - End: uint64(deltaDelegatorStartTime.Add(delegationPeriod).Unix()), + End: uint64(deltaDelegatorEndTime.Unix()), Wght: weight, }, Subnet: constants.PrimaryNetworkID, @@ -230,15 +237,21 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { }) ginkgo.By("stopping beta node to prevent it and its delegator from receiving a validation reward") - require.NoError(betaNode.Stop()) + require.NoError(betaNode.Stop(e2e.DefaultContext())) + + ginkgo.By("retrieving staking periods from the chain") + data, err := pvmClient.GetCurrentValidators(e2e.DefaultContext(), constants.PlatformChainID, []ids.NodeID{alphaNodeID}) + require.NoError(err) + require.Len(data, 1) + actualAlphaValidationPeriod := time.Duration(data[0].EndTime-data[0].StartTime) * time.Second + delegatorData := data[0].Delegators[0] + actualGammaDelegationPeriod := time.Duration(delegatorData.EndTime-delegatorData.StartTime) * time.Second ginkgo.By("waiting until all validation periods are over") // The beta validator was the last added and so has the latest end time. The // delegation periods are shorter than the validation periods. 
time.Sleep(time.Until(betaValidatorEndTime)) - pvmClient := platformvm.NewClient(alphaNode.GetProcessContext().URI) - ginkgo.By("waiting until the alpha and beta nodes are no longer validators") e2e.Eventually(func() bool { validators, err := pvmClient.GetCurrentValidators(e2e.DefaultContext(), constants.PrimaryNetworkID, nil) @@ -271,7 +284,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { rewardBalances := make(map[ids.ShortID]uint64, len(rewardKeys)) for _, rewardKey := range rewardKeys { keychain := secp256k1fx.NewKeychain(rewardKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() balances, err := pWallet.Builder().GetBalance() require.NoError(err) @@ -280,11 +293,9 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.Len(rewardBalances, len(rewardKeys)) ginkgo.By("determining expected validation and delegation rewards") - currentSupply, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) - require.NoError(err) calculator := reward.NewCalculator(rewardConfig) - expectedValidationReward := calculator.Calculate(validationPeriod, weight, currentSupply) - potentialDelegationReward := calculator.Calculate(delegationPeriod, weight, currentSupply) + expectedValidationReward := calculator.Calculate(actualAlphaValidationPeriod, weight, supplyAtValidatorsStart) + potentialDelegationReward := calculator.Calculate(actualGammaDelegationPeriod, weight, supplyAtDelegatorsStart) expectedDelegationFee, expectedDelegatorReward := reward.Split(potentialDelegationReward, delegationShare) ginkgo.By("checking expected rewards against actual rewards") @@ -301,7 +312,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { } ginkgo.By("stopping alpha to free up resources for a bootstrap check") - require.NoError(alphaNode.Stop()) + require.NoError(alphaNode.Stop(e2e.DefaultContext())) e2e.CheckBootstrapIsPossible(network) }) diff --git a/tests/e2e/p/validator_sets.go b/tests/e2e/p/validator_sets.go new file mode 100644 index 000000000000..84df98979938 --- /dev/null +++ b/tests/e2e/p/validator_sets.go @@ -0,0 +1,113 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p + +import ( + "fmt" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var _ = e2e.DescribePChain("[Validator Sets]", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should be identical for every height for all nodes in the network", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("creating wallet with a funded key to source delegated funds from") + keychain := e2e.Env.NewKeychain(1) + nodeURI := e2e.Env.GetRandomNodeURI() + baseWallet := e2e.NewWallet(keychain, nodeURI) + pWallet := baseWallet.P() + + const delegatorCount = 15 + ginkgo.By(fmt.Sprintf("adding %d delegators", delegatorCount), func() { + rewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + avaxAssetID := pWallet.AVAXAssetID() + startTime := time.Now().Add(tmpnet.DefaultValidatorStartTimeDiff) + endTime := startTime.Add(time.Second * 360) + // This is the default flag value for MinDelegatorStake. + weight := genesis.LocalParams.StakingConfig.MinDelegatorStake + + for i := 0; i < delegatorCount; i++ { + _, err = pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeURI.NodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + avaxAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + } + }) + + ginkgo.By("getting the current P-Chain height from the wallet") + currentPChainHeight, err := platformvm.NewClient(nodeURI.URI).GetHeight(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("checking that validator sets are equal across all heights for all nodes", func() { + pvmClients := make([]platformvm.Client, len(e2e.Env.URIs)) + for i, nodeURI := range e2e.Env.URIs { + pvmClients[i] = platformvm.NewClient(nodeURI.URI) + // Ensure that the height of the target node is at least the expected height + e2e.Eventually( + func() bool { + pChainHeight, err := pvmClients[i].GetHeight(e2e.DefaultContext()) + require.NoError(err) + return pChainHeight >= currentPChainHeight + }, + e2e.DefaultTimeout, + e2e.DefaultPollingInterval, + fmt.Sprintf("failed to see expected height %d for %s before timeout", currentPChainHeight, nodeURI.NodeID), + ) + } + + for height := uint64(0); height <= currentPChainHeight; height++ { + tests.Outf(" checked validator sets for height %d\n", height) + var observedValidatorSet map[ids.NodeID]*validators.GetValidatorOutput + for _, pvmClient := range pvmClients { + validatorSet, err := pvmClient.GetValidatorsAt( + e2e.DefaultContext(), + constants.PrimaryNetworkID, + height, + ) + require.NoError(err) + if observedValidatorSet == nil { + observedValidatorSet = validatorSet + continue + } + require.Equal(observedValidatorSet, validatorSet) + } + } + }) + + e2e.CheckBootstrapIsPossible(network) + }) +}) diff --git 
a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index 96bf8bafc02c..8bf7efca2c2c 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" @@ -36,7 +36,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { func() { nodeURI := e2e.Env.GetRandomNodeURI() keychain := e2e.Env.NewKeychain(2) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() avaxAssetID := baseWallet.P().AVAXAssetID() @@ -68,19 +68,15 @@ var _ = e2e.DescribePChain("[Workflow]", func() { require.NoError(err) require.GreaterOrEqual(pBalance, minBalance) }) - // create validator data - validatorStartTimeDiff := 30 * time.Second - vdrStartTime := time.Now().Add(validatorStartTimeDiff) // Use a random node ID to ensure that repeated test runs - // will succeed against a persistent network. + // will succeed against a network that persists across runs. validatorID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen)) require.NoError(err) vdr := &txs.Validator{ NodeID: validatorID, - Start: uint64(vdrStartTime.Unix()), - End: uint64(vdrStartTime.Add(72 * time.Hour).Unix()), + End: uint64(time.Now().Add(72 * time.Hour).Unix()), Wght: minValStake, } rewardOwner := &secp256k1fx.OutputOwners{ diff --git a/tests/e2e/static-handlers/suites.go b/tests/e2e/static-handlers/suites.go deleted file mode 100644 index 94b889da026b..000000000000 --- a/tests/e2e/static-handlers/suites.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// -// This file is a derived work, based on ava-labs code whose -// original notices appear below. -// -// It is distributed under the same license conditions as the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -// Implements static handlers tests for avm and platformvm -package statichandlers - -import ( - "time" - - ginkgo "github.com/onsi/ginkgo/v2" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/platformvm/api" -) - -var _ = ginkgo.Describe("[StaticHandlers]", func() { - require := require.New(ginkgo.GinkgoT()) - - ginkgo.It("can make calls to avm static api", - func() { - addrMap := map[string]string{} - for _, addrStr := range []string{ - "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", - "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", - "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", - "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", - } { - addr, err := ids.ShortFromString(addrStr) - require.NoError(err) - addrMap[addrStr], err = address.FormatBech32(constants.NetworkIDToHRP[constants.LocalID], addr[:]) - require.NoError(err) - } - avmArgs := avm.BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]avm.AssetDefinition{ - "asset1": { - Name: "myFixedCapAsset", - Symbol: "MFCA", - Denomination: 8, - InitialState: map[string][]interface{}{ - "fixedCap": { - avm.Holder{ - Amount: 100000, - Address: addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - avm.Holder{ - Amount: 100000, - Address: addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - avm.Holder{ - Amount: json.Uint64(50000), - Address: addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - }, - avm.Holder{ - Amount: json.Uint64(50000), - Address: addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - "asset2": { - Name: "myVarCapAsset", - Symbol: "MVCA", - InitialState: map[string][]interface{}{ - "variableCap": { - avm.Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - }, - avm.Owners{ - Threshold: 2, - Minters: []string{ - addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - }, - "asset3": { - Name: "myOtherVarCapAsset", - InitialState: map[string][]interface{}{ - "variableCap": { - avm.Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - }, - }, - }, - }, - }, - } - staticClient := avm.NewStaticClient(e2e.Env.GetRandomNodeURI().URI) - resp, err := staticClient.BuildGenesis(e2e.DefaultContext(), &avmArgs) - require.NoError(err) - require.Equal(resp.Bytes, 
"0x0000000000030006617373657431000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f6d794669786564436170417373657400044d4643410800000001000000000000000400000007000000000000c350000000000000000000000001000000013f78e510df62bc48b0829ec06d6a6b98062d695300000007000000000000c35000000000000000000000000100000001c54903de5177a16f7811771ef2f4659d9e8646710000000700000000000186a0000000000000000000000001000000013f58fda2e9ea8d9e4b181832a07b26dae286f2cb0000000700000000000186a000000000000000000000000100000001645938bb7ae2193270e6ffef009e3664d11e07c10006617373657432000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d6d79566172436170417373657400044d5643410000000001000000000000000200000006000000000000000000000001000000023f58fda2e9ea8d9e4b181832a07b26dae286f2cb645938bb7ae2193270e6ffef009e3664d11e07c100000006000000000000000000000001000000023f78e510df62bc48b0829ec06d6a6b98062d6953c54903de5177a16f7811771ef2f4659d9e864671000661737365743300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000126d794f7468657256617243617041737365740000000000000100000000000000010000000600000000000000000000000100000001645938bb7ae2193270e6ffef009e3664d11e07c1279fa028") - }) - - ginkgo.It("can make calls to platformvm static api", func() { - keys := []*secp256k1.PrivateKey{} - for _, key := range []string{ - "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", - "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", - "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", - "2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV", - } { - privKeyBytes, err := cb58.Decode(key) - require.NoError(err) - pk, err := secp256k1.ToPrivateKey(privKeyBytes) - require.NoError(err) - keys = append(keys, pk) - } - - genesisUTXOs := make([]api.UTXO, len(keys)) - hrp := constants.NetworkIDToHRP[constants.UnitTestID] - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - require.NoError(err) - genesisUTXOs[i] = api.UTXO{ - Amount: json.Uint64(50000 * units.MilliAvax), - Address: addr, - } - } - - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ - StartTime: json.Uint64(time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC).Unix()), - EndTime: json.Uint64(time.Date(1997, 1, 30, 0, 0, 0, 0, time.UTC).Unix()), - NodeID: ids.NodeID(id), - }, - RewardOwner: &api.Owner{ - Threshold: 1, - Addresses: []string{addr}, - }, - Staked: []api.UTXO{{ - Amount: json.Uint64(10000), - Address: addr, - }}, - } - } - - buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(constants.UnitTestID), - AvaxAssetID: ids.ID{'a', 'v', 'a', 'x'}, - UTXOs: genesisUTXOs, - Validators: genesisValidators, - Chains: nil, - Time: json.Uint64(time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC).Unix()), - InitialSupply: json.Uint64(360 * units.MegaAvax), - Encoding: formatting.Hex, - } - - staticClient := api.NewStaticClient(e2e.Env.GetRandomNodeURI().URI) - resp, err := staticClient.BuildGenesis(e2e.DefaultContext(), &buildGenesisArgs) - require.NoError(err) - require.Equal(resp.Bytes, 
"0x0000000000050000000000000000000000000000000000000000000000000000000000000000000000006176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b740000000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa293077626000000000000000000000000000000000000000000000000000000000000000000000000000000016176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b7400000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e000000000000000000000000000000000000000000000000000000000000000000000000000000026176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b740000000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee31000000000000000000000000000000000000000000000000000000000000000000000000000000036176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b7400000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c000000000000000000000000000000000000000000000000000000000000000000000000000000046176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b74000000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b00000000000000050000000c0000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fceda8f90fcb5d30614b99d79fc4baa2930776260000000032c9a9000000000032efe480000000000000271000000001617661780000000000000000000000000000000000000000000000000000000000000007000000000000271000000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa2930776260000000b00000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa29307762600000000000000000000000c0000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ead693c17abb1be422bb50b30b9711ff98d667e0000000032c9a9000000000032efe4800000000000002710000000016176617800000000000000000000000000000000000000000000000000000000000000070000000000002710000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e0000000b000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e00000000000000000000000c0000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f2420846876e69f473dda256172967e992f0ee310000000032c9a9000000000032efe480000000000000271000000001617661780000000000000000000000000000000000000000000000000000000000000007000000000000271000000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee310000000b00000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee3100000000000000000000000c0000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000003cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000032c9a9000000000032efe4800000000000002710000000016176617800000000000000000000000000000000000000000000000000000000000000070000000000002710000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000b000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000000000000000000000c0000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000087c4ec0736fdad03fd9ec8c3ba609de958601a7b0000000032c9a9000000000032efe48000000000000027100000000161766178000000000000000000000000000000000000000000000000000000000000000700000000000027100000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b0000000b0000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b00000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000032c9a90004fefa17b72400000000481140e2") - }) -}) diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index 6d335199b5b9..eec7b3427c19 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -36,7 +36,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL require.NoError(err) keychain := e2e.Env.NewKeychain(1) keychain.Add(recipientKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() @@ -103,7 +103,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }) ginkgo.By("initializing a new eth client") - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("importing AVAX from the X-Chain to the C-Chain", func() { _, err := cWallet.IssueImportTx( diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index ed848401d057..75cbbfe71220 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements X-chain transfer tests. @@ -26,7 +26,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -79,7 +79,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { // Ensure the same set of 10 keys is used for all tests // by retrieving them outside of runFunc. - testKeys := e2e.Env.AllocateFundedKeys(10) + testKeys := e2e.Env.AllocatePreFundedKeys(10) runFunc := func(round int) { tests.Outf("{{green}}\n\n\n\n\n\n---\n[ROUND #%02d]:{{/}}\n", round) @@ -93,7 +93,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { } keychain := secp256k1fx.NewKeychain(testKeys...) 
- baseWallet := e2e.Env.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) + baseWallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) avaxAssetID := baseWallet.X().AVAXAssetID() wallets := make([]primary.Wallet, len(testKeys)) diff --git a/tests/e2e/describe.go b/tests/fixture/e2e/describe.go similarity index 95% rename from tests/e2e/describe.go rename to tests/fixture/e2e/describe.go index 5475a7114c96..2810117758c6 100644 --- a/tests/e2e/describe.go +++ b/tests/fixture/e2e/describe.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package e2e diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go new file mode 100644 index 000000000000..9019c9438b9e --- /dev/null +++ b/tests/fixture/e2e/env.go @@ -0,0 +1,184 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "encoding/json" + "math/rand" + "os" + "path/filepath" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +// Env is used to access shared test fixture. Intended to be +// initialized from SynchronizedBeforeSuite. +var Env *TestEnvironment + +func InitSharedTestEnvironment(envBytes []byte) { + require := require.New(ginkgo.GinkgoT()) + require.Nil(Env, "env already initialized") + Env = &TestEnvironment{} + require.NoError(json.Unmarshal(envBytes, Env)) + Env.require = require +} + +type TestEnvironment struct { + // The directory where the test network configuration is stored + NetworkDir string + // URIs used to access the API endpoints of nodes of the network + URIs []tmpnet.NodeURI + // The URI used to access the http server that allocates test data + TestDataServerURI string + + require *require.Assertions +} + +func (te *TestEnvironment) Marshal() []byte { + bytes, err := json.Marshal(te) + require.NoError(ginkgo.GinkgoT(), err) + return bytes +} + +// Initialize a new test environment with a shared network (either pre-existing or newly created). +func NewTestEnvironment(flagVars *FlagVars, desiredNetwork *tmpnet.Network) *TestEnvironment { + require := require.New(ginkgo.GinkgoT()) + + networkDir := flagVars.NetworkDir() + + // Load or create a test network + var network *tmpnet.Network + if len(networkDir) > 0 { + var err error + network, err = tmpnet.ReadNetwork(networkDir) + require.NoError(err) + tests.Outf("{{yellow}}Using an existing network configured at %s{{/}}\n", network.Dir) + + // Set the desired subnet configuration to ensure subsequent creation. 
+ for _, subnet := range desiredNetwork.Subnets { + if existing := network.GetSubnet(subnet.Name); existing != nil { + // Already present + continue + } + network.Subnets = append(network.Subnets, subnet) + } + } else { + network = desiredNetwork + StartNetwork(network, DefaultNetworkDir, flagVars.AvalancheGoExecPath(), flagVars.PluginDir()) + } + + // A new network will always need subnet creation and an existing + // network will also need subnets to be created the first time it + // is used. + require.NoError(network.CreateSubnets(DefaultContext(), ginkgo.GinkgoWriter)) + + // Wait for chains to have bootstrapped on all nodes + Eventually(func() bool { + for _, subnet := range network.Subnets { + for _, validatorID := range subnet.ValidatorIDs { + uri, err := network.GetURIForNodeID(validatorID) + require.NoError(err) + infoClient := info.NewClient(uri) + for _, chain := range subnet.Chains { + isBootstrapped, err := infoClient.IsBootstrapped(DefaultContext(), chain.ChainID.String()) + // Ignore errors since a chain id that is not yet known will result in a recoverable error. + if err != nil || !isBootstrapped { + return false + } + } + } + } + return true + }, DefaultTimeout, DefaultPollingInterval, "failed to see all chains bootstrap before timeout") + + uris := network.GetNodeURIs() + require.NotEmpty(uris, "network contains no nodes") + tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) + + testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ + PreFundedKeys: network.PreFundedKeys, + }) + tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) + require.NoError(err) + + return &TestEnvironment{ + NetworkDir: network.Dir, + URIs: uris, + TestDataServerURI: testDataServerURI, + require: require, + } +} + +// Retrieve a random URI to naively attempt to spread API load across +// nodes. +func (te *TestEnvironment) GetRandomNodeURI() tmpnet.NodeURI { + r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 + nodeURI := te.URIs[r.Intn(len(te.URIs))] + tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) + return nodeURI +} + +// Retrieve the network to target for testing. +func (te *TestEnvironment) GetNetwork() *tmpnet.Network { + network, err := tmpnet.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + return network +} + +// Retrieve the specified number of funded keys allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocatePreFundedKeys(count int) []*secp256k1.PrivateKey { + keys, err := fixture.AllocatePreFundedKeys(te.TestDataServerURI, count) + te.require.NoError(err) + tests.Outf("{{blue}} allocated pre-funded key(s): %+v{{/}}\n", keys) + return keys +} + +// Retrieve a funded key allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocatePreFundedKey() *secp256k1.PrivateKey { + return te.AllocatePreFundedKeys(1)[0] +} + +// Create a new keychain with the specified number of test keys. +func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { + keys := te.AllocatePreFundedKeys(count) + return secp256k1fx.NewKeychain(keys...) +} + +// Create a new private network that is not shared with other tests. 
+func (te *TestEnvironment) NewPrivateNetwork() *tmpnet.Network { + // Load the shared network to retrieve its path and exec path + sharedNetwork, err := tmpnet.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + + network := &tmpnet.Network{} + + // The private networks dir is under the shared network dir to ensure it + // will be included in the artifact uploaded in CI. + privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) + te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) + + pluginDir, err := sharedNetwork.DefaultFlags.GetStringVal(config.PluginDirKey) + te.require.NoError(err) + + StartNetwork( + network, + privateNetworksDir, + sharedNetwork.DefaultRuntimeConfig.AvalancheGoPath, + pluginDir, + ) + + return network +} diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go new file mode 100644 index 000000000000..2a00df97a885 --- /dev/null +++ b/tests/fixture/e2e/flags.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "flag" + "fmt" + "os" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" +) + +type FlagVars struct { + avalancheGoExecPath string + pluginDir string + networkDir string + useExistingNetwork bool +} + +func (v *FlagVars) AvalancheGoExecPath() string { + return v.avalancheGoExecPath +} + +func (v *FlagVars) PluginDir() string { + return v.pluginDir +} + +func (v *FlagVars) NetworkDir() string { + if !v.useExistingNetwork { + return "" + } + if len(v.networkDir) > 0 { + return v.networkDir + } + return os.Getenv(tmpnet.NetworkDirEnvName) +} + +func (v *FlagVars) UseExistingNetwork() bool { + return v.useExistingNetwork +} + +func RegisterFlags() *FlagVars { + vars := FlagVars{} + flag.StringVar( + &vars.avalancheGoExecPath, + "avalanchego-path", + os.Getenv(tmpnet.AvalancheGoPathEnvName), + fmt.Sprintf("avalanchego executable path (required if not using an existing network). Also possible to configure via the %s env variable.", tmpnet.AvalancheGoPathEnvName), + ) + flag.StringVar( + &vars.pluginDir, + "plugin-dir", + os.ExpandEnv("$HOME/.avalanchego/plugins"), + "[optional] the dir containing VM plugins.", + ) + flag.StringVar( + &vars.networkDir, + "network-dir", + "", + fmt.Sprintf("[optional] the dir containing the configuration of an existing network to target for testing. Will only be used if --use-existing-network is specified. Also possible to configure via the %s env variable.", tmpnet.NetworkDirEnvName), + ) + flag.BoolVar( + &vars.useExistingNetwork, + "use-existing-network", + false, + "[optional] whether to target the existing network identified by --network-dir.", + ) + + return &vars +} diff --git a/tests/e2e/e2e.go b/tests/fixture/e2e/helpers.go similarity index 52% rename from tests/e2e/e2e.go rename to tests/fixture/e2e/helpers.go index 130f33f1197c..c1d87a4beba8 100644 --- a/tests/e2e/e2e.go +++ b/tests/fixture/e2e/helpers.go @@ -1,18 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -// e2e implements the e2e tests. 
package e2e import ( "context" - "encoding/json" "errors" "fmt" "math/big" - "math/rand" "os" - "path/filepath" "strings" "time" @@ -26,12 +22,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/fixture" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -43,19 +34,14 @@ const ( // contention. DefaultTimeout = 2 * time.Minute - // Interval appropriate for network operations that should be - // retried periodically but not too often. - DefaultPollingInterval = 500 * time.Millisecond + DefaultPollingInterval = tmpnet.DefaultPollingInterval // Setting this env will disable post-test bootstrap // checks. Useful for speeding up iteration during test // development. SkipBootstrapChecksEnvName = "E2E_SKIP_BOOTSTRAP_CHECKS" - // Validator start time must be a minimum of SyncBound from the - // current time for validator addition to succeed, and adding 20 - // seconds provides a buffer in case of any delay in processing. - DefaultValidatorStartTimeDiff = executor.SyncBound + 20*time.Second + DefaultValidatorStartTimeDiff = tmpnet.DefaultValidatorStartTimeDiff DefaultGasLimit = uint64(21000) // Standard gas limit @@ -68,75 +54,15 @@ const ( PrivateNetworksDirName = "private_networks" ) -// Env is used to access shared test fixture. Intended to be -// initialized by SynchronizedBeforeSuite. -var Env *TestEnvironment - -type TestEnvironment struct { - // The directory where the test network configuration is stored - NetworkDir string - // URIs used to access the API endpoints of nodes of the network - URIs []testnet.NodeURI - // The URI used to access the http server that allocates test data - TestDataServerURI string - - require *require.Assertions -} - -func InitTestEnvironment(envBytes []byte) { - require := require.New(ginkgo.GinkgoT()) - require.Nil(Env, "env already initialized") - Env = &TestEnvironment{ - require: require, - } - require.NoError(json.Unmarshal(envBytes, Env)) -} - -// Retrieve a random URI to naively attempt to spread API load across -// nodes. -func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { - r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 - nodeURI := te.URIs[r.Intn(len(te.URIs))] - tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) - return nodeURI -} - -// Retrieve the network to target for testing. -func (te *TestEnvironment) GetNetwork() testnet.Network { - network, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - return network -} - -// Retrieve the specified number of funded keys allocated for the caller's exclusive use. -func (te *TestEnvironment) AllocateFundedKeys(count int) []*secp256k1.PrivateKey { - keys, err := fixture.AllocateFundedKeys(te.TestDataServerURI, count) - te.require.NoError(err) - tests.Outf("{{blue}} allocated funded key(s): %+v{{/}}\n", keys) - return keys -} - -// Retrieve a funded key allocated for the caller's exclusive use. 
-func (te *TestEnvironment) AllocateFundedKey() *secp256k1.PrivateKey { - return te.AllocateFundedKeys(1)[0] -} - -// Create a new keychain with the specified number of test keys. -func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { - keys := te.AllocateFundedKeys(count) - return secp256k1fx.NewKeychain(keys...) -} - // Create a new wallet for the provided keychain against the specified node URI. -// TODO(marun) Make this a regular function. -func (te *TestEnvironment) NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary.Wallet { +func NewWallet(keychain *secp256k1fx.Keychain, nodeURI tmpnet.NodeURI) primary.Wallet { tests.Outf("{{blue}} initializing a new wallet for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) baseWallet, err := primary.MakeWallet(DefaultContext(), &primary.WalletConfig{ URI: nodeURI.URI, AVAXKeychain: keychain, EthKeychain: keychain, }) - te.require.NoError(err) + require.NoError(ginkgo.GinkgoT(), err) return primary.NewWalletWithOptions( baseWallet, common.WithPostIssuanceFunc( @@ -148,30 +74,15 @@ func (te *TestEnvironment) NewWallet(keychain *secp256k1fx.Keychain, nodeURI tes } // Create a new eth client targeting the specified node URI. -// TODO(marun) Make this a regular function. -func (te *TestEnvironment) NewEthClient(nodeURI testnet.NodeURI) ethclient.Client { +func NewEthClient(nodeURI tmpnet.NodeURI) ethclient.Client { tests.Outf("{{blue}} initializing a new eth client for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) nodeAddress := strings.Split(nodeURI.URI, "//")[1] uri := fmt.Sprintf("ws://%s/ext/bc/C/ws", nodeAddress) client, err := ethclient.Dial(uri) - te.require.NoError(err) + require.NoError(ginkgo.GinkgoT(), err) return client } -// Create a new private network that is not shared with other tests. -func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { - // Load the shared network to retrieve its path and exec path - sharedNetwork, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - - // The private networks dir is under the shared network dir to ensure it - // will be included in the artifact uploaded in CI. - privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) - te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) - - return StartLocalNetwork(sharedNetwork.ExecPath, privateNetworksDir) -} - // Helper simplifying use of a timed context by canceling the context on ginkgo teardown. func ContextWithTimeout(duration time.Duration) context.Context { ctx, cancel := context.WithTimeout(context.Background(), duration) @@ -207,28 +118,28 @@ func Eventually(condition func() bool, waitFor time.Duration, tick time.Duration } } -// Add an ephemeral node that is only intended to be used by a single test. Its ID and -// URI are not intended to be returned from the Network instance to minimize -// accessibility from other tests. -func AddEphemeralNode(network testnet.Network, flags testnet.FlagsMap) testnet.Node { +// Adds an ephemeral node intended to be used by a single test. +func AddEphemeralNode(network *tmpnet.Network, flags tmpnet.FlagsMap) *tmpnet.Node { require := require.New(ginkgo.GinkgoT()) - node, err := network.AddEphemeralNode(ginkgo.GinkgoWriter, flags) + node, err := network.AddEphemeralNode(DefaultContext(), ginkgo.GinkgoWriter, flags) require.NoError(err) - // Ensure node is stopped on teardown. It's configuration is not removed to enable - // collection in CI to aid in troubleshooting failures. 
ginkgo.DeferCleanup(func() { - tests.Outf("Shutting down ephemeral node %s\n", node.GetID()) - require.NoError(node.Stop()) + tests.Outf("shutting down ephemeral node %q\n", node.NodeID) + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(node.Stop(ctx)) }) - return node } // Wait for the given node to report healthy. -func WaitForHealthy(node testnet.Node) { - require.NoError(ginkgo.GinkgoT(), testnet.WaitForHealthy(DefaultContext(), node)) +func WaitForHealthy(node *tmpnet.Node) { + // Need to use explicit context (vs DefaultContext()) to support use with DeferCleanup + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(ginkgo.GinkgoT(), tmpnet.WaitForHealthy(ctx, node)) } // Sends an eth transaction, waits for the transaction receipt to be issued @@ -275,40 +186,58 @@ func WithSuggestedGasPrice(ethClient ethclient.Client) common.Option { return common.WithBaseFee(baseFee) } -// Verify that a new node can bootstrap into the network. -func CheckBootstrapIsPossible(network testnet.Network) { +// Verify that a new node can bootstrap into the network. This function is safe to call +// from `Teardown` by virtue of not depending on ginkgo.DeferCleanup. +func CheckBootstrapIsPossible(network *tmpnet.Network) { + require := require.New(ginkgo.GinkgoT()) + if len(os.Getenv(SkipBootstrapChecksEnvName)) > 0 { tests.Outf("{{yellow}}Skipping bootstrap check due to the %s env var being set", SkipBootstrapChecksEnvName) return } ginkgo.By("checking if bootstrap is possible with the current network state") - node := AddEphemeralNode(network, testnet.FlagsMap{}) - WaitForHealthy(node) + + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + + node, err := network.AddEphemeralNode(ctx, ginkgo.GinkgoWriter, tmpnet.FlagsMap{}) + // AddEphemeralNode will initiate node stop if an error is encountered during start, + // so no further cleanup effort is required if an error is seen here. + require.NoError(err) + + // Ensure the node is always stopped at the end of the check + defer func() { + ctx, cancel = context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(node.Stop(ctx)) + }() + + // Check that the node becomes healthy within timeout + require.NoError(tmpnet.WaitForHealthy(ctx, node)) } -// Start a local test-managed network with the provided avalanchego binary. -func StartLocalNetwork(avalancheGoExecPath string, networkDir string) *local.LocalNetwork { +// Start a temporary network with the provided avalanchego binary. 
+func StartNetwork(network *tmpnet.Network, rootNetworkDir string, avalancheGoExecPath string, pluginDir string) { require := require.New(ginkgo.GinkgoT()) - network, err := local.StartNetwork( - DefaultContext(), - ginkgo.GinkgoWriter, - networkDir, - &local.LocalNetwork{ - LocalConfig: local.LocalConfig{ - ExecPath: avalancheGoExecPath, - }, - }, - testnet.DefaultNodeCount, - testnet.DefaultFundedKeyCount, + require.NoError( + tmpnet.StartNewNetwork( + DefaultContext(), + ginkgo.GinkgoWriter, + network, + rootNetworkDir, + avalancheGoExecPath, + pluginDir, + tmpnet.DefaultNodeCount, + ), ) - require.NoError(err) + ginkgo.DeferCleanup(func() { tests.Outf("Shutting down network\n") - require.NoError(network.Stop()) + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(network.Stop(ctx)) }) tests.Outf("{{green}}Successfully started network{{/}}\n") - - return network } diff --git a/tests/fixture/test_data_server.go b/tests/fixture/test_data_server.go index 5a39baab8c9c..b79dcc2bb26b 100644 --- a/tests/fixture/test_data_server.go +++ b/tests/fixture/test_data_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fixture @@ -33,7 +33,7 @@ var ( ) type TestData struct { - FundedKeys []*secp256k1.PrivateKey + PreFundedKeys []*secp256k1.PrivateKey } // http server allocating resources to tests potentially executing in parallel @@ -68,14 +68,14 @@ func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) { defer s.lock.Unlock() // Only fulfill requests for available keys - if keyCount > len(s.FundedKeys) { + if keyCount > len(s.PreFundedKeys) { http.Error(w, requestedKeyCountExceedsAvailable, http.StatusInternalServerError) return } // Allocate the requested number of keys - remainingKeys := len(s.FundedKeys) - keyCount - allocatedKeys := s.FundedKeys[remainingKeys:] + remainingKeys := len(s.PreFundedKeys) - keyCount + allocatedKeys := s.PreFundedKeys[remainingKeys:] keysDoc := &keysDocument{ Keys: allocatedKeys, @@ -88,7 +88,7 @@ func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) { // Forget the allocated keys utils.ZeroSlice(allocatedKeys) - s.FundedKeys = s.FundedKeys[:remainingKeys] + s.PreFundedKeys = s.PreFundedKeys[:remainingKeys] } // Serve test data via http to ensure allocation is synchronized even when @@ -122,9 +122,9 @@ func ServeTestData(testData TestData) (string, error) { return address, nil } -// Retrieve the specified number of funded test keys from the provided URI. A given +// Retrieve the specified number of pre-funded test keys from the provided URI. A given // key is allocated at most once during the life of the test data server. 
-func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, error) { +func AllocatePreFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, error) { if count <= 0 { return nil, errInvalidKeyCount } @@ -144,13 +144,13 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, fmt.Errorf("failed to request funded keys: %w", err) + return nil, fmt.Errorf("failed to request pre-funded keys: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("failed to read response for funded keys: %w", err) + return nil, fmt.Errorf("failed to read response for pre-funded keys: %w", err) } if resp.StatusCode != http.StatusOK { if strings.TrimSpace(string(body)) == requestedKeyCountExceedsAvailable { @@ -161,7 +161,7 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err keysDoc := &keysDocument{} if err := json.Unmarshal(body, keysDoc); err != nil { - return nil, fmt.Errorf("failed to unmarshal funded keys: %w", err) + return nil, fmt.Errorf("failed to unmarshal pre-funded keys: %w", err) } return keysDoc.Keys, nil } diff --git a/tests/fixture/test_data_server_test.go b/tests/fixture/test_data_server_test.go index 979c927fea7f..6ad7644264d7 100644 --- a/tests/fixture/test_data_server_test.go +++ b/tests/fixture/test_data_server_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fixture @@ -14,7 +14,7 @@ import ( // Check that funded test keys can be served from an http server to // ensure at-most-once allocation when tests are executed in parallel. -func TestAllocateFundedKeys(t *testing.T) { +func TestAllocatePreFundedKeys(t *testing.T) { require := require.New(t) keys := make([]*secp256k1.PrivateKey, 5) @@ -25,7 +25,7 @@ func TestAllocateFundedKeys(t *testing.T) { } uri, err := ServeTestData(TestData{ - FundedKeys: keys, + PreFundedKeys: keys, }) require.NoError(err) @@ -63,7 +63,7 @@ func TestAllocateFundedKeys(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - keys, err := AllocateFundedKeys(uri, tc.count) + keys, err := AllocatePreFundedKeys(uri, tc.count) require.ErrorIs(err, tc.expectedError) addresses := make([]ids.ShortID, len(keys)) diff --git a/tests/fixture/testnet/README.md b/tests/fixture/testnet/README.md deleted file mode 100644 index ef2e5fb4df75..000000000000 --- a/tests/fixture/testnet/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Test Network Fixture - -This package contains configuration and interfaces that are -independent of a given orchestration mechanism -(e.g. [local](local/README.md)). The intent is to enable tests to be -written against the interfaces defined in this package and for -implementation-specific details of test network orchestration to be -limited to test setup and teardown. diff --git a/tests/fixture/testnet/cmd/main.go b/tests/fixture/testnet/cmd/main.go deleted file mode 100644 index 92dc846edca8..000000000000 --- a/tests/fixture/testnet/cmd/main.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package main - -import ( - "context" - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" - - "github.com/spf13/cobra" - - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" - "github.com/ava-labs/avalanchego/version" -) - -const cliVersion = "0.0.1" - -var ( - errAvalancheGoRequired = fmt.Errorf("--avalanchego-path or %s are required", local.AvalancheGoPathEnvName) - errNetworkDirRequired = fmt.Errorf("--network-dir or %s are required", local.NetworkDirEnvName) -) - -func main() { - rootCmd := &cobra.Command{ - Use: "testnetctl", - Short: "testnetctl commands", - } - - versionCmd := &cobra.Command{ - Use: "version", - Short: "Print version details", - RunE: func(*cobra.Command, []string) error { - msg := cliVersion - if len(version.GitCommit) > 0 { - msg += ", commit=" + version.GitCommit - } - fmt.Fprintf(os.Stdout, msg+"\n") - return nil - }, - } - rootCmd.AddCommand(versionCmd) - - var ( - rootDir string - execPath string - nodeCount uint8 - fundedKeyCount uint8 - ) - startNetworkCmd := &cobra.Command{ - Use: "start-network", - Short: "Start a new local network", - RunE: func(*cobra.Command, []string) error { - if len(execPath) == 0 { - return errAvalancheGoRequired - } - - // Root dir will be defaulted on start if not provided - - network := &local.LocalNetwork{ - LocalConfig: local.LocalConfig{ - ExecPath: execPath, - }, - } - ctx, cancel := context.WithTimeout(context.Background(), local.DefaultNetworkStartTimeout) - defer cancel() - network, err := local.StartNetwork(ctx, os.Stdout, rootDir, network, int(nodeCount), int(fundedKeyCount)) - if err != nil { - return err - } - - // Symlink the new network to the 'latest' network to simplify usage - networkRootDir := filepath.Dir(network.Dir) - networkDirName := filepath.Base(network.Dir) - latestSymlinkPath := filepath.Join(networkRootDir, "latest") - if err := os.Remove(latestSymlinkPath); err != nil && !errors.Is(err, fs.ErrNotExist) { - return err - } - if err := os.Symlink(networkDirName, latestSymlinkPath); err != nil { - return err - } - - fmt.Fprintf(os.Stdout, "\nConfigure testnetctl to target this network by default with one of the following statements:") - fmt.Fprintf(os.Stdout, "\n - source %s\n", network.EnvFilePath()) - fmt.Fprintf(os.Stdout, " - %s\n", network.EnvFileContents()) - fmt.Fprintf(os.Stdout, " - export %s=%s\n", local.NetworkDirEnvName, latestSymlinkPath) - - return nil - }, - } - startNetworkCmd.PersistentFlags().StringVar(&rootDir, "root-dir", os.Getenv(local.RootDirEnvName), "The path to the root directory for local networks") - startNetworkCmd.PersistentFlags().StringVar(&execPath, "avalanchego-path", os.Getenv(local.AvalancheGoPathEnvName), "The path to an avalanchego binary") - startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", testnet.DefaultNodeCount, "Number of nodes the network should initially consist of") - startNetworkCmd.PersistentFlags().Uint8Var(&fundedKeyCount, "funded-key-count", testnet.DefaultFundedKeyCount, "Number of funded keys the network should start with") - rootCmd.AddCommand(startNetworkCmd) - - var networkDir string - stopNetworkCmd := &cobra.Command{ - Use: "stop-network", - Short: "Stop a local network", - RunE: func(*cobra.Command, []string) error { - if len(networkDir) == 0 { - return errNetworkDirRequired - } - if err := local.StopNetwork(networkDir); err != nil { - return err - } - fmt.Fprintf(os.Stdout, "Stopped network configured at: %s\n", networkDir) - return nil - }, - } - 
stopNetworkCmd.PersistentFlags().StringVar(&networkDir, "network-dir", os.Getenv(local.NetworkDirEnvName), "The path to the configuration directory of a local network") - rootCmd.AddCommand(stopNetworkCmd) - - if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "testnetctl failed: %v\n", err) - os.Exit(1) - } - os.Exit(0) -} diff --git a/tests/fixture/testnet/common.go b/tests/fixture/testnet/common.go deleted file mode 100644 index ab983e893e46..000000000000 --- a/tests/fixture/testnet/common.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package testnet - -import ( - "context" - "errors" - "fmt" - "time" -) - -const ( - DefaultNodeTickerInterval = 50 * time.Millisecond -) - -var ErrNotRunning = errors.New("not running") - -// WaitForHealthy blocks until Node.IsHealthy returns true or an error (including context timeout) is observed. -func WaitForHealthy(ctx context.Context, node Node) error { - if _, ok := ctx.Deadline(); !ok { - return fmt.Errorf("unable to wait for health for node %q with a context without a deadline", node.GetID()) - } - ticker := time.NewTicker(DefaultNodeTickerInterval) - defer ticker.Stop() - - for { - healthy, err := node.IsHealthy(ctx) - if err != nil && !errors.Is(err, ErrNotRunning) { - return fmt.Errorf("failed to wait for health of node %q: %w", node.GetID(), err) - } - if healthy { - return nil - } - - select { - case <-ctx.Done(): - return fmt.Errorf("failed to wait for health of node %q before timeout: %w", node.GetID(), ctx.Err()) - case <-ticker.C: - } - } -} diff --git a/tests/fixture/testnet/config.go b/tests/fixture/testnet/config.go deleted file mode 100644 index 76fad35a622a..000000000000 --- a/tests/fixture/testnet/config.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package testnet - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "math/big" - "os" - "strings" - "time" - - "github.com/spf13/cast" - - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm" - - "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/genesis" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/peer" - "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" -) - -const ( - DefaultNodeCount = 2 // Minimum required to ensure connectivity-based health checks will pass - DefaultFundedKeyCount = 50 - - DefaultGasLimit = uint64(100_000_000) // Gas limit is arbitrary - - // Arbitrarily large amount of AVAX to fund keys on the X-Chain for testing - DefaultFundedKeyXChainAmount = 30 * units.MegaAvax - - // A short min stake duration enables testing of staking logic. 
- DefaultMinStakeDuration = time.Second -) - -var ( - // Arbitrarily large amount of AVAX (10^12) to fund keys on the C-Chain for testing - DefaultFundedKeyCChainAmount = new(big.Int).Exp(big.NewInt(10), big.NewInt(30), nil) - - errEmptyValidatorsForGenesis = errors.New("failed to generate genesis: empty validator IDs") - errNoKeysForGenesis = errors.New("failed to generate genesis: no keys to fund") - errInvalidNetworkIDForGenesis = errors.New("network ID can't be mainnet, testnet or local network ID") - errMissingValidatorsForGenesis = errors.New("no genesis validators provided") - errMissingBalancesForGenesis = errors.New("no genesis balances given") - errMissingTLSKeyForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingTLSKeyContentKey) - errMissingCertForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingCertContentKey) - errInvalidKeypair = fmt.Errorf("%q and %q must be provided together or not at all", config.StakingTLSKeyContentKey, config.StakingCertContentKey) -) - -// Defines a mapping of flag keys to values intended to be supplied to -// an invocation of an AvalancheGo node. -type FlagsMap map[string]interface{} - -// SetDefaults ensures the effectiveness of flag overrides by only -// setting values supplied in the defaults map that are not already -// explicitly set. -func (f FlagsMap) SetDefaults(defaults FlagsMap) { - for key, value := range defaults { - if _, ok := f[key]; !ok { - f[key] = value - } - } -} - -// GetStringVal simplifies retrieving a map value as a string. -func (f FlagsMap) GetStringVal(key string) (string, error) { - rawVal, ok := f[key] - if !ok { - return "", nil - } - - val, err := cast.ToStringE(rawVal) - if err != nil { - return "", fmt.Errorf("failed to cast value for %q: %w", key, err) - } - return val, nil -} - -// Write simplifies writing a FlagsMap to the provided path. The -// description is used in error messages. -func (f FlagsMap) Write(path string, description string) error { - bytes, err := DefaultJSONMarshal(f) - if err != nil { - return fmt.Errorf("failed to marshal %s: %w", description, err) - } - if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write %s: %w", description, err) - } - return nil -} - -// Utility function simplifying construction of a FlagsMap from a file. -func ReadFlagsMap(path string, description string) (*FlagsMap, error) { - bytes, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("failed to read %s: %w", description, err) - } - flagsMap := &FlagsMap{} - if err := json.Unmarshal(bytes, flagsMap); err != nil { - return nil, fmt.Errorf("failed to unmarshal %s: %w", description, err) - } - return flagsMap, nil -} - -// Marshal to json with default prefix and indent. -func DefaultJSONMarshal(v interface{}) ([]byte, error) { - return json.MarshalIndent(v, "", " ") -} - -// NetworkConfig defines configuration shared or -// common to all nodes in a given network. -type NetworkConfig struct { - Genesis *genesis.UnparsedConfig - CChainConfig FlagsMap - DefaultFlags FlagsMap - FundedKeys []*secp256k1.PrivateKey -} - -// Ensure genesis is generated if not already present. 
-func (c *NetworkConfig) EnsureGenesis(networkID uint32, validatorIDs []ids.NodeID) error { - if c.Genesis != nil { - return nil - } - - if len(validatorIDs) == 0 { - return errEmptyValidatorsForGenesis - } - if len(c.FundedKeys) == 0 { - return errNoKeysForGenesis - } - - // Ensure pre-funded keys have arbitrary large balances on both chains to support testing - xChainBalances := make(XChainBalanceMap, len(c.FundedKeys)) - cChainBalances := make(core.GenesisAlloc, len(c.FundedKeys)) - for _, key := range c.FundedKeys { - xChainBalances[key.Address()] = DefaultFundedKeyXChainAmount - cChainBalances[evm.GetEthAddress(key)] = core.GenesisAccount{ - Balance: DefaultFundedKeyCChainAmount, - } - } - - genesis, err := NewTestGenesis(networkID, xChainBalances, cChainBalances, validatorIDs) - if err != nil { - return err - } - - c.Genesis = genesis - return nil -} - -// NodeURI associates a node ID with its API URI. -type NodeURI struct { - NodeID ids.NodeID - URI string -} - -// NodeConfig defines configuration for an AvalancheGo node. -type NodeConfig struct { - NodeID ids.NodeID - Flags FlagsMap -} - -func NewNodeConfig() *NodeConfig { - return &NodeConfig{ - Flags: FlagsMap{}, - } -} - -// Convenience method for setting networking flags. -func (nc *NodeConfig) SetNetworkingConfigDefaults( - httpPort uint16, - stakingPort uint16, - bootstrapIDs []string, - bootstrapIPs []string, -) { - nc.Flags.SetDefaults(FlagsMap{ - config.HTTPPortKey: httpPort, - config.StakingPortKey: stakingPort, - config.BootstrapIDsKey: strings.Join(bootstrapIDs, ","), - config.BootstrapIPsKey: strings.Join(bootstrapIPs, ","), - }) -} - -// Ensures staking and signing keys are generated if not already present and -// that the node ID (derived from the staking keypair) is set. -func (nc *NodeConfig) EnsureKeys() error { - if err := nc.EnsureBLSSigningKey(); err != nil { - return err - } - if err := nc.EnsureStakingKeypair(); err != nil { - return err - } - // Once a staking keypair is guaranteed it is safe to derive the node ID - return nc.EnsureNodeID() -} - -// Ensures a BLS signing key is generated if not already present. -func (nc *NodeConfig) EnsureBLSSigningKey() error { - // Attempt to retrieve an existing key - existingKey, err := nc.Flags.GetStringVal(config.StakingSignerKeyContentKey) - if err != nil { - return err - } - if len(existingKey) > 0 { - // Nothing to do - return nil - } - - // Generate a new signing key - newKey, err := bls.NewSecretKey() - if err != nil { - return fmt.Errorf("failed to generate staking signer key: %w", err) - } - nc.Flags[config.StakingSignerKeyContentKey] = base64.StdEncoding.EncodeToString(bls.SerializeSecretKey(newKey)) - return nil -} - -// Ensures a staking keypair is generated if not already present. 
-func (nc *NodeConfig) EnsureStakingKeypair() error { - keyKey := config.StakingTLSKeyContentKey - certKey := config.StakingCertContentKey - - key, err := nc.Flags.GetStringVal(keyKey) - if err != nil { - return err - } - - cert, err := nc.Flags.GetStringVal(certKey) - if err != nil { - return err - } - - if len(key) == 0 && len(cert) == 0 { - // Generate new keypair - tlsCertBytes, tlsKeyBytes, err := staking.NewCertAndKeyBytes() - if err != nil { - return fmt.Errorf("failed to generate staking keypair: %w", err) - } - nc.Flags[keyKey] = base64.StdEncoding.EncodeToString(tlsKeyBytes) - nc.Flags[certKey] = base64.StdEncoding.EncodeToString(tlsCertBytes) - } else if len(key) == 0 || len(cert) == 0 { - // Only one of key and cert was provided - return errInvalidKeypair - } - - err = nc.EnsureNodeID() - if err != nil { - return fmt.Errorf("failed to derive a node ID: %w", err) - } - - return nil -} - -// Attempt to derive the node ID from the node configuration. -func (nc *NodeConfig) EnsureNodeID() error { - keyKey := config.StakingTLSKeyContentKey - certKey := config.StakingCertContentKey - - key, err := nc.Flags.GetStringVal(keyKey) - if err != nil { - return err - } - if len(key) == 0 { - return errMissingTLSKeyForNodeID - } - keyBytes, err := base64.StdEncoding.DecodeString(key) - if err != nil { - return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", keyKey, err) - } - - cert, err := nc.Flags.GetStringVal(certKey) - if err != nil { - return err - } - if len(cert) == 0 { - return errMissingCertForNodeID - } - certBytes, err := base64.StdEncoding.DecodeString(cert) - if err != nil { - return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", certKey, err) - } - - tlsCert, err := staking.LoadTLSCertFromBytes(keyBytes, certBytes) - if err != nil { - return fmt.Errorf("failed to ensure node ID: failed to load tls cert: %w", err) - } - - nodeID, err := peer.CertToID(tlsCert.Leaf) - if err != nil { - return fmt.Errorf("failed to recover nodeID from tls cert: %w", err) - } - nc.NodeID = nodeID - - return nil -} - -// Helper type to simplify configuring X-Chain genesis balances -type XChainBalanceMap map[ids.ShortID]uint64 - -// Create a genesis struct valid for bootstrapping a test -// network. Note that many of the genesis fields (e.g. reward -// addresses) are randomly generated or hard-coded. -func NewTestGenesis( - networkID uint32, - xChainBalances XChainBalanceMap, - cChainBalances core.GenesisAlloc, - validatorIDs []ids.NodeID, -) (*genesis.UnparsedConfig, error) { - // Validate inputs - switch networkID { - case constants.TestnetID, constants.MainnetID, constants.LocalID: - return nil, errInvalidNetworkIDForGenesis - } - if len(validatorIDs) == 0 { - return nil, errMissingValidatorsForGenesis - } - if len(xChainBalances) == 0 || len(cChainBalances) == 0 { - return nil, errMissingBalancesForGenesis - } - - // Address that controls stake doesn't matter -- generate it randomly - stakeAddress, err := address.Format( - "X", - constants.GetHRP(networkID), - ids.GenerateTestShortID().Bytes(), - ) - if err != nil { - return nil, fmt.Errorf("failed to format stake address: %w", err) - } - - // Ensure the total stake allows a MegaAvax per validator - totalStake := uint64(len(validatorIDs)) * units.MegaAvax - - // The eth address is only needed to link pre-mainnet assets. Until that capability - // becomes necessary for testing, use a bogus address. 
- // - // Reference: https://github.com/ava-labs/avalanchego/issues/1365#issuecomment-1511508767 - ethAddress := "0x0000000000000000000000000000000000000000" - - now := time.Now() - - config := &genesis.UnparsedConfig{ - NetworkID: networkID, - Allocations: []genesis.UnparsedAllocation{ - { - ETHAddr: ethAddress, - AVAXAddr: stakeAddress, - InitialAmount: 0, - UnlockSchedule: []genesis.LockedAmount{ // Provides stake to validators - { - Amount: totalStake, - Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week - }, - }, - }, - }, - StartTime: uint64(now.Unix()), - InitialStakedFunds: []string{stakeAddress}, - InitialStakeDuration: 365 * 24 * 60 * 60, // 1 year - InitialStakeDurationOffset: 90 * 60, // 90 minutes - Message: "hello avalanche!", - } - - // Set X-Chain balances - for xChainAddress, balance := range xChainBalances { - avaxAddr, err := address.Format("X", constants.GetHRP(networkID), xChainAddress[:]) - if err != nil { - return nil, fmt.Errorf("failed to format X-Chain address: %w", err) - } - config.Allocations = append( - config.Allocations, - genesis.UnparsedAllocation{ - ETHAddr: ethAddress, - AVAXAddr: avaxAddr, - InitialAmount: balance, - UnlockSchedule: []genesis.LockedAmount{ - { - Amount: 20 * units.MegaAvax, - }, - { - Amount: totalStake, - Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week - }, - }, - }, - ) - } - - // Define C-Chain genesis - cChainGenesis := &core.Genesis{ - Config: ¶ms.ChainConfig{ - ChainID: big.NewInt(43112), // Arbitrary chain ID is arbitrary - }, - Difficulty: big.NewInt(0), // Difficulty is a mandatory field - GasLimit: DefaultGasLimit, - Alloc: cChainBalances, - } - cChainGenesisBytes, err := json.Marshal(cChainGenesis) - if err != nil { - return nil, fmt.Errorf("failed to marshal C-Chain genesis: %w", err) - } - config.CChainGenesis = string(cChainGenesisBytes) - - // Give staking rewards for initial validators to a random address. Any testing of staking rewards - // will be easier to perform with nodes other than the initial validators since the timing of - // staking can be more easily controlled. - rewardAddr, err := address.Format("X", constants.GetHRP(networkID), ids.GenerateTestShortID().Bytes()) - if err != nil { - return nil, fmt.Errorf("failed to format reward address: %w", err) - } - - // Configure provided validator node IDs as initial stakers - for _, validatorID := range validatorIDs { - config.InitialStakers = append( - config.InitialStakers, - genesis.UnparsedStaker{ - NodeID: validatorID, - RewardAddress: rewardAddr, - DelegationFee: .01 * reward.PercentDenominator, - }, - ) - } - - return config, nil -} diff --git a/tests/fixture/testnet/interfaces.go b/tests/fixture/testnet/interfaces.go deleted file mode 100644 index 2c1479ec48bd..000000000000 --- a/tests/fixture/testnet/interfaces.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package testnet - -import ( - "context" - "io" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/node" -) - -// Defines network capabilities supportable regardless of how a network is orchestrated. -type Network interface { - GetConfig() NetworkConfig - GetNodes() []Node - AddEphemeralNode(w io.Writer, flags FlagsMap) (Node, error) -} - -// Defines node capabilities supportable regardless of how a network is orchestrated. 
-type Node interface { - GetID() ids.NodeID - GetConfig() NodeConfig - GetProcessContext() node.NodeProcessContext - IsHealthy(ctx context.Context) (bool, error) - Stop() error -} diff --git a/tests/fixture/testnet/local/README.md b/tests/fixture/testnet/local/README.md deleted file mode 100644 index fdfbbdb4d58b..000000000000 --- a/tests/fixture/testnet/local/README.md +++ /dev/null @@ -1,219 +0,0 @@ -# Local network orchestration - -This package implements a simple orchestrator for the avalanchego -nodes of a local network. Configuration is stored on disk, and nodes -run as independent processes whose process details are also written to -disk. Using the filesystem to store configuration and process details -allows for the `testnetctl` cli and e2e test fixture to orchestrate -the same local networks without the use of an rpc daemon. - -## Package details - -The functionality in this package is grouped by logical purpose into -the following non-test files: - -| Filename | Types | Purpose | -|:-----------|:-------------------|:----------------------------------------------| -| config.go | | Common configuration | -| network.go | LocalNetwork | Network-level orchestration and configuration | -| node.go | Local{Config,Node} | Node-level orchestration and configuration | - - -This package depends on its parent package for implementation-agnostic -network and node configuration. Only configuration and code specific -to orchestrating local networks belongs in this package to ensure that -other orchestration implementations can reuse the shared configuration -abstractions. - -## Usage - -### Via testnetctl - -A local network can be managed by the `testnetctl` cli tool: - -```bash -# From the root of the avalanchego repo - -# Build the testnetctl binary -$ ./scripts/build_testnetctl.sh - -# Start a new network -$ ./build/testnetctl start-network --avalanchego-path=/path/to/avalanchego -... -Started network 1000 @ /home/me/.testnetctl/networks/1000 - -Configure testnetctl to target this network by default with one of the following statements: - - source /home/me/.testnetctl/networks/1000/network.env - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/1000 - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/latest - -# Stop the network -$ ./build/testnetctl stop-network --network-dir=/path/to/network -``` - -Note the export of the path ending in `latest`. This is a symlink that -set to the last network created by `testnetctl start-network`. Setting -the `TESTNETCTL_NETWORK_DIR` env var to this symlink ensures that -`testnetctl` commands and e2e execution with -`--use-persistent-network` will target the most recently deployed -local network. 
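With this change, code-level network orchestration for e2e tests moves from the deleted `testnet`/`local` packages to the `tests/fixture/e2e` and `tmpnet` packages added above. Below is a minimal sketch of how a ginkgo suite might wire the new fixture together, using only exported names visible in this diff (`RegisterFlags`, `NewTestEnvironment`, `TestEnvironment.Marshal`, `InitSharedTestEnvironment`); the suite file name and the use of a zero-value `tmpnet.Network` as the desired network are assumptions, not part of this patch.

```golang
// e2e_test.go (illustrative sketch, not part of this diff)
package e2e_test

import (
	"testing"

	ginkgo "github.com/onsi/ginkgo/v2"

	"github.com/onsi/gomega"

	"github.com/ava-labs/avalanchego/tests/fixture/e2e"
	"github.com/ava-labs/avalanchego/tests/fixture/tmpnet"
)

var flagVars *e2e.FlagVars

func init() {
	// Registers --avalanchego-path, --plugin-dir, --network-dir and
	// --use-existing-network; the test binary parses flags before specs run.
	flagVars = e2e.RegisterFlags()
}

func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "e2e test suites")
}

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Start a new shared network (or reuse an existing one, per the flags) and
	// serialize the resulting environment so every ginkgo process can access it.
	return e2e.NewTestEnvironment(flagVars, &tmpnet.Network{}).Marshal()
}, e2e.InitSharedTestEnvironment)
```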
- -### Via code - -A local network can be managed in code: - -```golang -network, _ := local.StartNetwork( - ctx, // Context used to limit duration of waiting for network health - ginkgo.GinkgoWriter, // Writer to report progress of network start - "", // Use default root dir (~/.testnetctl) - &local.LocalNetwork{ - LocalConfig: local.LocalConfig{ - ExecPath: "/path/to/avalanchego", // Defining the avalanchego exec path is required - }, - }, - 5, // Number of initial validating nodes - 50, // Number of pre-funded keys to create -) - -uris := network.GetURIs() - -// Use URIs to interact with the network - -// Stop all nodes in the network -network.Stop() -``` - -If non-default node behavior is required, the `LocalNetwork` instance -supplied to `StartNetwork()` can be initialized with explicit node -configuration and by supplying a nodeCount argument of `0`: - -```golang -network, _ := local.StartNetwork( - ctx, - ginkgo.GinkgoWriter, - "", - &local.LocalNetwork{ - LocalConfig: local.LocalConfig{ - ExecPath: "/path/to/avalanchego", - }, - Nodes: []*LocalNode{ - { // node1 configuration is customized - Flags: FlagsMap{ // Any and all node flags can be configured here - config.DataDirKey: "/custom/path/to/node/data", - } - }, - }, - {}, // node2 uses default configuration - {}, // node3 uses default configuration - {}, // node4 uses default configuration - {}, // node5 uses default configuration - }, - 0, // Node count must be zero when setting node config - 50, -) -``` - -Further examples of code-based usage are located in the [e2e -tests](../../../e2e/e2e_test.go). - -## Networking configuration - -By default, nodes in a local network will be started with staking and -API ports set to `0` to ensure that ports will be dynamically -chosen. The testnet fixture discovers the ports used by a given node -by reading the `[base-data-dir]/process.json` file written by -avalanchego on node start. The use of dynamic ports supports testing -with many local networks without having to manually select compatible -port ranges. - -## Configuration on disk - -A local network relies on configuration written to disk in the following structure: - -``` -HOME -└── .testnetctl // Root path for tool - └── networks // Default parent directory for local networks - └── 1000 // The networkID is used to name the network dir and starts at 1000 - ├── NodeID-37E8UK3x2YFsHE3RdALmfWcppcZ1eTuj9 // The ID of a node is the name of its data dir - │ ├── chainData - │ │ └── ... - │ ├── config.json // Node flags - │ ├── db - │ │ └── ... - │ ├── logs - │ │ └── ... - │ ├── plugins - │ │ └── ... - │ └── process.json // Node process details (PID, API URI, staking address) - ├── chains - │ └── C - │ └── config.json // C-Chain config for all nodes - ├── defaults.json // Default flags and configuration for network - ├── genesis.json // Genesis for all nodes - ├── network.env // Sets network dir env to simplify use of network - └── ephemeral // Parent directory for ephemeral nodes (e.g. created by tests) - └─ NodeID-FdxnAvr4jK9XXAwsYZPgWAHW2QnwSZ // Data dir for an ephemeral node - └── ... - -``` - -### Default flags and configuration - -The default avalanchego node flags (e.g. `--staking-port=`) and -default configuration like the avalanchego path are stored at -`[network-dir]/defaults.json`. The value for a given defaulted flag -will be set on initial and subsequently added nodes that do not supply -values for a given defaulted flag. 
- -### Genesis - -The genesis file is stored at `[network-dir]/genesis.json` and -referenced by default by all nodes in the network. The genesis file -content will be generated with reasonable defaults if not -supplied. Each node in the network can override the default by setting -an explicit value for `--genesis-file` or `--genesis-file-content`. - -### C-Chain config - -The C-Chain config for a local network is stored at -`[network-dir]/chains/C/config.json` and referenced by default by all -nodes in the network. The C-Chain config will be generated with -reasonable defaults if not supplied. Each node in the network can -override the default by setting an explicit value for -`--chain-config-dir` and ensuring the C-Chain config file exists at -`[chain-config-dir]/C/config.json`. - -TODO(marun) Enable configuration of X-Chain and P-Chain. - -### Network env - -A shell script that sets the `TESTNETCTL_NETWORK_DIR` env var to the -path of the network is stored at `[network-dir]/network.env`. Sourcing -this file (i.e. `source network.env`) in a shell will configure ginkgo -e2e and the `testnetctl` cli to target the network path specified in -the env var. - -### Node configuration - -The data dir for a node is set by default to -`[network-path]/[node-id]`. A node can be configured to use a -non-default path by explicitly setting the `--data-dir` -flag. - -#### Flags - -All flags used to configure a node are written to -`[network-path]/[node-id]/config.json` so that a node can be -configured with only a single argument: -`--config-file=/path/to/config.json`. This simplifies node launch and -ensures all parameters used to launch a node can be modified by -editing the config file. - -#### Process details - -The process details of a node are written by avalanchego to -`[base-data-dir]/process.json`. The file contains the PID of the node -process, the URI of the node's API, and the address other nodes can -use to bootstrap themselves (aka staking address). diff --git a/tests/fixture/testnet/local/config.go b/tests/fixture/testnet/local/config.go deleted file mode 100644 index 4b2f91a7ff2b..000000000000 --- a/tests/fixture/testnet/local/config.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package local - -import ( - "time" - - "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" -) - -const ( - // Constants defining the names of shell variables whose value can - // configure local network orchestration. - AvalancheGoPathEnvName = "CAMINOGO_BIN_PATH" - NetworkDirEnvName = "TESTNETCTL_NETWORK_DIR" - RootDirEnvName = "TESTNETCTL_ROOT_DIR" - - DefaultNetworkStartTimeout = 2 * time.Minute - DefaultNodeInitTimeout = 10 * time.Second - DefaultNodeStopTimeout = 5 * time.Second -) - -// A set of flags appropriate for local testing. -func LocalFlags() testnet.FlagsMap { - // Supply only non-default configuration to ensure that default values will be used. 
- return testnet.FlagsMap{ - config.NetworkPeerListGossipFreqKey: "250ms", - config.NetworkMaxReconnectDelayKey: "1s", - config.PublicIPKey: "127.0.0.1", - config.HTTPHostKey: "127.0.0.1", - config.StakingHostKey: "127.0.0.1", - config.HealthCheckFreqKey: "2s", - config.AdminAPIEnabledKey: "e2e", - config.IpcAPIEnabledKey: true, - config.IndexEnabledKey: true, - config.LogDisplayLevelKey: "INFO", - config.LogLevelKey: "DEBUG", - config.MinStakeDurationKey: testnet.DefaultMinStakeDuration.String(), - } -} - -// C-Chain config for local testing. -func LocalCChainConfig() testnet.FlagsMap { - // Supply only non-default configuration to ensure that default - // values will be used. Available C-Chain configuration options are - // defined in the `github.com/ava-labs/coreth/evm` package. - return testnet.FlagsMap{ - "log-level": "trace", - } -} diff --git a/tests/fixture/testnet/local/network.go b/tests/fixture/testnet/local/network.go deleted file mode 100644 index 4f83a7fb0869..000000000000 --- a/tests/fixture/testnet/local/network.go +++ /dev/null @@ -1,703 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package local - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/fs" - "os" - "path/filepath" - "strconv" - "time" - - "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/genesis" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/set" -) - -const ( - // This interval was chosen to avoid spamming node APIs during - // startup, as smaller intervals (e.g. 50ms) seemed to noticeably - // increase the time for a network's nodes to be seen as healthy. - networkHealthCheckInterval = 200 * time.Millisecond - - defaultEphemeralDirName = "ephemeral" -) - -var ( - errInvalidNodeCount = errors.New("failed to populate local network config: non-zero node count is only valid for a network without nodes") - errInvalidKeyCount = errors.New("failed to populate local network config: non-zero key count is only valid for a network without keys") - errLocalNetworkDirNotSet = errors.New("local network directory not set - has Create() been called?") - errInvalidNetworkDir = errors.New("failed to write local network: invalid network directory") - errMissingBootstrapNodes = errors.New("failed to add node due to missing bootstrap nodes") -) - -// Default root dir for storing networks and their configuration. -func GetDefaultRootDir() (string, error) { - homeDir, err := os.UserHomeDir() - if err != nil { - return "", err - } - return filepath.Join(homeDir, ".testnetctl", "networks"), nil -} - -// Find the next available network ID by attempting to create a -// directory numbered from 1000 until creation succeeds. Returns the -// network id and the full path of the created directory. 
-func FindNextNetworkID(rootDir string) (uint32, string, error) { - var ( - networkID uint32 = 1000 - dirPath string - ) - for { - _, reserved := constants.NetworkIDToNetworkName[networkID] - if reserved { - networkID++ - continue - } - - dirPath = filepath.Join(rootDir, strconv.FormatUint(uint64(networkID), 10)) - err := os.Mkdir(dirPath, perms.ReadWriteExecute) - if err == nil { - return networkID, dirPath, nil - } - - if !errors.Is(err, fs.ErrExist) { - return 0, "", fmt.Errorf("failed to create network directory: %w", err) - } - - // Directory already exists, keep iterating - networkID++ - } -} - -// Defines the configuration required for a local network (i.e. one composed of local processes). -type LocalNetwork struct { - testnet.NetworkConfig - LocalConfig - - // Nodes with local configuration - Nodes []*LocalNode - - // Path where network configuration will be stored - Dir string -} - -// Returns the configuration of the network in backend-agnostic form. -func (ln *LocalNetwork) GetConfig() testnet.NetworkConfig { - return ln.NetworkConfig -} - -// Returns the nodes of the network in backend-agnostic form. -func (ln *LocalNetwork) GetNodes() []testnet.Node { - nodes := make([]testnet.Node, 0, len(ln.Nodes)) - for _, node := range ln.Nodes { - nodes = append(nodes, node) - } - return nodes -} - -// Adds a backend-agnostic ephemeral node to the network -func (ln *LocalNetwork) AddEphemeralNode(w io.Writer, flags testnet.FlagsMap) (testnet.Node, error) { - if flags == nil { - flags = testnet.FlagsMap{} - } - return ln.AddLocalNode(w, &LocalNode{ - NodeConfig: testnet.NodeConfig{ - Flags: flags, - }, - }, true /* isEphemeral */) -} - -// Starts a new network stored under the provided root dir. Required -// configuration will be defaulted if not provided. -func StartNetwork( - ctx context.Context, - w io.Writer, - rootDir string, - network *LocalNetwork, - nodeCount int, - keyCount int, -) (*LocalNetwork, error) { - if _, err := fmt.Fprintf(w, "Preparing configuration for new local network with %s\n", network.ExecPath); err != nil { - return nil, err - } - - if len(rootDir) == 0 { - // Use the default root dir - var err error - rootDir, err = GetDefaultRootDir() - if err != nil { - return nil, err - } - } - - // Ensure creation of the root dir - if err := os.MkdirAll(rootDir, perms.ReadWriteExecute); err != nil { - return nil, fmt.Errorf("failed to create root network dir: %w", err) - } - - // Determine the network path and ID - var ( - networkDir string - networkID uint32 - ) - if network.Genesis != nil && network.Genesis.NetworkID > 0 { - // Use the network ID defined in the provided genesis - networkID = network.Genesis.NetworkID - } - if networkID > 0 { - // Use a directory with a random suffix - var err error - networkDir, err = os.MkdirTemp(rootDir, fmt.Sprintf("%d.", network.Genesis.NetworkID)) - if err != nil { - return nil, fmt.Errorf("failed to create network dir: %w", err) - } - } else { - // Find the next available network ID based on the contents of the root dir - var err error - networkID, networkDir, err = FindNextNetworkID(rootDir) - if err != nil { - return nil, err - } - } - - // Setting the network dir before populating config ensures the - // nodes know where to write their configuration. 
- network.Dir = networkDir - - if err := network.PopulateLocalNetworkConfig(networkID, nodeCount, keyCount); err != nil { - return nil, err - } - - if err := network.WriteAll(); err != nil { - return nil, err - } - if _, err := fmt.Fprintf(w, "Starting network %d @ %s\n", network.Genesis.NetworkID, network.Dir); err != nil { - return nil, err - } - if err := network.Start(w); err != nil { - return nil, err - } - if _, err := fmt.Fprintf(w, "Waiting for all nodes to report healthy...\n\n"); err != nil { - return nil, err - } - if err := network.WaitForHealthy(ctx, w); err != nil { - return nil, err - } - if _, err := fmt.Fprintf(w, "\nStarted network %d @ %s\n", network.Genesis.NetworkID, network.Dir); err != nil { - return nil, err - } - return network, nil -} - -// Read a network from the provided directory. -func ReadNetwork(dir string) (*LocalNetwork, error) { - network := &LocalNetwork{Dir: dir} - if err := network.ReadAll(); err != nil { - return nil, fmt.Errorf("failed to read local network: %w", err) - } - return network, nil -} - -// Stop the nodes of the network configured in the provided directory. -func StopNetwork(dir string) error { - network, err := ReadNetwork(dir) - if err != nil { - return err - } - return network.Stop() -} - -// Ensure the network has the configuration it needs to start. -func (ln *LocalNetwork) PopulateLocalNetworkConfig(networkID uint32, nodeCount int, keyCount int) error { - if len(ln.Nodes) > 0 && nodeCount > 0 { - return errInvalidNodeCount - } - if len(ln.FundedKeys) > 0 && keyCount > 0 { - return errInvalidKeyCount - } - - if nodeCount > 0 { - // Add the specified number of nodes - nodes := make([]*LocalNode, 0, nodeCount) - for i := 0; i < nodeCount; i++ { - nodes = append(nodes, NewLocalNode("")) - } - ln.Nodes = nodes - } - - // Ensure each node has keys and an associated node ID. This - // ensures the availability of validator node IDs for genesis - // generation. - for _, node := range ln.Nodes { - if err := node.EnsureKeys(); err != nil { - return err - } - } - - // Assume all initial nodes are validator ids - validatorIDs := make([]ids.NodeID, 0, len(ln.Nodes)) - for _, node := range ln.Nodes { - validatorIDs = append(validatorIDs, node.NodeID) - } - - if keyCount > 0 { - // Ensure there are keys for genesis generation to fund - keys := make([]*secp256k1.PrivateKey, 0, keyCount) - for i := 0; i < keyCount; i++ { - key, err := secp256k1.NewPrivateKey() - if err != nil { - return fmt.Errorf("failed to generate private key: %w", err) - } - keys = append(keys, key) - } - ln.FundedKeys = keys - } - - if err := ln.EnsureGenesis(networkID, validatorIDs); err != nil { - return err - } - - if ln.CChainConfig == nil { - ln.CChainConfig = LocalCChainConfig() - } - - // Default flags need to be set in advance of node config - // population to ensure correct node configuration. - if ln.DefaultFlags == nil { - ln.DefaultFlags = LocalFlags() - } - - for _, node := range ln.Nodes { - // Ensure the node is configured for use with the network and - // knows where to write its configuration. - if err := ln.PopulateNodeConfig(node, ln.Dir); err != nil { - return err - } - } - - return nil -} - -// Ensure the provided node has the configuration it needs to start. If the data dir is -// not set, it will be defaulted to [nodeParentDir]/[node ID]. Requires that the -// network has valid genesis data. 
-func (ln *LocalNetwork) PopulateNodeConfig(node *LocalNode, nodeParentDir string) error { - flags := node.Flags - - // Set values common to all nodes - flags.SetDefaults(ln.DefaultFlags) - flags.SetDefaults(testnet.FlagsMap{ - config.GenesisFileKey: ln.GetGenesisPath(), - config.ChainConfigDirKey: ln.GetChainConfigDir(), - }) - - // Convert the network id to a string to ensure consistency in JSON round-tripping. - flags[config.NetworkNameKey] = strconv.FormatUint(uint64(ln.Genesis.NetworkID), 10) - - // Ensure keys are added if necessary - if err := node.EnsureKeys(); err != nil { - return err - } - - // Ensure the node's data dir is configured - dataDir := node.GetDataDir() - if len(dataDir) == 0 { - // NodeID will have been set by EnsureKeys - dataDir = filepath.Join(nodeParentDir, node.NodeID.String()) - flags[config.DataDirKey] = dataDir - } - - return nil -} - -// Starts a network for the first time -func (ln *LocalNetwork) Start(w io.Writer) error { - if len(ln.Dir) == 0 { - return errLocalNetworkDirNotSet - } - - // Ensure configuration on disk is current - if err := ln.WriteAll(); err != nil { - return err - } - - // Accumulate bootstrap nodes such that each subsequently started - // node bootstraps from the nodes previously started. - // - // e.g. - // 1st node: no bootstrap nodes - // 2nd node: 1st node - // 3rd node: 1st and 2nd nodes - // ... - // - bootstrapIDs := make([]string, 0, len(ln.Nodes)) - bootstrapIPs := make([]string, 0, len(ln.Nodes)) - - // Configure networking and start each node - for _, node := range ln.Nodes { - // Update network configuration - node.SetNetworkingConfigDefaults(0, 0, bootstrapIDs, bootstrapIPs) - - // Write configuration to disk in preparation for node start - if err := node.WriteConfig(); err != nil { - return err - } - - // Start waits for the process context to be written which - // indicates that the node will be accepting connections on - // its staking port. The network will start faster with this - // synchronization due to the avoidance of exponential backoff - // if a node tries to connect to a beacon that is not ready. - if err := node.Start(w, ln.ExecPath); err != nil { - return err - } - - // Collect bootstrap nodes for subsequently started nodes to use - bootstrapIDs = append(bootstrapIDs, node.NodeID.String()) - bootstrapIPs = append(bootstrapIPs, node.StakingAddress) - } - - return nil -} - -// Wait until all nodes in the network are healthy. -func (ln *LocalNetwork) WaitForHealthy(ctx context.Context, w io.Writer) error { - ticker := time.NewTicker(networkHealthCheckInterval) - defer ticker.Stop() - - healthyNodes := set.NewSet[ids.NodeID](len(ln.Nodes)) - for healthyNodes.Len() < len(ln.Nodes) { - for _, node := range ln.Nodes { - if healthyNodes.Contains(node.NodeID) { - continue - } - - healthy, err := node.IsHealthy(ctx) - if err != nil && !errors.Is(err, testnet.ErrNotRunning) { - return err - } - if !healthy { - continue - } - - healthyNodes.Add(node.NodeID) - if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { - return err - } - } - - select { - case <-ctx.Done(): - return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) - case <-ticker.C: - } - } - return nil -} - -// Retrieve API URIs for all running primary validator nodes. URIs for -// ephemeral nodes are not returned. -func (ln *LocalNetwork) GetURIs() []testnet.NodeURI { - uris := make([]testnet.NodeURI, 0, len(ln.Nodes)) - for _, node := range ln.Nodes { - // Only append URIs that are not empty. 
A node may have an - // empty URI if it was not running at the time - // node.ReadProcessContext() was called. - if len(node.URI) > 0 { - uris = append(uris, testnet.NodeURI{ - NodeID: node.NodeID, - URI: node.URI, - }) - } - } - return uris -} - -// Stop all nodes in the network. -func (ln *LocalNetwork) Stop() error { - var errs []error - // Assume the nodes are loaded and the pids are current - for _, node := range ln.Nodes { - if err := node.Stop(); err != nil { - errs = append(errs, fmt.Errorf("failed to stop node %s: %w", node.NodeID, err)) - } - } - if len(errs) > 0 { - return fmt.Errorf("failed to stop network:\n%w", errors.Join(errs...)) - } - return nil -} - -func (ln *LocalNetwork) GetGenesisPath() string { - return filepath.Join(ln.Dir, "genesis.json") -} - -func (ln *LocalNetwork) ReadGenesis() error { - bytes, err := os.ReadFile(ln.GetGenesisPath()) - if err != nil { - return fmt.Errorf("failed to read genesis: %w", err) - } - genesis := genesis.UnparsedConfig{} - if err := json.Unmarshal(bytes, &genesis); err != nil { - return fmt.Errorf("failed to unmarshal genesis: %w", err) - } - ln.Genesis = &genesis - return nil -} - -func (ln *LocalNetwork) WriteGenesis() error { - bytes, err := testnet.DefaultJSONMarshal(ln.Genesis) - if err != nil { - return fmt.Errorf("failed to marshal genesis: %w", err) - } - if err := os.WriteFile(ln.GetGenesisPath(), bytes, perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write genesis: %w", err) - } - return nil -} - -func (ln *LocalNetwork) GetChainConfigDir() string { - return filepath.Join(ln.Dir, "chains") -} - -func (ln *LocalNetwork) GetCChainConfigPath() string { - return filepath.Join(ln.GetChainConfigDir(), "C", "config.json") -} - -func (ln *LocalNetwork) ReadCChainConfig() error { - chainConfig, err := testnet.ReadFlagsMap(ln.GetCChainConfigPath(), "C-Chain config") - if err != nil { - return err - } - ln.CChainConfig = *chainConfig - return nil -} - -func (ln *LocalNetwork) WriteCChainConfig() error { - path := ln.GetCChainConfigPath() - dir := filepath.Dir(path) - if err := os.MkdirAll(dir, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to create C-Chain config dir: %w", err) - } - return ln.CChainConfig.Write(path, "C-Chain config") -} - -// Used to marshal/unmarshal persistent local network defaults. 
-type localDefaults struct { - Flags testnet.FlagsMap - ExecPath string - FundedKeys []*secp256k1.PrivateKey -} - -func (ln *LocalNetwork) GetDefaultsPath() string { - return filepath.Join(ln.Dir, "defaults.json") -} - -func (ln *LocalNetwork) ReadDefaults() error { - bytes, err := os.ReadFile(ln.GetDefaultsPath()) - if err != nil { - return fmt.Errorf("failed to read defaults: %w", err) - } - defaults := localDefaults{} - if err := json.Unmarshal(bytes, &defaults); err != nil { - return fmt.Errorf("failed to unmarshal defaults: %w", err) - } - ln.DefaultFlags = defaults.Flags - ln.ExecPath = defaults.ExecPath - ln.FundedKeys = defaults.FundedKeys - return nil -} - -func (ln *LocalNetwork) WriteDefaults() error { - defaults := localDefaults{ - Flags: ln.DefaultFlags, - ExecPath: ln.ExecPath, - FundedKeys: ln.FundedKeys, - } - bytes, err := testnet.DefaultJSONMarshal(defaults) - if err != nil { - return fmt.Errorf("failed to marshal defaults: %w", err) - } - if err := os.WriteFile(ln.GetDefaultsPath(), bytes, perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write defaults: %w", err) - } - return nil -} - -func (ln *LocalNetwork) EnvFilePath() string { - return filepath.Join(ln.Dir, "network.env") -} - -func (ln *LocalNetwork) EnvFileContents() string { - return fmt.Sprintf("export %s=%s", NetworkDirEnvName, ln.Dir) -} - -// Write an env file that sets the network dir env when sourced. -func (ln *LocalNetwork) WriteEnvFile() error { - if err := os.WriteFile(ln.EnvFilePath(), []byte(ln.EnvFileContents()), perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write local network env file: %w", err) - } - return nil -} - -func (ln *LocalNetwork) WriteNodes() error { - for _, node := range ln.Nodes { - if err := node.WriteConfig(); err != nil { - return err - } - } - return nil -} - -// Write network configuration to disk. -func (ln *LocalNetwork) WriteAll() error { - if len(ln.Dir) == 0 { - return errInvalidNetworkDir - } - if err := ln.WriteGenesis(); err != nil { - return err - } - if err := ln.WriteCChainConfig(); err != nil { - return err - } - if err := ln.WriteDefaults(); err != nil { - return err - } - if err := ln.WriteEnvFile(); err != nil { - return err - } - return ln.WriteNodes() -} - -// Read network configuration from disk. -func (ln *LocalNetwork) ReadConfig() error { - if err := ln.ReadGenesis(); err != nil { - return err - } - if err := ln.ReadCChainConfig(); err != nil { - return err - } - return ln.ReadDefaults() -} - -// Read node configuration and process context from disk. -func (ln *LocalNetwork) ReadNodes() error { - nodes := []*LocalNode{} - - // Node configuration / process context is stored in child directories - entries, err := os.ReadDir(ln.Dir) - if err != nil { - return fmt.Errorf("failed to read network path: %w", err) - } - for _, entry := range entries { - if !entry.IsDir() { - continue - } - - nodeDir := filepath.Join(ln.Dir, entry.Name()) - node, err := ReadNode(nodeDir) - if errors.Is(err, os.ErrNotExist) { - // If no config file exists, assume this is not the path of a local node - continue - } else if err != nil { - return err - } - - nodes = append(nodes, node) - } - - ln.Nodes = nodes - - return nil -} - -// Read network and node configuration from disk. 
-func (ln *LocalNetwork) ReadAll() error { - if err := ln.ReadConfig(); err != nil { - return err - } - return ln.ReadNodes() -} - -func (ln *LocalNetwork) AddLocalNode(w io.Writer, node *LocalNode, isEphemeral bool) (*LocalNode, error) { - // Assume network configuration has been written to disk and is current in memory - - if node == nil { - // Set an empty data dir so that PopulateNodeConfig will know - // to set the default of `[network dir]/[node id]`. - node = NewLocalNode("") - } - - // Default to a data dir of [network-dir]/[node-ID] - nodeParentDir := ln.Dir - if isEphemeral { - // For an ephemeral node, default to a data dir of [network-dir]/[ephemeral-dir]/[node-ID] - // to provide a clear separation between nodes that are expected to expose stable API - // endpoints and those that will live for only a short time (e.g. a node started by a test - // and stopped on teardown). - // - // The data for an ephemeral node is still stored in the file tree rooted at the network - // dir to ensure that recursively archiving the network dir in CI will collect all node - // data used for a test run. - nodeParentDir = filepath.Join(ln.Dir, defaultEphemeralDirName) - } - - if err := ln.PopulateNodeConfig(node, nodeParentDir); err != nil { - return nil, err - } - - bootstrapIPs, bootstrapIDs, err := ln.GetBootstrapIPsAndIDs() - if err != nil { - return nil, err - } - - var ( - // Use dynamic port allocation. - httpPort uint16 = 0 - stakingPort uint16 = 0 - ) - node.SetNetworkingConfigDefaults(httpPort, stakingPort, bootstrapIDs, bootstrapIPs) - - if err := node.WriteConfig(); err != nil { - return nil, err - } - return node, node.Start(w, ln.ExecPath) -} - -func (ln *LocalNetwork) GetBootstrapIPsAndIDs() ([]string, []string, error) { - // Collect staking addresses of running nodes for use in bootstrapping a node - if err := ln.ReadNodes(); err != nil { - return nil, nil, fmt.Errorf("failed to read local network nodes: %w", err) - } - var ( - bootstrapIPs = make([]string, 0, len(ln.Nodes)) - bootstrapIDs = make([]string, 0, len(ln.Nodes)) - ) - for _, node := range ln.Nodes { - if len(node.StakingAddress) == 0 { - // Node is not running - continue - } - - bootstrapIPs = append(bootstrapIPs, node.StakingAddress) - bootstrapIDs = append(bootstrapIDs, node.NodeID.String()) - } - - if len(bootstrapIDs) == 0 { - return nil, nil, errMissingBootstrapNodes - } - - return bootstrapIPs, bootstrapIDs, nil -} diff --git a/tests/fixture/testnet/local/node.go b/tests/fixture/testnet/local/node.go deleted file mode 100644 index 2de516825677..000000000000 --- a/tests/fixture/testnet/local/node.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package local - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/fs" - "net" - "os" - "os/exec" - "path/filepath" - "syscall" - "time" - - "github.com/spf13/cast" - - "github.com/ava-labs/avalanchego/api/health" - "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/utils/perms" -) - -var errNodeAlreadyRunning = errors.New("failed to start local node: node is already running") - -// Defines local-specific node configuration. Supports setting default -// and node-specific values. -// -// TODO(marun) Support persisting this configuration per-node when -// node restart is implemented. 
Currently it can be supplied for node -// start but won't survive restart. -type LocalConfig struct { - // Path to avalanchego binary - ExecPath string -} - -// Stores the configuration and process details of a node in a local network. -type LocalNode struct { - testnet.NodeConfig - LocalConfig - node.NodeProcessContext - - // Configuration is intended to be stored at the path identified in NodeConfig.Flags[config.DataDirKey] -} - -func NewLocalNode(dataDir string) *LocalNode { - return &LocalNode{ - NodeConfig: testnet.NodeConfig{ - Flags: testnet.FlagsMap{ - config.DataDirKey: dataDir, - }, - }, - } -} - -// Attempt to read configuration and process details for a local node -// from the specified directory. -func ReadNode(dataDir string) (*LocalNode, error) { - node := NewLocalNode(dataDir) - if _, err := os.Stat(node.GetConfigPath()); err != nil { - return nil, fmt.Errorf("failed to read local node config file: %w", err) - } - return node, node.ReadAll() -} - -// Retrieve the ID of the node. The returned value may be nil if the -// node configuration has not yet been populated or read. -func (n *LocalNode) GetID() ids.NodeID { - return n.NodeConfig.NodeID -} - -// Retrieve backend-agnostic node configuration. -func (n *LocalNode) GetConfig() testnet.NodeConfig { - return n.NodeConfig -} - -// Retrieve backend-agnostic process details. -func (n *LocalNode) GetProcessContext() node.NodeProcessContext { - return n.NodeProcessContext -} - -func (n *LocalNode) GetDataDir() string { - return cast.ToString(n.Flags[config.DataDirKey]) -} - -func (n *LocalNode) GetConfigPath() string { - return filepath.Join(n.GetDataDir(), "config.json") -} - -func (n *LocalNode) ReadConfig() error { - bytes, err := os.ReadFile(n.GetConfigPath()) - if err != nil { - return fmt.Errorf("failed to read local node config: %w", err) - } - flags := testnet.FlagsMap{} - if err := json.Unmarshal(bytes, &flags); err != nil { - return fmt.Errorf("failed to unmarshal local node config: %w", err) - } - config := testnet.NodeConfig{Flags: flags} - if err := config.EnsureNodeID(); err != nil { - return err - } - n.NodeConfig = config - return nil -} - -func (n *LocalNode) WriteConfig() error { - if err := os.MkdirAll(n.GetDataDir(), perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to create node dir: %w", err) - } - - bytes, err := testnet.DefaultJSONMarshal(n.Flags) - if err != nil { - return fmt.Errorf("failed to marshal local node config: %w", err) - } - - if err := os.WriteFile(n.GetConfigPath(), bytes, perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write local node config: %w", err) - } - return nil -} - -func (n *LocalNode) GetProcessContextPath() string { - return filepath.Join(n.GetDataDir(), config.DefaultProcessContextFilename) -} - -func (n *LocalNode) ReadProcessContext() error { - path := n.GetProcessContextPath() - if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) { - // The absence of the process context file indicates the node is not running - n.NodeProcessContext = node.NodeProcessContext{} - return nil - } - - bytes, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read local node process context: %w", err) - } - processContext := node.NodeProcessContext{} - if err := json.Unmarshal(bytes, &processContext); err != nil { - return fmt.Errorf("failed to unmarshal local node process context: %w", err) - } - n.NodeProcessContext = processContext - return nil -} - -func (n *LocalNode) ReadAll() error { - if err := n.ReadConfig(); err != nil { - 
return err - } - return n.ReadProcessContext() -} - -func (n *LocalNode) Start(w io.Writer, defaultExecPath string) error { - // Avoid attempting to start an already running node. - proc, err := n.GetProcess() - if err != nil { - return fmt.Errorf("failed to start local node: %w", err) - } - if proc != nil { - return errNodeAlreadyRunning - } - - // Ensure a stale process context file is removed so that the - // creation of a new file can indicate node start. - if err := os.Remove(n.GetProcessContextPath()); err != nil && !errors.Is(err, fs.ErrNotExist) { - return fmt.Errorf("failed to remove stale process context file: %w", err) - } - - execPath := n.ExecPath - if len(execPath) == 0 { - execPath = defaultExecPath - } - - cmd := exec.Command(execPath, "--config-file", n.GetConfigPath()) - if err := cmd.Start(); err != nil { - return err - } - - // Determine appropriate level of node description detail - nodeDescription := fmt.Sprintf("node %q", n.NodeID) - isEphemeralNode := filepath.Base(filepath.Dir(n.GetDataDir())) == defaultEphemeralDirName - if isEphemeralNode { - nodeDescription = "ephemeral " + nodeDescription - } - nonDefaultNodeDir := filepath.Base(n.GetDataDir()) != n.NodeID.String() - if nonDefaultNodeDir { - // Only include the data dir if its base is not the default (the node ID) - nodeDescription = fmt.Sprintf("%s with path: %s", nodeDescription, n.GetDataDir()) - } - - go func() { - if err := cmd.Wait(); err != nil { - if err.Error() != "signal: killed" { - _, _ = fmt.Fprintf(w, "%s finished with error: %v\n", nodeDescription, err) - } - } - _, _ = fmt.Fprintf(w, "%s exited\n", nodeDescription) - }() - - // A node writes a process context file on start. If the file is not - // found in a reasonable amount of time, the node is unlikely to have - // started successfully. - if err := n.WaitForProcessContext(context.Background()); err != nil { - return fmt.Errorf("failed to start local node: %w", err) - } - - _, err = fmt.Fprintf(w, "Started %s\n", nodeDescription) - return err -} - -// Retrieve the node process if it is running. As part of determining -// process liveness, the node's process context will be refreshed if -// live or cleared if not running. -func (n *LocalNode) GetProcess() (*os.Process, error) { - // Read the process context to ensure freshness. The node may have - // stopped or been restarted since last read. - if err := n.ReadProcessContext(); err != nil { - return nil, fmt.Errorf("failed to read process context: %w", err) - } - - if n.PID == 0 { - // Process is not running - return nil, nil - } - - proc, err := os.FindProcess(n.PID) - if err != nil { - return nil, fmt.Errorf("failed to find process: %w", err) - } - - // Sending 0 will not actually send a signal but will perform - // error checking. - err = proc.Signal(syscall.Signal(0)) - if err == nil { - // Process is running - return proc, nil - } - if errors.Is(err, os.ErrProcessDone) { - // Process is not running - return nil, nil - } - return nil, fmt.Errorf("failed to determine process status: %w", err) -} - -// Signals the node process to stop and waits for the node process to -// stop running. 
-func (n *LocalNode) Stop() error { - proc, err := n.GetProcess() - if err != nil { - return fmt.Errorf("failed to retrieve process to stop: %w", err) - } - if proc == nil { - // Already stopped - return nil - } - if err := proc.Signal(syscall.SIGTERM); err != nil { - return fmt.Errorf("failed to send SIGTERM to pid %d: %w", n.PID, err) - } - - // Wait for the node process to stop - ticker := time.NewTicker(testnet.DefaultNodeTickerInterval) - defer ticker.Stop() - ctx, cancel := context.WithTimeout(context.Background(), DefaultNodeStopTimeout) - defer cancel() - for { - proc, err := n.GetProcess() - if err != nil { - return fmt.Errorf("failed to retrieve process: %w", err) - } - if proc == nil { - return nil - } - - select { - case <-ctx.Done(): - return fmt.Errorf("failed to see node process stop %q before timeout: %w", n.NodeID, ctx.Err()) - case <-ticker.C: - } - } -} - -func (n *LocalNode) IsHealthy(ctx context.Context) (bool, error) { - // Check that the node process is running as a precondition for - // checking health. GetProcess will also ensure that the node's - // API URI is current. - proc, err := n.GetProcess() - if err != nil { - return false, fmt.Errorf("failed to determine process status: %w", err) - } - if proc == nil { - return false, testnet.ErrNotRunning - } - - // Check that the node is reporting healthy - health, err := health.NewClient(n.URI).Health(ctx, nil) - if err == nil { - return health.Healthy, nil - } - - switch t := err.(type) { - case *net.OpError: - if t.Op == "read" { - // Connection refused - potentially recoverable - return false, nil - } - case syscall.Errno: - if t == syscall.ECONNREFUSED { - // Connection refused - potentially recoverable - return false, nil - } - } - // Assume all other errors are not recoverable - return false, fmt.Errorf("failed to query node health: %w", err) -} - -func (n *LocalNode) WaitForProcessContext(ctx context.Context) error { - ticker := time.NewTicker(testnet.DefaultNodeTickerInterval) - defer ticker.Stop() - - ctx, cancel := context.WithTimeout(ctx, DefaultNodeInitTimeout) - defer cancel() - for len(n.URI) == 0 { - err := n.ReadProcessContext() - if err != nil { - return fmt.Errorf("failed to read process context for node %q: %w", n.NodeID, err) - } - - select { - case <-ctx.Done(): - return fmt.Errorf("failed to load process context for node %q before timeout: %w", n.NodeID, ctx.Err()) - case <-ticker.C: - } - } - return nil -} diff --git a/tests/fixture/testnet/local/node_test.go b/tests/fixture/testnet/local/node_test.go deleted file mode 100644 index 64cd77928a4d..000000000000 --- a/tests/fixture/testnet/local/node_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package local - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNodeSerialization(t *testing.T) { - require := require.New(t) - - tmpDir := t.TempDir() - - node := NewLocalNode(tmpDir) - require.NoError(node.EnsureKeys()) - require.NoError(node.WriteConfig()) - - loadedNode, err := ReadNode(tmpDir) - require.NoError(err) - require.Equal(node, loadedNode) -} diff --git a/tests/fixture/tmpnet/README.md b/tests/fixture/tmpnet/README.md new file mode 100644 index 000000000000..909a29c6ee12 --- /dev/null +++ b/tests/fixture/tmpnet/README.md @@ -0,0 +1,231 @@ +# tmpnet - temporary network orchestration + +This package implements a simple orchestrator for the avalanchego +nodes of a temporary network. 
Configuration is stored on disk, and +nodes run as independent processes whose process details are also +written to disk. Using the filesystem to store configuration and +process details allows for the `tmpnetctl` cli and e2e test fixture to +orchestrate the same temporary networks without the use of an rpc daemon. + +## What's in a name? + +The name of this package was originally `testnet` and its cli was +`testnetctl`. This name was chosen in ignorance that `testnet` +commonly refers to a persistent blockchain network used for testing. + +To avoid confusion, the name was changed to `tmpnet` and its cli +`tmpnetctl`. `tmpnet` is short for `temporary network` since the +networks it deploys are likely to live for a limited duration in +support of the development and testing of avalanchego and its related +repositories. + +## Package details + +The functionality in this package is grouped by logical purpose into +the following non-test files: + +| Filename | Types | Purpose | +|:------------------|:------------|:-----------------------------------------------| +| defaults.go | | Defines common default configuration | +| flags.go | FlagsMap | Simplifies configuration of avalanchego flags | +| genesis.go | | Creates test genesis | +| network.go | Network | Orchestrates and configures temporary networks | +| network_config.go | Network | Reads and writes network configuration | +| node.go | Node | Orchestrates and configures nodes | +| node_config.go | Node | Reads and writes node configuration | +| node_process.go | NodeProcess | Orchestrates node processes | +| subnet.go | Subnet | Orchestrates subnets | +| utils.go | | Defines shared utility functions | + +## Usage + +### Via tmpnetctl + +A temporary network can be managed by the `tmpnetctl` cli tool: + +```bash +# From the root of the avalanchego repo + +# Build the tmpnetctl binary +$ ./scripts/build_tmpnetctl.sh + +# Start a new network +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego +... +Started network 1000 @ /home/me/.tmpnet/networks/1000 + +Configure tmpnetctl to target this network by default with one of the following statements: + - source /home/me/.tmpnet/networks/1000/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest + +# Stop the network +$ ./build/tmpnetctl stop-network --network-dir=/path/to/network +``` + +Note the export of the path ending in `latest`. This is a symlink that +is set to the last network created by `tmpnetctl start-network`. Setting +the `TMPNET_NETWORK_DIR` env var to this symlink ensures that +`tmpnetctl` commands and e2e execution with +`--use-existing-network` will target the most recently deployed temporary +network. 
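+
+For example, once the env file has been sourced, subsequent `tmpnetctl`
+invocations can omit `--network-dir`, since that flag defaults to the value
+of `TMPNET_NETWORK_DIR` (a usage sketch; the paths shown are illustrative):
+
+```bash
+# Target the most recently started network
+$ source ~/.tmpnet/networks/latest/network.env
+
+# --network-dir now defaults to the sourced TMPNET_NETWORK_DIR
+$ ./build/tmpnetctl stop-network
+```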
+
+### Via code
+
+A temporary network can be managed in code:
+
+```golang
+network := &tmpnet.Network{                      // Configure non-default values for the new network
+    DefaultFlags: tmpnet.FlagsMap{
+        config.LogLevelKey: "INFO",              // Change one of the network's defaults
+    },
+    Subnets: []*tmpnet.Subnet{                   // Subnets to create on the new network once it is running
+        {
+            Name: "xsvm-a",                      // User-defined name used to reference subnet in code and on disk
+            Chains: []*tmpnet.Chain{
+                {
+                    VMName: "xsvm",              // Name of the VM the chain will run, will be used to derive the name of the VM binary
+                    Genesis: <genesis bytes>,    // Genesis bytes used to initialize the custom chain
+                    PreFundedKey: <private key>, // (Optional) A private key that is funded in the genesis bytes
+                },
+            },
+        },
+    },
+}
+
+_ = tmpnet.StartNewNetwork(      // Start the network
+    ctx,                         // Context used to limit duration of waiting for network health
+    ginkgo.GinkgoWriter,         // Writer to report progress of initialization
+    network,
+    "",                          // Empty string uses the default network path (~/.tmpnet/networks)
+    "/path/to/avalanchego",      // The path to the binary that nodes will execute
+    "/path/to/plugins",          // The path nodes will use for plugin binaries (suggested value ~/.avalanchego/plugins)
+    5,                           // Number of initial validating nodes
+)
+
+uris := network.GetNodeURIs()
+
+// Use URIs to interact with the network
+
+// Stop all nodes in the network
+network.Stop(context.Background())
+```
+
+## Networking configuration
+
+By default, nodes in a temporary network will be started with staking and
+API ports set to `0` to ensure that ports will be dynamically
+chosen. The tmpnet fixture discovers the ports used by a given node
+by reading the `[base-data-dir]/process.json` file written by
+avalanchego on node start. The use of dynamic ports supports testing
+with many temporary networks without having to manually select compatible
+port ranges.
+
+## Configuration on disk
+
+A temporary network relies on configuration written to disk in the following structure:
+
+```
+HOME
+└── .tmpnet                                              // Root path for the temporary network fixture
+    └── networks                                         // Default parent directory for temporary networks
+        └── 1000                                         // The networkID is used to name the network dir and starts at 1000
+            ├── NodeID-37E8UK3x2YFsHE3RdALmfWcppcZ1eTuj9 // The ID of a node is the name of its data dir
+            │   ├── chainData
+            │   │   └── ...
+            │   ├── config.json                          // Node runtime configuration
+            │   ├── db
+            │   │   └── ...
+            │   ├── flags.json                           // Node flags
+            │   ├── logs
+            │   │   └── ...
+            │   ├── plugins
+            │   │   └── ...
+            │   └── process.json                         // Node process details (PID, API URI, staking address)
+            ├── chains
+            │   ├── C
+            │   │   └── config.json                      // C-Chain config for all nodes
+            │   └── raZ51bwfepaSaZ1MNSRNYNs3ZPfj...U7pa3
+            │       └── config.json                      // Custom chain configuration for all nodes
+            ├── config.json                              // Common configuration (including defaults and pre-funded keys)
+            ├── genesis.json                             // Genesis for all nodes
+            ├── network.env                              // Sets network dir env var to simplify network usage
+            └── subnets                                  // Parent directory for subnet definitions
+                ├─ subnet-a.json                         // Configuration for subnet-a and its chain(s)
+                └─ subnet-b.json                         // Configuration for subnet-b and its chain(s)
+```
+
+### Common networking configuration
+
+Network configuration such as default flags (e.g. `--log-level=`),
+runtime defaults (e.g. avalanchego path) and pre-funded private keys
+are stored at `[network-dir]/config.json`. A given default will only
+be applied to a new node on its addition to the network if the node
+does not explicitly set a given value.
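+
+This merge behaviour follows `FlagsMap.SetDefaults` (defined in `flags.go`):
+a default is only applied when the node has not already set the key. A
+minimal sketch in the style of the example above, with illustrative flag
+keys and values:
+
+```golang
+// Flags explicitly set for this node take precedence over network defaults.
+nodeFlags := tmpnet.FlagsMap{
+    config.LogLevelKey: "DEBUG",  // Explicitly set by the node
+}
+networkDefaults := tmpnet.FlagsMap{
+    config.LogLevelKey: "INFO",   // Ignored: the node already set a value
+    config.HTTPPortKey: 0,        // Applied: not set by the node
+}
+nodeFlags.SetDefaults(networkDefaults)
+// nodeFlags now holds log-level=DEBUG and http-port=0
+```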
+
+### Genesis
+
+The genesis file is stored at `[network-dir]/genesis.json` and
+referenced by default by all nodes in the network. The genesis file
+content will be generated with reasonable defaults if not
+supplied. Each node in the network can override the default by setting
+an explicit value for `--genesis-file` or `--genesis-file-content`.
+
+### Chain configuration
+
+The chain configuration for a temporary network is stored at
+`[network-dir]/chains/[chain alias or ID]/config.json` and referenced
+by all nodes in the network. The C-Chain config will be generated with
+reasonable defaults if not supplied. X-Chain and P-Chain will use
+implicit defaults. The configuration for custom chains can be provided
+with subnet configuration and will be written to the appropriate path.
+
+Each node in the network can override network-level chain
+configuration by setting `--chain-config-dir` to an explicit value and
+ensuring that configuration files for all chains exist at
+`[custom-chain-config-dir]/[chain alias or ID]/config.json`.
+
+### Network env
+
+A shell script that sets the `TMPNET_NETWORK_DIR` env var to the
+path of the network is stored at `[network-dir]/network.env`. Sourcing
+this file (i.e. `source network.env`) in a shell will configure ginkgo
+e2e and the `tmpnetctl` cli to target the network path specified in
+the env var.
+
+Set `TMPNET_ROOT_DIR` to specify the root directory in which to create
+the configuration directory of new networks
+(e.g. `$TMPNET_ROOT_DIR/[network-dir]`). The default root directory is
+`~/.tmpnet/networks`. Configuring the root directory is only relevant
+when creating new networks as the path of existing networks will
+already have been set.
+
+### Node configuration
+
+The data dir for a node is set by default to
+`[network-path]/[node-id]`. A node can be configured to use a
+non-default path by explicitly setting the `--data-dir`
+flag.
+
+#### Runtime config
+
+The details required to configure a node's execution are written to
+`[network-path]/[node-id]/config.json`. This file contains the
+runtime-specific details like the path of the avalanchego binary to
+start the node with.
+
+#### Flags
+
+All flags used to configure a node are written to
+`[network-path]/[node-id]/flags.json` so that a node can be
+configured with only a single argument:
+`--config-file=/path/to/flags.json`. This simplifies node launch and
+ensures all parameters used to launch a node can be modified by
+editing the config file.
+
+#### Process details
+
+The process details of a node are written by avalanchego to
+`[base-data-dir]/process.json`. The file contains the PID of the node
+process, the URI of the node's API, and the address other nodes can
+use to bootstrap themselves (aka staking address).
diff --git a/tests/fixture/tmpnet/cmd/main.go b/tests/fixture/tmpnet/cmd/main.go
new file mode 100644
index 000000000000..dd59c300bbb3
--- /dev/null
+++ b/tests/fixture/tmpnet/cmd/main.go
@@ -0,0 +1,148 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package main + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/version" +) + +const cliVersion = "0.0.1" + +var ( + errAvalancheGoRequired = fmt.Errorf("--avalanchego-path or %s are required", tmpnet.AvalancheGoPathEnvName) + errNetworkDirRequired = fmt.Errorf("--network-dir or %s are required", tmpnet.NetworkDirEnvName) +) + +func main() { + var networkDir string + rootCmd := &cobra.Command{ + Use: "tmpnetctl", + Short: "tmpnetctl commands", + } + rootCmd.PersistentFlags().StringVar(&networkDir, "network-dir", os.Getenv(tmpnet.NetworkDirEnvName), "The path to the configuration directory of a temporary network") + + versionCmd := &cobra.Command{ + Use: "version", + Short: "Print version details", + RunE: func(*cobra.Command, []string) error { + msg := cliVersion + if len(version.GitCommit) > 0 { + msg += ", commit=" + version.GitCommit + } + fmt.Fprintf(os.Stdout, msg+"\n") + return nil + }, + } + rootCmd.AddCommand(versionCmd) + + var ( + rootDir string + avalancheGoPath string + pluginDir string + nodeCount uint8 + ) + startNetworkCmd := &cobra.Command{ + Use: "start-network", + Short: "Start a new temporary network", + RunE: func(*cobra.Command, []string) error { + if len(avalancheGoPath) == 0 { + return errAvalancheGoRequired + } + + // Root dir will be defaulted on start if not provided + + network := &tmpnet.Network{} + + // Extreme upper bound, should never take this long + networkStartTimeout := 2 * time.Minute + + ctx, cancel := context.WithTimeout(context.Background(), networkStartTimeout) + defer cancel() + err := tmpnet.StartNewNetwork( + ctx, + os.Stdout, + network, + rootDir, + avalancheGoPath, + pluginDir, + int(nodeCount), + ) + if err != nil { + return err + } + + // Symlink the new network to the 'latest' network to simplify usage + networkRootDir := filepath.Dir(network.Dir) + networkDirName := filepath.Base(network.Dir) + latestSymlinkPath := filepath.Join(networkRootDir, "latest") + if err := os.Remove(latestSymlinkPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + if err := os.Symlink(networkDirName, latestSymlinkPath); err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "\nConfigure tmpnetctl to target this network by default with one of the following statements:\n") + fmt.Fprintf(os.Stdout, " - source %s\n", network.EnvFilePath()) + fmt.Fprintf(os.Stdout, " - %s\n", network.EnvFileContents()) + fmt.Fprintf(os.Stdout, " - export %s=%s\n", tmpnet.NetworkDirEnvName, latestSymlinkPath) + + return nil + }, + } + startNetworkCmd.PersistentFlags().StringVar(&rootDir, "root-dir", os.Getenv(tmpnet.RootDirEnvName), "The path to the root directory for temporary networks") + startNetworkCmd.PersistentFlags().StringVar(&avalancheGoPath, "avalanchego-path", os.Getenv(tmpnet.AvalancheGoPathEnvName), "The path to an avalanchego binary") + startNetworkCmd.PersistentFlags().StringVar(&pluginDir, "plugin-dir", os.ExpandEnv("$HOME/.avalanchego/plugins"), "[optional] the dir containing VM plugins") + startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", tmpnet.DefaultNodeCount, "Number of nodes the network should initially consist of") + rootCmd.AddCommand(startNetworkCmd) + + stopNetworkCmd := &cobra.Command{ + Use: "stop-network", + Short: "Stop a temporary network", + RunE: func(*cobra.Command, []string) error { + if len(networkDir) == 0 { + return errNetworkDirRequired + } + ctx, 
cancel := context.WithTimeout(context.Background(), tmpnet.DefaultNetworkTimeout) + defer cancel() + if err := tmpnet.StopNetwork(ctx, networkDir); err != nil { + return err + } + fmt.Fprintf(os.Stdout, "Stopped network configured at: %s\n", networkDir) + return nil + }, + } + rootCmd.AddCommand(stopNetworkCmd) + + restartNetworkCmd := &cobra.Command{ + Use: "restart-network", + Short: "Restart a temporary network", + RunE: func(*cobra.Command, []string) error { + if len(networkDir) == 0 { + return errNetworkDirRequired + } + ctx, cancel := context.WithTimeout(context.Background(), tmpnet.DefaultNetworkTimeout) + defer cancel() + return tmpnet.RestartNetwork(ctx, os.Stdout, networkDir) + }, + } + rootCmd.AddCommand(restartNetworkCmd) + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "tmpnetctl failed: %v\n", err) + os.Exit(1) + } + os.Exit(0) +} diff --git a/tests/fixture/tmpnet/defaults.go b/tests/fixture/tmpnet/defaults.go new file mode 100644 index 000000000000..c10b84f98ea4 --- /dev/null +++ b/tests/fixture/tmpnet/defaults.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "time" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +const ( + // Interval appropriate for network operations that should be + // retried periodically but not too often. + DefaultPollingInterval = 500 * time.Millisecond + + // Validator start time must be a minimum of SyncBound from the + // current time for validator addition to succeed, and adding 20 + // seconds provides a buffer in case of any delay in processing. + DefaultValidatorStartTimeDiff = executor.SyncBound + 20*time.Second + + DefaultNetworkTimeout = 2 * time.Minute + + // Minimum required to ensure connectivity-based health checks will pass + DefaultNodeCount = 2 + + // Arbitrary number of pre-funded keys to create by default + DefaultPreFundedKeyCount = 50 + + // A short minimum stake duration enables testing of staking logic. + DefaultMinStakeDuration = time.Second + + defaultConfigFilename = "config.json" +) + +// A set of flags appropriate for testing. +func DefaultFlags() FlagsMap { + // Supply only non-default configuration to ensure that default values will be used. + return FlagsMap{ + config.NetworkPeerListGossipFreqKey: "250ms", + config.NetworkMaxReconnectDelayKey: "1s", + config.PublicIPKey: "127.0.0.1", + config.HTTPHostKey: "127.0.0.1", + config.StakingHostKey: "127.0.0.1", + config.HealthCheckFreqKey: "2s", + config.AdminAPIEnabledKey: "e2e", + config.IpcAPIEnabledKey: true, + config.IndexEnabledKey: true, + config.LogDisplayLevelKey: "INFO", + config.LogLevelKey: "DEBUG", + config.MinStakeDurationKey: DefaultMinStakeDuration.String(), + } +} + +// A set of chain configurations appropriate for testing. +func DefaultChainConfigs() map[string]FlagsMap { + return map[string]FlagsMap{ + // Supply only non-default configuration to ensure that default + // values will be used. Available C-Chain configuration options are + // defined in the `github.com/ava-labs/coreth/evm` package. + "C": { + "warp-api-enabled": true, + "log-level": "trace", + }, + } +} diff --git a/tests/fixture/tmpnet/flags.go b/tests/fixture/tmpnet/flags.go new file mode 100644 index 000000000000..3084982ea704 --- /dev/null +++ b/tests/fixture/tmpnet/flags.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cast" + + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Defines a mapping of flag keys to values intended to be supplied to +// an invocation of an AvalancheGo node. +type FlagsMap map[string]interface{} + +// Utility function simplifying construction of a FlagsMap from a file. +func ReadFlagsMap(path string, description string) (*FlagsMap, error) { + bytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", description, err) + } + flagsMap := &FlagsMap{} + if err := json.Unmarshal(bytes, flagsMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s: %w", description, err) + } + return flagsMap, nil +} + +// SetDefaults ensures the effectiveness of flag overrides by only +// setting values supplied in the defaults map that are not already +// explicitly set. +func (f FlagsMap) SetDefaults(defaults FlagsMap) { + for key, value := range defaults { + if _, ok := f[key]; !ok { + f[key] = value + } + } +} + +// GetStringVal simplifies retrieving a map value as a string. +func (f FlagsMap) GetStringVal(key string) (string, error) { + rawVal, ok := f[key] + if !ok { + return "", nil + } + + val, err := cast.ToStringE(rawVal) + if err != nil { + return "", fmt.Errorf("failed to cast value for %q: %w", key, err) + } + return val, nil +} + +// Write simplifies writing a FlagsMap to the provided path. The +// description is used in error messages. +func (f FlagsMap) Write(path string, description string) error { + bytes, err := DefaultJSONMarshal(f) + if err != nil { + return fmt.Errorf("failed to marshal %s: %w", description, err) + } + if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write %s: %w", description, err) + } + return nil +} diff --git a/tests/fixture/tmpnet/genesis.go b/tests/fixture/tmpnet/genesis.go new file mode 100644 index 000000000000..a9c85fe8b441 --- /dev/null +++ b/tests/fixture/tmpnet/genesis.go @@ -0,0 +1,191 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "time" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" +) + +const ( + defaultGasLimit = uint64(100_000_000) // Gas limit is arbitrary + + // Arbitrarily large amount of AVAX to fund keys on the X-Chain for testing + defaultFundedKeyXChainAmount = 30 * units.MegaAvax +) + +var ( + // Arbitrarily large amount of AVAX (10^12) to fund keys on the C-Chain for testing + defaultFundedKeyCChainAmount = new(big.Int).Exp(big.NewInt(10), big.NewInt(30), nil) + + errNoKeysForGenesis = errors.New("no keys to fund for genesis") + errInvalidNetworkIDForGenesis = errors.New("network ID can't be mainnet, testnet or local network ID for genesis") + errMissingStakersForGenesis = errors.New("no stakers provided for genesis") +) + +// Helper type to simplify configuring X-Chain genesis balances +type XChainBalanceMap map[ids.ShortID]uint64 + +// Create a genesis struct valid for bootstrapping a test +// network. Note that many of the genesis fields (e.g. reward +// addresses) are randomly generated or hard-coded. +func NewTestGenesis( + networkID uint32, + nodes []*Node, + keysToFund []*secp256k1.PrivateKey, +) (*genesis.UnparsedConfig, error) { + // Validate inputs + switch networkID { + case constants.TestnetID, constants.MainnetID, constants.LocalID: + return nil, errInvalidNetworkIDForGenesis + } + if len(nodes) == 0 { + return nil, errMissingStakersForGenesis + } + if len(keysToFund) == 0 { + return nil, errNoKeysForGenesis + } + + initialStakers, err := stakersForNodes(networkID, nodes) + if err != nil { + return nil, fmt.Errorf("failed to configure stakers for nodes: %w", err) + } + + // Address that controls stake doesn't matter -- generate it randomly + stakeAddress, err := address.Format( + "X", + constants.GetHRP(networkID), + ids.GenerateTestShortID().Bytes(), + ) + if err != nil { + return nil, fmt.Errorf("failed to format stake address: %w", err) + } + + // Ensure the total stake allows a MegaAvax per staker + totalStake := uint64(len(initialStakers)) * units.MegaAvax + + // The eth address is only needed to link pre-mainnet assets. Until that capability + // becomes necessary for testing, use a bogus address. 
+ // + // Reference: https://github.com/ava-labs/avalanchego/issues/1365#issuecomment-1511508767 + ethAddress := "0x0000000000000000000000000000000000000000" + + now := time.Now() + + config := &genesis.UnparsedConfig{ + NetworkID: networkID, + Allocations: []genesis.UnparsedAllocation{ + { + ETHAddr: ethAddress, + AVAXAddr: stakeAddress, + InitialAmount: 0, + UnlockSchedule: []genesis.LockedAmount{ // Provides stake to validators + { + Amount: totalStake, + Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week + }, + }, + }, + }, + StartTime: uint64(now.Unix()), + InitialStakedFunds: []string{stakeAddress}, + InitialStakeDuration: 365 * 24 * 60 * 60, // 1 year + InitialStakeDurationOffset: 90 * 60, // 90 minutes + Message: "hello avalanche!", + InitialStakers: initialStakers, + } + + // Ensure pre-funded keys have arbitrary large balances on both chains to support testing + xChainBalances := make(XChainBalanceMap, len(keysToFund)) + cChainBalances := make(core.GenesisAlloc, len(keysToFund)) + for _, key := range keysToFund { + xChainBalances[key.Address()] = defaultFundedKeyXChainAmount + cChainBalances[evm.GetEthAddress(key)] = core.GenesisAccount{ + Balance: defaultFundedKeyCChainAmount, + } + } + + // Set X-Chain balances + for xChainAddress, balance := range xChainBalances { + avaxAddr, err := address.Format("X", constants.GetHRP(networkID), xChainAddress[:]) + if err != nil { + return nil, fmt.Errorf("failed to format X-Chain address: %w", err) + } + config.Allocations = append( + config.Allocations, + genesis.UnparsedAllocation{ + ETHAddr: ethAddress, + AVAXAddr: avaxAddr, + InitialAmount: balance, + UnlockSchedule: []genesis.LockedAmount{ + { + Amount: 20 * units.MegaAvax, + }, + { + Amount: totalStake, + Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week + }, + }, + }, + ) + } + + // Define C-Chain genesis + cChainGenesis := &core.Genesis{ + Config: params.AvalancheLocalChainConfig, + Difficulty: big.NewInt(0), // Difficulty is a mandatory field + GasLimit: defaultGasLimit, + Alloc: cChainBalances, + } + cChainGenesisBytes, err := json.Marshal(cChainGenesis) + if err != nil { + return nil, fmt.Errorf("failed to marshal C-Chain genesis: %w", err) + } + config.CChainGenesis = string(cChainGenesisBytes) + + return config, nil +} + +// Returns staker configuration for the given set of nodes. +func stakersForNodes(networkID uint32, nodes []*Node) ([]genesis.UnparsedStaker, error) { + // Give staking rewards for initial validators to a random address. Any testing of staking rewards + // will be easier to perform with nodes other than the initial validators since the timing of + // staking can be more easily controlled. 
+ rewardAddr, err := address.Format("X", constants.GetHRP(networkID), ids.GenerateTestShortID().Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to format reward address: %w", err) + } + + // Configure provided nodes as initial stakers + initialStakers := make([]genesis.UnparsedStaker, len(nodes)) + for i, node := range nodes { + pop, err := node.GetProofOfPossession() + if err != nil { + return nil, fmt.Errorf("failed to derive proof of possession for node %s: %w", node.NodeID, err) + } + initialStakers[i] = genesis.UnparsedStaker{ + NodeID: node.NodeID, + RewardAddress: rewardAddr, + DelegationFee: .01 * reward.PercentDenominator, + Signer: pop, + } + } + + return initialStakers, nil +} diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go new file mode 100644 index 000000000000..01829da70da5 --- /dev/null +++ b/tests/fixture/tmpnet/network.go @@ -0,0 +1,694 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm" +) + +// The Network type is defined in this file (orchestration) and +// network_config.go (reading/writing configuration). + +const ( + // Constants defining the names of shell variables whose value can + // configure network orchestration. + NetworkDirEnvName = "TMPNET_NETWORK_DIR" + RootDirEnvName = "TMPNET_ROOT_DIR" + + // This interval was chosen to avoid spamming node APIs during + // startup, as smaller intervals (e.g. 50ms) seemed to noticeably + // increase the time for a network's nodes to be seen as healthy. + networkHealthCheckInterval = 200 * time.Millisecond + + // eth address: 0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC + HardHatKeyStr = "56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027" +) + +// HardhatKey is a legacy used for hardhat testing in subnet-evm +// TODO(marun) Remove when no longer needed. +var HardhatKey *secp256k1.PrivateKey + +func init() { + hardhatKeyBytes, err := hex.DecodeString(HardHatKeyStr) + if err != nil { + panic(err) + } + HardhatKey, err = secp256k1.ToPrivateKey(hardhatKeyBytes) + if err != nil { + panic(err) + } +} + +// Collects the configuration for running a temporary avalanchego network +type Network struct { + // Path where network configuration and data is stored + Dir string + + // Configuration common across nodes + Genesis *genesis.UnparsedConfig + ChainConfigs map[string]FlagsMap + + // Default configuration to use when creating new nodes + DefaultFlags FlagsMap + DefaultRuntimeConfig NodeRuntimeConfig + + // Keys pre-funded in the genesis on both the X-Chain and the C-Chain + PreFundedKeys []*secp256k1.PrivateKey + + // Nodes that constitute the network + Nodes []*Node + + // Subnets that have been enabled on the network + Subnets []*Subnet +} + +// Ensure a real and absolute network dir so that node +// configuration that embeds the network path will continue to +// work regardless of symlink and working directory changes. 
+func toCanonicalDir(dir string) (string, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(absDir) +} + +func StartNewNetwork( + ctx context.Context, + w io.Writer, + network *Network, + rootNetworkDir string, + avalancheGoExecPath string, + pluginDir string, + nodeCount int, +) error { + if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir, nodeCount); err != nil { + return err + } + if err := network.Create(rootNetworkDir); err != nil { + return err + } + return network.Start(ctx, w) +} + +// Stops the nodes of the network configured in the provided directory. +func StopNetwork(ctx context.Context, dir string) error { + network, err := ReadNetwork(dir) + if err != nil { + return err + } + return network.Stop(ctx) +} + +// Restarts the nodes of the network configured in the provided directory. +func RestartNetwork(ctx context.Context, w io.Writer, dir string) error { + network, err := ReadNetwork(dir) + if err != nil { + return err + } + return network.Restart(ctx, w) +} + +// Reads a network from the provided directory. +func ReadNetwork(dir string) (*Network, error) { + canonicalDir, err := toCanonicalDir(dir) + if err != nil { + return nil, err + } + network := &Network{ + Dir: canonicalDir, + } + if err := network.Read(); err != nil { + return nil, fmt.Errorf("failed to read network: %w", err) + } + return network, nil +} + +// Initializes a new network with default configuration. +func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string, nodeCount int) error { + if _, err := fmt.Fprintf(w, "Preparing configuration for new network with %s\n", avalancheGoPath); err != nil { + return err + } + + // Ensure default flags + if n.DefaultFlags == nil { + n.DefaultFlags = FlagsMap{} + } + n.DefaultFlags.SetDefaults(DefaultFlags()) + + // Only configure the plugin dir with a non-empty value to ensure + // the use of the default value (`[datadir]/plugins`) when + // no plugin dir is configured. + if len(pluginDir) > 0 { + if _, ok := n.DefaultFlags[config.PluginDirKey]; !ok { + n.DefaultFlags[config.PluginDirKey] = pluginDir + } + } + + // Ensure pre-funded keys + if len(n.PreFundedKeys) == 0 { + keys, err := NewPrivateKeys(DefaultPreFundedKeyCount) + if err != nil { + return err + } + n.PreFundedKeys = keys + } + + // Ensure primary chains are configured + if n.ChainConfigs == nil { + n.ChainConfigs = map[string]FlagsMap{} + } + defaultChainConfigs := DefaultChainConfigs() + for alias, chainConfig := range defaultChainConfigs { + if _, ok := n.ChainConfigs[alias]; !ok { + n.ChainConfigs[alias] = FlagsMap{} + } + n.ChainConfigs[alias].SetDefaults(chainConfig) + } + + // Ensure runtime is configured + if len(n.DefaultRuntimeConfig.AvalancheGoPath) == 0 { + n.DefaultRuntimeConfig.AvalancheGoPath = avalancheGoPath + } + + // Ensure nodes are created + if len(n.Nodes) == 0 { + n.Nodes = NewNodes(nodeCount) + } + + // Ensure nodes are configured + for i := range n.Nodes { + if err := n.EnsureNodeConfig(n.Nodes[i]); err != nil { + return err + } + } + + return nil +} + +// Creates the network on disk, choosing its network id and generating its genesis in the process. 
+func (n *Network) Create(rootDir string) error { + if len(rootDir) == 0 { + // Use the default root dir + var err error + rootDir, err = getDefaultRootDir() + if err != nil { + return err + } + } + + // Ensure creation of the root dir + if err := os.MkdirAll(rootDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create root network dir: %w", err) + } + + // Determine the network path and ID + var ( + networkDir string + networkID uint32 + ) + if n.Genesis != nil && n.Genesis.NetworkID > 0 { + // Use the network ID defined in the provided genesis + networkID = n.Genesis.NetworkID + } + if networkID > 0 { + // Use a directory with a random suffix + var err error + networkDir, err = os.MkdirTemp(rootDir, fmt.Sprintf("%d.", n.Genesis.NetworkID)) + if err != nil { + return fmt.Errorf("failed to create network dir: %w", err) + } + } else { + // Find the next available network ID based on the contents of the root dir + var err error + networkID, networkDir, err = findNextNetworkID(rootDir) + if err != nil { + return err + } + } + canonicalDir, err := toCanonicalDir(networkDir) + if err != nil { + return err + } + n.Dir = canonicalDir + + pluginDir, err := n.DefaultFlags.GetStringVal(config.PluginDirKey) + if err != nil { + return err + } + if len(pluginDir) > 0 { + // Ensure the existence of the plugin directory or nodes won't be able to start. + if err := os.MkdirAll(pluginDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create plugin dir: %w", err) + } + } + + if n.Genesis == nil { + // Pre-fund known legacy keys to support ad-hoc testing. Usage of a legacy key will + // require knowing the key beforehand rather than retrieving it from the set of pre-funded + // keys exposed by a network. Since allocation will not be exclusive, a test using a + // legacy key is unlikely to be a good candidate for parallel execution. + keysToFund := []*secp256k1.PrivateKey{ + genesis.VMRQKey, + genesis.EWOQKey, + HardhatKey, + } + keysToFund = append(keysToFund, n.PreFundedKeys...) + + genesis, err := NewTestGenesis(networkID, n.Nodes, keysToFund) + if err != nil { + return err + } + n.Genesis = genesis + } + + for _, node := range n.Nodes { + // Ensure the node is configured for use with the network and + // knows where to write its configuration. + if err := n.EnsureNodeConfig(node); err != nil { + return nil + } + } + + // Ensure configuration on disk is current + return n.Write() +} + +// Starts all nodes in the network +func (n *Network) Start(ctx context.Context, w io.Writer) error { + if _, err := fmt.Fprintf(w, "Starting network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { + return err + } + + // Configure the networking for each node and start + for _, node := range n.Nodes { + if err := n.StartNode(ctx, w, node); err != nil { + return err + } + } + + if _, err := fmt.Fprintf(w, "Waiting for all nodes to report healthy...\n\n"); err != nil { + return err + } + if err := n.WaitForHealthy(ctx, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\nStarted network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { + return err + } + + return nil +} + +func (n *Network) AddEphemeralNode(ctx context.Context, w io.Writer, flags FlagsMap) (*Node, error) { + node := NewNode("") + node.Flags = flags + node.IsEphemeral = true + if err := n.StartNode(ctx, w, node); err != nil { + return nil, err + } + return node, nil +} + +// Starts the provided node after configuring it for the network. 
+func (n *Network) StartNode(ctx context.Context, w io.Writer, node *Node) error { + if err := n.EnsureNodeConfig(node); err != nil { + return err + } + + bootstrapIPs, bootstrapIDs, err := n.getBootstrapIPsAndIDs(node) + if err != nil { + return err + } + node.SetNetworkingConfig(bootstrapIDs, bootstrapIPs) + + if err := node.Write(); err != nil { + return err + } + + if err := node.Start(w); err != nil { + // Attempt to stop an unhealthy node to provide some assurance to the caller + // that an error condition will not result in a lingering process. + err = errors.Join(err, node.Stop(ctx)) + return err + } + + return nil +} + +// Waits until all nodes in the network are healthy. +func (n *Network) WaitForHealthy(ctx context.Context, w io.Writer) error { + ticker := time.NewTicker(networkHealthCheckInterval) + defer ticker.Stop() + + healthyNodes := set.NewSet[ids.NodeID](len(n.Nodes)) + for healthyNodes.Len() < len(n.Nodes) { + for _, node := range n.Nodes { + if healthyNodes.Contains(node.NodeID) { + continue + } + + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return err + } + if !healthy { + continue + } + + healthyNodes.Add(node.NodeID) + if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { + return err + } + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) + case <-ticker.C: + } + } + return nil +} + +// Stops all nodes in the network. +func (n *Network) Stop(ctx context.Context) error { + // Target all nodes, including the ephemeral ones + nodes, err := ReadNodes(n.Dir, true /* includeEphemeral */) + if err != nil { + return err + } + + var errs []error + + // Initiate stop on all nodes + for _, node := range nodes { + if err := node.InitiateStop(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to stop node %s: %w", node.NodeID, err)) + } + } + + // Wait for stop to complete on all nodes + for _, node := range nodes { + if err := node.WaitForStopped(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to wait for node %s to stop: %w", node.NodeID, err)) + } + } + + if len(errs) > 0 { + return fmt.Errorf("failed to stop network:\n%w", errors.Join(errs...)) + } + return nil +} + +// Restarts all non-ephemeral nodes in the network. +func (n *Network) Restart(ctx context.Context, w io.Writer) error { + if _, err := fmt.Fprintf(w, " restarting network\n"); err != nil { + return err + } + for _, node := range n.Nodes { + if err := node.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop node %s: %w", node.NodeID, err) + } + if err := n.StartNode(ctx, w, node); err != nil { + return fmt.Errorf("failed to start node %s: %w", node.NodeID, err) + } + if _, err := fmt.Fprintf(w, " waiting for node %s to report healthy\n", node.NodeID); err != nil { + return err + } + if err := WaitForHealthy(ctx, node); err != nil { + return err + } + } + return nil +} + +// Ensures the provided node has the configuration it needs to start. If the data dir is not +// set, it will be defaulted to [nodeParentDir]/[node ID]. For a not-yet-created network, +// no action will be taken. +// TODO(marun) Reword or refactor to account for the differing behavior pre- vs post-start +func (n *Network) EnsureNodeConfig(node *Node) error { + flags := node.Flags + + // Set the network name if available + if n.Genesis != nil && n.Genesis.NetworkID > 0 { + // Convert the network id to a string to ensure consistency in JSON round-tripping. 
+ flags[config.NetworkNameKey] = strconv.FormatUint(uint64(n.Genesis.NetworkID), 10) + } + + if err := node.EnsureKeys(); err != nil { + return err + } + + flags.SetDefaults(n.DefaultFlags) + + // Set fields including the network path + if len(n.Dir) > 0 { + node.Flags.SetDefaults(FlagsMap{ + config.GenesisFileKey: n.getGenesisPath(), + config.ChainConfigDirKey: n.getChainConfigDir(), + }) + + // Ensure the node's data dir is configured + dataDir := node.getDataDir() + if len(dataDir) == 0 { + // NodeID will have been set by EnsureKeys + dataDir = filepath.Join(n.Dir, node.NodeID.String()) + flags[config.DataDirKey] = dataDir + } + } + + // Ensure the node runtime is configured + if node.RuntimeConfig == nil { + node.RuntimeConfig = &NodeRuntimeConfig{ + AvalancheGoPath: n.DefaultRuntimeConfig.AvalancheGoPath, + } + } + + // Ensure available subnets are tracked + subnetIDs := make([]string, 0, len(n.Subnets)) + for _, subnet := range n.Subnets { + if subnet.SubnetID == ids.Empty { + continue + } + subnetIDs = append(subnetIDs, subnet.SubnetID.String()) + } + flags[config.TrackSubnetsKey] = strings.Join(subnetIDs, ",") + + return nil +} + +func (n *Network) GetSubnet(name string) *Subnet { + for _, subnet := range n.Subnets { + if subnet.Name == name { + return subnet + } + } + return nil +} + +// Ensure that each subnet on the network is created and that it is validated by all non-ephemeral nodes. +func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { + createdSubnets := make([]*Subnet, 0, len(n.Subnets)) + for _, subnet := range n.Subnets { + if _, err := fmt.Fprintf(w, "Creating subnet %q\n", subnet.Name); err != nil { + return err + } + if subnet.SubnetID != ids.Empty { + // The subnet already exists + continue + } + + if subnet.OwningKey == nil { + // Allocate a pre-funded key and remove it from the network so it won't be used for + // other purposes + if len(n.PreFundedKeys) == 0 { + return fmt.Errorf("no pre-funded keys available to create subnet %q", subnet.Name) + } + subnet.OwningKey = n.PreFundedKeys[len(n.PreFundedKeys)-1] + n.PreFundedKeys = n.PreFundedKeys[:len(n.PreFundedKeys)-1] + } + + // Create the subnet on the network + if err := subnet.Create(ctx, n.Nodes[0].URI); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " created subnet %q as %q\n", subnet.Name, subnet.SubnetID); err != nil { + return err + } + + // Persist the subnet configuration + if err := subnet.Write(n.getSubnetDir(), n.getChainConfigDir()); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " wrote configuration for subnet %q\n", subnet.Name); err != nil { + return err + } + + createdSubnets = append(createdSubnets, subnet) + } + + if len(createdSubnets) == 0 { + return nil + } + + // Ensure the in-memory subnet state + n.Subnets = append(n.Subnets, createdSubnets...) + + // Ensure the pre-funded key changes are persisted to disk + if err := n.Write(); err != nil { + return err + } + + // Reconfigure nodes for the new subnets and their chains + if _, err := fmt.Fprintf(w, "Configured nodes to track new subnet(s). 
Restart is required.\n"); err != nil { + return err + } + for _, node := range n.Nodes { + if err := n.EnsureNodeConfig(node); err != nil { + return err + } + } + + // Restart nodes to allow new configuration to take effect + if err := n.Restart(ctx, w); err != nil { + return err + } + + // Add each node as a subnet validator + for _, subnet := range createdSubnets { + if _, err := fmt.Fprintf(w, "Adding validators for subnet %q\n", subnet.Name); err != nil { + return err + } + if err := subnet.AddValidators(ctx, w, n.Nodes); err != nil { + return err + } + } + + // Wait for nodes to become subnet validators + pChainClient := platformvm.NewClient(n.Nodes[0].URI) + for _, subnet := range createdSubnets { + if err := waitForActiveValidators(ctx, w, pChainClient, subnet); err != nil { + return err + } + + // It should now be safe to create chains for the subnet + if err := subnet.CreateChains(ctx, w, n.Nodes[0].URI); err != nil { + return err + } + + // Persist the chain configuration + if err := subnet.Write(n.getSubnetDir(), n.getChainConfigDir()); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " wrote chain configuration for subnet %q\n", subnet.Name); err != nil { + return err + } + } + + return nil +} + +func (n *Network) GetURIForNodeID(nodeID ids.NodeID) (string, error) { + for _, node := range n.Nodes { + if node.NodeID == nodeID { + return node.URI, nil + } + } + return "", fmt.Errorf("%s is not known to the network", nodeID) +} + +func (n *Network) GetNodeURIs() []NodeURI { + return GetNodeURIs(n.Nodes) +} + +// Retrieves bootstrap IPs and IDs for all nodes except the skipped one (this supports +// collecting the bootstrap details for restarting a node). +func (n *Network) getBootstrapIPsAndIDs(skippedNode *Node) ([]string, []string, error) { + // Collect staking addresses of non-ephemeral nodes for use in bootstrapping a node + nodes, err := ReadNodes(n.Dir, false /* includeEphemeral */) + if err != nil { + return nil, nil, fmt.Errorf("failed to read network's nodes: %w", err) + } + var ( + bootstrapIPs = make([]string, 0, len(nodes)) + bootstrapIDs = make([]string, 0, len(nodes)) + ) + for _, node := range nodes { + if skippedNode != nil && node.NodeID == skippedNode.NodeID { + continue + } + + if len(node.StakingAddress) == 0 { + // Node is not running + continue + } + + bootstrapIPs = append(bootstrapIPs, node.StakingAddress) + bootstrapIDs = append(bootstrapIDs, node.NodeID.String()) + } + + return bootstrapIPs, bootstrapIDs, nil +} + +// Retrieves the default root dir for storing networks and their +// configuration. +func getDefaultRootDir() (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".tmpnet", "networks"), nil +} + +// Finds the next available network ID by attempting to create a +// directory numbered from 1000 until creation succeeds. Returns the +// network id and the full path of the created directory. 
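+// Because the directory is created as part of the search, concurrent callers will reserve distinct network IDs; IDs reserved in constants.NetworkIDToNetworkName are skipped.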
+func findNextNetworkID(rootDir string) (uint32, string, error) { + var ( + networkID uint32 = 1000 + dirPath string + ) + for { + _, reserved := constants.NetworkIDToNetworkName[networkID] + if reserved { + networkID++ + continue + } + + dirPath = filepath.Join(rootDir, strconv.FormatUint(uint64(networkID), 10)) + err := os.Mkdir(dirPath, perms.ReadWriteExecute) + if err == nil { + return networkID, dirPath, nil + } + + if !errors.Is(err, fs.ErrExist) { + return 0, "", fmt.Errorf("failed to create network directory: %w", err) + } + + // Directory already exists, keep iterating + networkID++ + } +} diff --git a/tests/fixture/tmpnet/network_config.go b/tests/fixture/tmpnet/network_config.go new file mode 100644 index 000000000000..c5bb603ed3e5 --- /dev/null +++ b/tests/fixture/tmpnet/network_config.go @@ -0,0 +1,236 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// The Network type is defined in this file (reading/writing configuration) and network.go +// (orchestration). + +var errMissingNetworkDir = errors.New("failed to write network: missing network directory") + +// Read network and node configuration from disk. +func (n *Network) Read() error { + if err := n.readNetwork(); err != nil { + return err + } + if err := n.readNodes(); err != nil { + return err + } + return n.readSubnets() +} + +// Write network configuration to disk. +func (n *Network) Write() error { + if len(n.Dir) == 0 { + return errMissingNetworkDir + } + if err := n.writeGenesis(); err != nil { + return err + } + if err := n.writeChainConfigs(); err != nil { + return err + } + if err := n.writeNetworkConfig(); err != nil { + return err + } + if err := n.writeEnvFile(); err != nil { + return err + } + return n.writeNodes() +} + +// Read network configuration from disk. +func (n *Network) readNetwork() error { + if err := n.readGenesis(); err != nil { + return err + } + if err := n.readChainConfigs(); err != nil { + return err + } + return n.readConfig() +} + +// Read the non-ephemeral nodes associated with the network from disk. 
+func (n *Network) readNodes() error { + nodes, err := ReadNodes(n.Dir, false /* includeEphemeral */) + if err != nil { + return err + } + n.Nodes = nodes + return nil +} + +func (n *Network) writeNodes() error { + for _, node := range n.Nodes { + if err := node.Write(); err != nil { + return err + } + } + return nil +} + +func (n *Network) getGenesisPath() string { + return filepath.Join(n.Dir, "genesis.json") +} + +func (n *Network) readGenesis() error { + bytes, err := os.ReadFile(n.getGenesisPath()) + if err != nil { + return fmt.Errorf("failed to read genesis: %w", err) + } + genesis := genesis.UnparsedConfig{} + if err := json.Unmarshal(bytes, &genesis); err != nil { + return fmt.Errorf("failed to unmarshal genesis: %w", err) + } + n.Genesis = &genesis + return nil +} + +func (n *Network) writeGenesis() error { + bytes, err := DefaultJSONMarshal(n.Genesis) + if err != nil { + return fmt.Errorf("failed to marshal genesis: %w", err) + } + if err := os.WriteFile(n.getGenesisPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write genesis: %w", err) + } + return nil +} + +func (n *Network) getChainConfigDir() string { + return filepath.Join(n.Dir, "chains") +} + +func (n *Network) readChainConfigs() error { + baseChainConfigDir := n.getChainConfigDir() + entries, err := os.ReadDir(baseChainConfigDir) + if err != nil { + return fmt.Errorf("failed to read chain config dir: %w", err) + } + + // Clear the map of data that may end up stale (e.g. if a given + // chain is in the map but no longer exists on disk) + n.ChainConfigs = map[string]FlagsMap{} + + for _, entry := range entries { + if !entry.IsDir() { + // Chain config files are expected to be nested under a + // directory with the name of the chain alias. + continue + } + chainAlias := entry.Name() + configPath := filepath.Join(baseChainConfigDir, chainAlias, defaultConfigFilename) + if _, err := os.Stat(configPath); os.IsNotExist(err) { + // No config file present + continue + } + chainConfig, err := ReadFlagsMap(configPath, fmt.Sprintf("%s chain config", chainAlias)) + if err != nil { + return err + } + n.ChainConfigs[chainAlias] = *chainConfig + } + + return nil +} + +func (n *Network) writeChainConfigs() error { + baseChainConfigDir := n.getChainConfigDir() + + for chainAlias, chainConfig := range n.ChainConfigs { + // Create the directory + chainConfigDir := filepath.Join(baseChainConfigDir, chainAlias) + if err := os.MkdirAll(chainConfigDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create %s chain config dir: %w", chainAlias, err) + } + + // Write the file + path := filepath.Join(chainConfigDir, defaultConfigFilename) + if err := chainConfig.Write(path, fmt.Sprintf("%s chain config", chainAlias)); err != nil { + return err + } + } + + // TODO(marun) Ensure the removal of chain aliases that aren't present in the map + + return nil +} + +func (n *Network) getConfigPath() string { + return filepath.Join(n.Dir, defaultConfigFilename) +} + +func (n *Network) readConfig() error { + bytes, err := os.ReadFile(n.getConfigPath()) + if err != nil { + return fmt.Errorf("failed to read network config: %w", err) + } + if err := json.Unmarshal(bytes, n); err != nil { + return fmt.Errorf("failed to unmarshal network config: %w", err) + } + return nil +} + +// The subset of network fields to store in the network config file. 
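+// Genesis, chain configs and per-node configuration are persisted to separate files by Write.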
+type serializedNetworkConfig struct { + DefaultFlags FlagsMap + DefaultRuntimeConfig NodeRuntimeConfig + PreFundedKeys []*secp256k1.PrivateKey +} + +func (n *Network) writeNetworkConfig() error { + config := &serializedNetworkConfig{ + DefaultFlags: n.DefaultFlags, + DefaultRuntimeConfig: n.DefaultRuntimeConfig, + PreFundedKeys: n.PreFundedKeys, + } + bytes, err := DefaultJSONMarshal(config) + if err != nil { + return fmt.Errorf("failed to marshal network config: %w", err) + } + if err := os.WriteFile(n.getConfigPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write network config: %w", err) + } + return nil +} + +func (n *Network) EnvFilePath() string { + return filepath.Join(n.Dir, "network.env") +} + +func (n *Network) EnvFileContents() string { + return fmt.Sprintf("export %s=%s", NetworkDirEnvName, n.Dir) +} + +// Write an env file that sets the network dir env when sourced. +func (n *Network) writeEnvFile() error { + if err := os.WriteFile(n.EnvFilePath(), []byte(n.EnvFileContents()), perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write network env file: %w", err) + } + return nil +} + +func (n *Network) getSubnetDir() string { + return filepath.Join(n.Dir, defaultSubnetDirName) +} + +func (n *Network) readSubnets() error { + subnets, err := readSubnets(n.getSubnetDir()) + if err != nil { + return err + } + n.Subnets = subnets + return nil +} diff --git a/tests/fixture/testnet/local/network_test.go b/tests/fixture/tmpnet/network_test.go similarity index 50% rename from tests/fixture/testnet/local/network_test.go rename to tests/fixture/tmpnet/network_test.go index 8f7e66d37d5b..c04c497c2485 100644 --- a/tests/fixture/testnet/local/network_test.go +++ b/tests/fixture/tmpnet/network_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package local +package tmpnet import ( + "bytes" "testing" "github.com/stretchr/testify/require" @@ -14,13 +15,15 @@ func TestNetworkSerialization(t *testing.T) { tmpDir := t.TempDir() - network := &LocalNetwork{Dir: tmpDir} - require.NoError(network.PopulateLocalNetworkConfig(1337, 1, 1)) - require.NoError(network.WriteAll()) + network := &Network{} + require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "", 1)) + require.NoError(network.Create(tmpDir)) + // Ensure node runtime is initialized + require.NoError(network.readNodes()) - loadedNetwork, err := ReadNetwork(tmpDir) + loadedNetwork, err := ReadNetwork(network.Dir) require.NoError(err) - for _, key := range loadedNetwork.FundedKeys { + for _, key := range loadedNetwork.PreFundedKeys { // Address() enables comparison with the original network by // ensuring full population of a key's in-memory representation. _ = key.Address() diff --git a/tests/fixture/tmpnet/node.go b/tests/fixture/tmpnet/node.go new file mode 100644 index 000000000000..ea21e0f27ff0 --- /dev/null +++ b/tests/fixture/tmpnet/node.go @@ -0,0 +1,340 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cast" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" +) + +// The Node type is defined in this file (node.go - orchestration) and +// node_config.go (reading/writing configuration). + +const ( + defaultNodeTickerInterval = 50 * time.Millisecond +) + +var ( + errMissingTLSKeyForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingTLSKeyContentKey) + errMissingCertForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingCertContentKey) + errInvalidKeypair = fmt.Errorf("%q and %q must be provided together or not at all", config.StakingTLSKeyContentKey, config.StakingCertContentKey) +) + +// NodeRuntime defines the methods required to support running a node. +type NodeRuntime interface { + readState() error + Start(w io.Writer) error + InitiateStop() error + WaitForStopped(ctx context.Context) error + IsHealthy(ctx context.Context) (bool, error) +} + +// Configuration required to configure a node runtime. +type NodeRuntimeConfig struct { + AvalancheGoPath string +} + +// Node supports configuring and running a node participating in a temporary network. +type Node struct { + // Set by EnsureNodeID which is also called when the node is read. + NodeID ids.NodeID + + // Flags that will be supplied to the node at startup + Flags FlagsMap + + // An ephemeral node is not expected to be a persistent member of the network and + // should therefore not be used as for bootstrapping purposes. + IsEphemeral bool + + // The configuration used to initialize the node runtime. + RuntimeConfig *NodeRuntimeConfig + + // Runtime state, intended to be set by NodeRuntime + URI string + StakingAddress string + + // Initialized on demand + runtime NodeRuntime +} + +// Initializes a new node with only the data dir set +func NewNode(dataDir string) *Node { + return &Node{ + Flags: FlagsMap{ + config.DataDirKey: dataDir, + }, + } +} + +// Initializes the specified number of nodes. +func NewNodes(count int) []*Node { + nodes := make([]*Node, count) + for i := range nodes { + nodes[i] = NewNode("") + } + return nodes +} + +// Reads a node's configuration from the specified directory. +func ReadNode(dataDir string) (*Node, error) { + node := NewNode(dataDir) + return node, node.Read() +} + +// Reads nodes from the specified network directory. +func ReadNodes(networkDir string, includeEphemeral bool) ([]*Node, error) { + nodes := []*Node{} + + // Node configuration is stored in child directories + entries, err := os.ReadDir(networkDir) + if err != nil { + return nil, fmt.Errorf("failed to read dir: %w", err) + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + nodeDir := filepath.Join(networkDir, entry.Name()) + node, err := ReadNode(nodeDir) + if errors.Is(err, os.ErrNotExist) { + // If no config file exists, assume this is not the path of a node + continue + } else if err != nil { + return nil, err + } + + if !includeEphemeral && node.IsEphemeral { + continue + } + + nodes = append(nodes, node) + } + + return nodes, nil +} + +// Retrieves the runtime for the node. 
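+// A NodeProcess runtime is lazily initialized if none has been set.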
+func (n *Node) getRuntime() NodeRuntime { + if n.runtime == nil { + n.runtime = &NodeProcess{ + node: n, + } + } + return n.runtime +} + +// Runtime methods + +func (n *Node) IsHealthy(ctx context.Context) (bool, error) { + return n.getRuntime().IsHealthy(ctx) +} + +func (n *Node) Start(w io.Writer) error { + return n.getRuntime().Start(w) +} + +func (n *Node) InitiateStop(ctx context.Context) error { + if err := n.SaveMetricsSnapshot(ctx); err != nil { + return err + } + return n.getRuntime().InitiateStop() +} + +func (n *Node) WaitForStopped(ctx context.Context) error { + return n.getRuntime().WaitForStopped(ctx) +} + +func (n *Node) readState() error { + return n.getRuntime().readState() +} + +func (n *Node) getDataDir() string { + return cast.ToString(n.Flags[config.DataDirKey]) +} + +// Writes the current state of the metrics endpoint to disk +func (n *Node) SaveMetricsSnapshot(ctx context.Context) error { + if len(n.URI) == 0 { + // No URI to request metrics from + return nil + } + uri := n.URI + "/ext/metrics" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + return n.writeMetricsSnapshot(body) +} + +// Initiates node shutdown and waits for the node to stop. +func (n *Node) Stop(ctx context.Context) error { + if err := n.InitiateStop(ctx); err != nil { + return err + } + return n.WaitForStopped(ctx) +} + +// Sets networking configuration for the node. +// Convenience method for setting networking flags. +func (n *Node) SetNetworkingConfig(bootstrapIDs []string, bootstrapIPs []string) { + var ( + // Use dynamic port allocation. + httpPort uint16 = 0 + stakingPort uint16 = 0 + ) + n.Flags[config.HTTPPortKey] = httpPort + n.Flags[config.StakingPortKey] = stakingPort + n.Flags[config.BootstrapIDsKey] = strings.Join(bootstrapIDs, ",") + n.Flags[config.BootstrapIPsKey] = strings.Join(bootstrapIPs, ",") +} + +// Ensures staking and signing keys are generated if not already present and +// that the node ID (derived from the staking keypair) is set. +func (n *Node) EnsureKeys() error { + if err := n.EnsureBLSSigningKey(); err != nil { + return err + } + if err := n.EnsureStakingKeypair(); err != nil { + return err + } + return n.EnsureNodeID() +} + +// Ensures a BLS signing key is generated if not already present. +func (n *Node) EnsureBLSSigningKey() error { + // Attempt to retrieve an existing key + existingKey, err := n.Flags.GetStringVal(config.StakingSignerKeyContentKey) + if err != nil { + return err + } + if len(existingKey) > 0 { + // Nothing to do + return nil + } + + // Generate a new signing key + newKey, err := bls.NewSecretKey() + if err != nil { + return fmt.Errorf("failed to generate staking signer key: %w", err) + } + n.Flags[config.StakingSignerKeyContentKey] = base64.StdEncoding.EncodeToString(bls.SerializeSecretKey(newKey)) + return nil +} + +// Ensures a staking keypair is generated if not already present. 
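+// The key and cert are stored as base64-encoded flag values rather than as files on disk.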
+func (n *Node) EnsureStakingKeypair() error { + keyKey := config.StakingTLSKeyContentKey + certKey := config.StakingCertContentKey + + key, err := n.Flags.GetStringVal(keyKey) + if err != nil { + return err + } + + cert, err := n.Flags.GetStringVal(certKey) + if err != nil { + return err + } + + if len(key) == 0 && len(cert) == 0 { + // Generate new keypair + tlsCertBytes, tlsKeyBytes, err := staking.NewCertAndKeyBytes() + if err != nil { + return fmt.Errorf("failed to generate staking keypair: %w", err) + } + n.Flags[keyKey] = base64.StdEncoding.EncodeToString(tlsKeyBytes) + n.Flags[certKey] = base64.StdEncoding.EncodeToString(tlsCertBytes) + } else if len(key) == 0 || len(cert) == 0 { + // Only one of key and cert was provided + return errInvalidKeypair + } + + return nil +} + +// Derives the nodes proof-of-possession. Requires the node to have a +// BLS signing key. +func (n *Node) GetProofOfPossession() (*signer.ProofOfPossession, error) { + signingKey, err := n.Flags.GetStringVal(config.StakingSignerKeyContentKey) + if err != nil { + return nil, err + } + signingKeyBytes, err := base64.StdEncoding.DecodeString(signingKey) + if err != nil { + return nil, err + } + secretKey, err := bls.SecretKeyFromBytes(signingKeyBytes) + if err != nil { + return nil, err + } + return signer.NewProofOfPossession(secretKey), nil +} + +// Derives the node ID. Requires that a tls keypair is present. +func (n *Node) EnsureNodeID() error { + keyKey := config.StakingTLSKeyContentKey + certKey := config.StakingCertContentKey + + key, err := n.Flags.GetStringVal(keyKey) + if err != nil { + return err + } + if len(key) == 0 { + return errMissingTLSKeyForNodeID + } + keyBytes, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", keyKey, err) + } + + cert, err := n.Flags.GetStringVal(certKey) + if err != nil { + return err + } + if len(cert) == 0 { + return errMissingCertForNodeID + } + certBytes, err := base64.StdEncoding.DecodeString(cert) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", certKey, err) + } + + tlsCert, err := staking.LoadTLSCertFromBytes(keyBytes, certBytes) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to load tls cert: %w", err) + } + stakingCert, err := staking.CertificateFromX509(tlsCert.Leaf) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to load tls cert: %w", err) + } + n.NodeID = stakingCert.NodeID + + return nil +} diff --git a/tests/fixture/tmpnet/node_config.go b/tests/fixture/tmpnet/node_config.go new file mode 100644 index 000000000000..3ebbc01b6c32 --- /dev/null +++ b/tests/fixture/tmpnet/node_config.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ava-labs/avalanchego/utils/perms" +) + +// The Node type is defined in this file node_config.go +// (reading/writing configuration) and node.go (orchestration). 
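+// Flags are persisted to flags.json and the remaining node configuration to its own config file in the node's data dir.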
+ +func (n *Node) getFlagsPath() string { + return filepath.Join(n.getDataDir(), "flags.json") +} + +func (n *Node) readFlags() error { + bytes, err := os.ReadFile(n.getFlagsPath()) + if err != nil { + return fmt.Errorf("failed to read node flags: %w", err) + } + flags := FlagsMap{} + if err := json.Unmarshal(bytes, &flags); err != nil { + return fmt.Errorf("failed to unmarshal node flags: %w", err) + } + n.Flags = flags + return n.EnsureNodeID() +} + +func (n *Node) writeFlags() error { + bytes, err := DefaultJSONMarshal(n.Flags) + if err != nil { + return fmt.Errorf("failed to marshal node flags: %w", err) + } + if err := os.WriteFile(n.getFlagsPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write node flags: %w", err) + } + return nil +} + +func (n *Node) getConfigPath() string { + return filepath.Join(n.getDataDir(), defaultConfigFilename) +} + +func (n *Node) readConfig() error { + bytes, err := os.ReadFile(n.getConfigPath()) + if err != nil { + return fmt.Errorf("failed to read node config: %w", err) + } + if err := json.Unmarshal(bytes, n); err != nil { + return fmt.Errorf("failed to unmarshal node config: %w", err) + } + return nil +} + +type serializedNodeConfig struct { + IsEphemeral bool + RuntimeConfig *NodeRuntimeConfig +} + +func (n *Node) writeConfig() error { + config := serializedNodeConfig{ + IsEphemeral: n.IsEphemeral, + RuntimeConfig: n.RuntimeConfig, + } + bytes, err := DefaultJSONMarshal(config) + if err != nil { + return fmt.Errorf("failed to marshal node config: %w", err) + } + if err := os.WriteFile(n.getConfigPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write node config: %w", err) + } + return nil +} + +func (n *Node) Read() error { + if err := n.readFlags(); err != nil { + return err + } + if err := n.readConfig(); err != nil { + return err + } + return n.readState() +} + +func (n *Node) Write() error { + if err := os.MkdirAll(n.getDataDir(), perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create node dir: %w", err) + } + + if err := n.writeFlags(); err != nil { + return err + } + return n.writeConfig() +} + +func (n *Node) writeMetricsSnapshot(data []byte) error { + metricsDir := filepath.Join(n.getDataDir(), "metrics") + if err := os.MkdirAll(metricsDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create metrics dir: %w", err) + } + // Create a filesystem-compatible filename from the current timestamp + ts := time.Now().UTC().Format(time.RFC3339) + ts = strings.ReplaceAll(strings.ReplaceAll(ts, ":", ""), "-", "") + metricsPath := filepath.Join(metricsDir, ts) + return os.WriteFile(metricsPath, data, perms.ReadWrite) +} diff --git a/tests/fixture/tmpnet/node_process.go b/tests/fixture/tmpnet/node_process.go new file mode 100644 index 000000000000..b9fae2f9508c --- /dev/null +++ b/tests/fixture/tmpnet/node_process.go @@ -0,0 +1,258 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "net" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + + "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/node" +) + +const ( + AvalancheGoPathEnvName = "CAMINOGO_BIN_PATH" + + defaultNodeInitTimeout = 10 * time.Second +) + +var errNodeAlreadyRunning = errors.New("failed to start node: node is already running") + +func checkNodeHealth(ctx context.Context, uri string) (bool, error) { + // Check that the node is reporting healthy + health, err := health.NewClient(uri).Health(ctx, nil) + if err == nil { + return health.Healthy, nil + } + + switch t := err.(type) { + case *net.OpError: + if t.Op == "read" { + // Connection refused - potentially recoverable + return false, nil + } + case syscall.Errno: + if t == syscall.ECONNREFUSED { + // Connection refused - potentially recoverable + return false, nil + } + } + // Assume all other errors are not recoverable + return false, fmt.Errorf("failed to query node health: %w", err) +} + +// Defines local-specific node configuration. Supports setting default +// and node-specific values. +type NodeProcess struct { + node *Node + + // PID of the node process + pid int +} + +func (p *NodeProcess) setProcessContext(processContext node.NodeProcessContext) { + p.pid = processContext.PID + p.node.URI = processContext.URI + p.node.StakingAddress = processContext.StakingAddress +} + +func (p *NodeProcess) readState() error { + path := p.getProcessContextPath() + if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) { + // The absence of the process context file indicates the node is not running + p.setProcessContext(node.NodeProcessContext{}) + return nil + } + + bytes, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read node process context: %w", err) + } + processContext := node.NodeProcessContext{} + if err := json.Unmarshal(bytes, &processContext); err != nil { + return fmt.Errorf("failed to unmarshal node process context: %w", err) + } + p.setProcessContext(processContext) + return nil +} + +// Start waits for the process context to be written which +// indicates that the node will be accepting connections on +// its staking port. The network will start faster with this +// synchronization due to the avoidance of exponential backoff +// if a node tries to connect to a beacon that is not ready. +func (p *NodeProcess) Start(w io.Writer) error { + // Avoid attempting to start an already running node. + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve existing process: %w", err) + } + if proc != nil { + return errNodeAlreadyRunning + } + + // Ensure a stale process context file is removed so that the + // creation of a new file can indicate node start. 
+ if err := os.Remove(p.getProcessContextPath()); err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to remove stale process context file: %w", err) + } + + cmd := exec.Command(p.node.RuntimeConfig.AvalancheGoPath, "--config-file", p.node.getFlagsPath()) // #nosec G204 + if err := cmd.Start(); err != nil { + return err + } + + // Determine appropriate level of node description detail + dataDir := p.node.getDataDir() + nodeDescription := fmt.Sprintf("node %q", p.node.NodeID) + if p.node.IsEphemeral { + nodeDescription = "ephemeral " + nodeDescription + } + nonDefaultNodeDir := filepath.Base(dataDir) != p.node.NodeID.String() + if nonDefaultNodeDir { + // Only include the data dir if its base is not the default (the node ID) + nodeDescription = fmt.Sprintf("%s with path: %s", nodeDescription, dataDir) + } + + go func() { + if err := cmd.Wait(); err != nil { + if err.Error() != "signal: killed" { + _, _ = fmt.Fprintf(w, "%s finished with error: %v\n", nodeDescription, err) + } + } + _, _ = fmt.Fprintf(w, "%s exited\n", nodeDescription) + }() + + // A node writes a process context file on start. If the file is not + // found in a reasonable amount of time, the node is unlikely to have + // started successfully. + if err := p.waitForProcessContext(context.Background()); err != nil { + return fmt.Errorf("failed to start local node: %w", err) + } + + _, err = fmt.Fprintf(w, "Started %s\n", nodeDescription) + return err +} + +// Signals the node process to stop. +func (p *NodeProcess) InitiateStop() error { + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve process to stop: %w", err) + } + if proc == nil { + // Already stopped + return nil + } + if err := proc.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("failed to send SIGTERM to pid %d: %w", p.pid, err) + } + return nil +} + +// Waits for the node process to stop. +func (p *NodeProcess) WaitForStopped(ctx context.Context) error { + ticker := time.NewTicker(defaultNodeTickerInterval) + defer ticker.Stop() + for { + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve process: %w", err) + } + if proc == nil { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see node process stop %q before timeout: %w", p.node.NodeID, ctx.Err()) + case <-ticker.C: + } + } +} + +func (p *NodeProcess) IsHealthy(ctx context.Context) (bool, error) { + // Check that the node process is running as a precondition for + // checking health. getProcess will also ensure that the node's + // API URI is current. 
+ proc, err := p.getProcess() + if err != nil { + return false, fmt.Errorf("failed to determine process status: %w", err) + } + if proc == nil { + return false, ErrNotRunning + } + + return checkNodeHealth(ctx, p.node.URI) +} + +func (p *NodeProcess) getProcessContextPath() string { + return filepath.Join(p.node.getDataDir(), config.DefaultProcessContextFilename) +} + +func (p *NodeProcess) waitForProcessContext(ctx context.Context) error { + ticker := time.NewTicker(defaultNodeTickerInterval) + defer ticker.Stop() + + ctx, cancel := context.WithTimeout(ctx, defaultNodeInitTimeout) + defer cancel() + for len(p.node.URI) == 0 { + err := p.readState() + if err != nil { + return fmt.Errorf("failed to read process context for node %q: %w", p.node.NodeID, err) + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to load process context for node %q before timeout: %w", p.node.NodeID, ctx.Err()) + case <-ticker.C: + } + } + return nil +} + +// Retrieve the node process if it is running. As part of determining +// process liveness, the node's process context will be refreshed if +// live or cleared if not running. +func (p *NodeProcess) getProcess() (*os.Process, error) { + // Read the process context to ensure freshness. The node may have + // stopped or been restarted since last read. + if err := p.readState(); err != nil { + return nil, fmt.Errorf("failed to read process context: %w", err) + } + + if p.pid == 0 { + // Process is not running + return nil, nil + } + + proc, err := os.FindProcess(p.pid) + if err != nil { + return nil, fmt.Errorf("failed to find process: %w", err) + } + + // Sending 0 will not actually send a signal but will perform + // error checking. + err = proc.Signal(syscall.Signal(0)) + if err == nil { + // Process is running + return proc, nil + } + if errors.Is(err, os.ErrProcessDone) { + // Process is not running + return nil, nil + } + return nil, fmt.Errorf("failed to determine process status: %w", err) +} diff --git a/tests/fixture/tmpnet/subnet.go b/tests/fixture/tmpnet/subnet.go new file mode 100644 index 000000000000..1cfa0dc12aa0 --- /dev/null +++ b/tests/fixture/tmpnet/subnet.go @@ -0,0 +1,333 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +const defaultSubnetDirName = "subnets" + +type Chain struct { + // Set statically + VMID ids.ID + Config string + Genesis []byte + + // Set at runtime + ChainID ids.ID + PreFundedKey *secp256k1.PrivateKey +} + +// Write the chain configuration to the specified directory. 
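+// Nothing is written for a chain without a config.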
+func (c *Chain) WriteConfig(chainDir string) error { + if len(c.Config) == 0 { + return nil + } + + chainConfigDir := filepath.Join(chainDir, c.ChainID.String()) + if err := os.MkdirAll(chainConfigDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create chain config dir: %w", err) + } + + path := filepath.Join(chainConfigDir, defaultConfigFilename) + if err := os.WriteFile(path, []byte(c.Config), perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write chain config: %w", err) + } + + return nil +} + +type Subnet struct { + // A unique string that can be used to refer to the subnet across different temporary + // networks (since the SubnetID will be different every time the subnet is created) + Name string + + // The ID of the transaction that created the subnet + SubnetID ids.ID + + // The private key that owns the subnet + OwningKey *secp256k1.PrivateKey + + // IDs of the nodes responsible for validating the subnet + ValidatorIDs []ids.NodeID + + Chains []*Chain +} + +// Retrieves a wallet configured for use with the subnet +func (s *Subnet) GetWallet(ctx context.Context, uri string) (primary.Wallet, error) { + keychain := secp256k1fx.NewKeychain(s.OwningKey) + + // Only fetch the subnet transaction if a subnet ID is present. This won't be true when + // the wallet is first used to create the subnet. + txIDs := set.Set[ids.ID]{} + if s.SubnetID != ids.Empty { + txIDs.Add(s.SubnetID) + } + + return primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: keychain, + EthKeychain: keychain, + PChainTxsToFetch: txIDs, + }) +} + +// Issues the subnet creation transaction and retains the result. The URI of a node is +// required to issue the transaction. +func (s *Subnet) Create(ctx context.Context, uri string) error { + wallet, err := s.GetWallet(ctx, uri) + if err != nil { + return err + } + pWallet := wallet.P() + + subnetTx, err := pWallet.IssueCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + s.OwningKey.Address(), + }, + }, + common.WithContext(ctx), + ) + if err != nil { + return fmt.Errorf("failed to create subnet %s: %w", s.Name, err) + } + s.SubnetID = subnetTx.ID() + + return nil +} + +func (s *Subnet) CreateChains(ctx context.Context, w io.Writer, uri string) error { + wallet, err := s.GetWallet(ctx, uri) + if err != nil { + return err + } + pWallet := wallet.P() + + if _, err := fmt.Fprintf(w, "Creating chains for subnet %q\n", s.Name); err != nil { + return err + } + + for _, chain := range s.Chains { + createChainTx, err := pWallet.IssueCreateChainTx( + s.SubnetID, + chain.Genesis, + chain.VMID, + nil, + "", + common.WithContext(ctx), + ) + if err != nil { + return fmt.Errorf("failed to create chain: %w", err) + } + chain.ChainID = createChainTx.ID() + + if _, err := fmt.Fprintf(w, " created chain %q for VM %q on subnet %q\n", chain.ChainID, chain.VMID, s.Name); err != nil { + return err + } + } + return nil +} + +// Add validators to the subnet +func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, nodes []*Node) error { + apiURI := nodes[0].URI + + wallet, err := s.GetWallet(ctx, apiURI) + if err != nil { + return err + } + pWallet := wallet.P() + + // Collect the end times for current validators to reuse for subnet validators + pvmClient := platformvm.NewClient(apiURI) + validators, err := pvmClient.GetCurrentValidators(ctx, constants.PrimaryNetworkID, nil) + if err != nil { + return err + } + endTimes := make(map[ids.NodeID]uint64) + for _, validator := range validators { + 
endTimes[validator.NodeID] = validator.EndTime + } + + startTime := time.Now().Add(DefaultValidatorStartTimeDiff) + for _, node := range nodes { + endTime, ok := endTimes[node.NodeID] + if !ok { + return fmt.Errorf("failed to find end time for %s", node.NodeID) + } + + _, err := pWallet.IssueAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: node.NodeID, + Start: uint64(startTime.Unix()), + End: endTime, + Wght: units.Schmeckle, + }, + Subnet: s.SubnetID, + }, + common.WithContext(ctx), + ) + if err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " added %s as validator for subnet `%s`\n", node.NodeID, s.Name); err != nil { + return err + } + + s.ValidatorIDs = append(s.ValidatorIDs, node.NodeID) + } + + return nil +} + +// Write the subnet configuration to disk +func (s *Subnet) Write(subnetDir string, chainDir string) error { + if err := os.MkdirAll(subnetDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create subnet dir: %w", err) + } + path := filepath.Join(subnetDir, s.Name+".json") + + // Since subnets are expected to be serialized for the first time + // without their chains having been created (i.e. chains will have + // empty IDs), use the absence of chain IDs as a prompt for a + // subnet name uniquness check. + if len(s.Chains) > 0 && s.Chains[0].ChainID == ids.Empty { + _, err := os.Stat(path) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + return fmt.Errorf("a subnet with name %s already exists", s.Name) + } + } + + bytes, err := DefaultJSONMarshal(s) + if err != nil { + return fmt.Errorf("failed to marshal subnet %s: %w", s.Name, err) + } + if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write subnet %s: %w", s.Name, err) + } + + for _, chain := range s.Chains { + if err := chain.WriteConfig(chainDir); err != nil { + return err + } + } + + return nil +} + +func waitForActiveValidators( + ctx context.Context, + w io.Writer, + pChainClient platformvm.Client, + subnet *Subnet, +) error { + ticker := time.NewTicker(DefaultPollingInterval) + defer ticker.Stop() + + if _, err := fmt.Fprintf(w, "Waiting for validators of subnet %q to become active\n", subnet.Name); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " "); err != nil { + return err + } + + for { + if _, err := fmt.Fprintf(w, "."); err != nil { + return err + } + validators, err := pChainClient.GetCurrentValidators(ctx, subnet.SubnetID, nil) + if err != nil { + return err + } + validatorSet := set.NewSet[ids.NodeID](len(validators)) + for _, validator := range validators { + validatorSet.Add(validator.NodeID) + } + allActive := true + for _, validatorID := range subnet.ValidatorIDs { + if !validatorSet.Contains(validatorID) { + allActive = false + } + } + if allActive { + if _, err := fmt.Fprintf(w, "\n saw the expected active validators of subnet %q\n", subnet.Name); err != nil { + return err + } + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see the expected active validators of subnet %q before timeout", subnet.Name) + case <-ticker.C: + } + } +} + +// Reads subnets from [network dir]/subnets/[subnet name].json +func readSubnets(subnetDir string) ([]*Subnet, error) { + if _, err := os.Stat(subnetDir); os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + entries, err := os.ReadDir(subnetDir) + if err != nil { + return nil, fmt.Errorf("failed to read subnet dir: %w", err) + } + + subnets := 
[]*Subnet{} + for _, entry := range entries { + if entry.IsDir() { + // Looking only for files + continue + } + if filepath.Ext(entry.Name()) != ".json" { + // Subnet files should have a .json extension + continue + } + + subnetPath := filepath.Join(subnetDir, entry.Name()) + bytes, err := os.ReadFile(subnetPath) + if err != nil { + return nil, fmt.Errorf("failed to read subnet file %s: %w", subnetPath, err) + } + subnet := &Subnet{} + if err := json.Unmarshal(bytes, subnet); err != nil { + return nil, fmt.Errorf("failed to unmarshal subnet from %s: %w", subnetPath, err) + } + subnets = append(subnets, subnet) + } + + return subnets, nil +} diff --git a/tests/fixture/tmpnet/utils.go b/tests/fixture/tmpnet/utils.go new file mode 100644 index 000000000000..b363bdec8671 --- /dev/null +++ b/tests/fixture/tmpnet/utils.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +const ( + DefaultNodeTickerInterval = 50 * time.Millisecond +) + +var ErrNotRunning = errors.New("not running") + +// WaitForHealthy blocks until Node.IsHealthy returns true or an error (including context timeout) is observed. +func WaitForHealthy(ctx context.Context, node *Node) error { + if _, ok := ctx.Deadline(); !ok { + return fmt.Errorf("unable to wait for health for node %q with a context without a deadline", node.NodeID) + } + ticker := time.NewTicker(DefaultNodeTickerInterval) + defer ticker.Stop() + + for { + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return fmt.Errorf("failed to wait for health of node %q: %w", node.NodeID, err) + } + if healthy { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to wait for health of node %q before timeout: %w", node.NodeID, ctx.Err()) + case <-ticker.C: + } + } +} + +// NodeURI associates a node ID with its API URI. +type NodeURI struct { + NodeID ids.NodeID + URI string +} + +func GetNodeURIs(nodes []*Node) []NodeURI { + uris := make([]NodeURI, 0, len(nodes)) + for _, node := range nodes { + if node.IsEphemeral { + // Avoid returning URIs for nodes whose lifespan is indeterminate + continue + } + // Only append URIs that are not empty. A node may have an + // empty URI if it is not currently running. + if len(node.URI) > 0 { + uris = append(uris, NodeURI{ + NodeID: node.NodeID, + URI: node.URI, + }) + } + } + return uris +} + +// Marshal to json with default prefix and indent. +func DefaultJSONMarshal(v interface{}) ([]byte, error) { + return json.MarshalIndent(v, "", " ") +} + +// Helper simplifying creation of a set of private keys +func NewPrivateKeys(keyCount int) ([]*secp256k1.PrivateKey, error) { + keys := make([]*secp256k1.PrivateKey, 0, keyCount) + for i := 0; i < keyCount; i++ { + key, err := secp256k1.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("failed to generate private key: %w", err) + } + keys = append(keys, key) + } + return keys, nil +} diff --git a/tests/http.go b/tests/http.go index 77c309f16f86..073b6d2df126 100644 --- a/tests/http.go +++ b/tests/http.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tests diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 0237f1e998d2..c117cd51c21a 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package upgrade @@ -6,7 +6,6 @@ package upgrade import ( "flag" "fmt" - "strings" "testing" "github.com/onsi/ginkgo/v2" @@ -15,8 +14,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" ) func TestUpgrade(t *testing.T) { @@ -48,30 +47,19 @@ var _ = ginkgo.Describe("[Upgrade]", func() { require := require.New(ginkgo.GinkgoT()) ginkgo.It("can upgrade versions", func() { - // TODO(marun) How many nodes should the target network have to best validate upgrade? - network := e2e.StartLocalNetwork(avalancheGoExecPath, e2e.DefaultNetworkDir) + network := &tmpnet.Network{} + e2e.StartNetwork(network, e2e.DefaultNetworkDir, avalancheGoExecPath, "" /* pluginDir */) ginkgo.By(fmt.Sprintf("restarting all nodes with %q binary", avalancheGoExecPathToUpgradeTo)) for _, node := range network.Nodes { - ginkgo.By(fmt.Sprintf("restarting node %q with %q binary", node.GetID(), avalancheGoExecPathToUpgradeTo)) - require.NoError(node.Stop()) - - // A node must start with sufficient bootstrap nodes to represent a quorum. Since the node's current - // bootstrap configuration may not satisfy this requirement (i.e. if on network start the node was one of - // the first validators), updating the node to bootstrap from all running validators maximizes the - // chances of a successful start. 
- // - // TODO(marun) Refactor node start to do this automatically - bootstrapIPs, bootstrapIDs, err := network.GetBootstrapIPsAndIDs() - require.NoError(err) - require.NotEmpty(bootstrapIDs) - node.Flags[config.BootstrapIDsKey] = strings.Join(bootstrapIDs, ",") - node.Flags[config.BootstrapIPsKey] = strings.Join(bootstrapIPs, ",") - require.NoError(node.WriteConfig()) - - require.NoError(node.Start(ginkgo.GinkgoWriter, avalancheGoExecPath)) - - ginkgo.By(fmt.Sprintf("waiting for node %q to report healthy after restart", node.GetID())) + ginkgo.By(fmt.Sprintf("restarting node %q with %q binary", node.NodeID, avalancheGoExecPathToUpgradeTo)) + require.NoError(node.Stop(e2e.DefaultContext())) + + node.RuntimeConfig.AvalancheGoPath = avalancheGoExecPathToUpgradeTo + + require.NoError(network.StartNode(e2e.DefaultContext(), ginkgo.GinkgoWriter, node)) + + ginkgo.By(fmt.Sprintf("waiting for node %q to report healthy after restart", node.NodeID)) e2e.WaitForHealthy(node) } diff --git a/tools/camino-network-runner b/tools/camino-network-runner deleted file mode 160000 index 07d845d68b74..000000000000 --- a/tools/camino-network-runner +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 07d845d68b74d7abe99f7fd49ce0681071142fd7 diff --git a/tools/cert/main.go b/tools/cert/main.go index c0bb1896786a..76a4e7465ac8 100644 --- a/tools/cert/main.go +++ b/tools/cert/main.go @@ -13,7 +13,6 @@ import ( "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/cb58" utilsSecp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -72,7 +71,7 @@ func main() { os.Exit(1) } - id, err := peer.CertToID(cert.Leaf) + id, err := staking.TLSCertToID(cert.Leaf) if err != nil { fmt.Printf("cannot extract nodeID from certificate: %s\n", err) os.Exit(1) diff --git a/trace/exporter.go b/trace/exporter.go index 252200975caf..4cca5fe3e53e 100644 --- a/trace/exporter.go +++ b/trace/exporter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace diff --git a/trace/exporter_type.go b/trace/exporter_type.go index 52d0124fe2b7..206731acc3bd 100644 --- a/trace/exporter_type.go +++ b/trace/exporter_type.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace diff --git a/trace/noop.go b/trace/noop.go index faa512b3429e..153934b143af 100644 --- a/trace/noop.go +++ b/trace/noop.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace diff --git a/trace/tracer.go b/trace/tracer.go index c511ff3bb0c9..1c8d40e8347f 100644 --- a/trace/tracer.go +++ b/trace/tracer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace diff --git a/utils/atomic.go b/utils/atomic.go index 2d75a4f47d6f..3bb125ee8af6 100644 --- a/utils/atomic.go +++ b/utils/atomic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package utils diff --git a/utils/atomic_test.go b/utils/atomic_test.go index 1af2ba490f2f..3fa74063c18a 100644 --- a/utils/atomic_test.go +++ b/utils/atomic_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/utils/bag/bag.go b/utils/bag/bag.go index 496969a01b17..a9af1acbcf49 100644 --- a/utils/bag/bag.go +++ b/utils/bag/bag.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag diff --git a/utils/bag/bag_benchmark_test.go b/utils/bag/bag_benchmark_test.go index 833ce755af93..e17b27b891bc 100644 --- a/utils/bag/bag_benchmark_test.go +++ b/utils/bag/bag_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag diff --git a/utils/bag/bag_test.go b/utils/bag/bag_test.go index 1a42486560a6..3b6e0faa07f0 100644 --- a/utils/bag/bag_test.go +++ b/utils/bag/bag_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag @@ -201,8 +201,8 @@ func TestBagString(t *testing.T) { bag.AddCount(elt0, 1337) - expected := "Bag[int]: (Size = 1337)\n" + - " 123: 1337" + expected := `Bag[int]: (Size = 1337) + 123: 1337` require.Equal(t, expected, bag.String()) } diff --git a/utils/bag/unique_bag.go b/utils/bag/unique_bag.go index 751159f16d9b..f5d679a5b816 100644 --- a/utils/bag/unique_bag.go +++ b/utils/bag/unique_bag.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag diff --git a/utils/bag/unique_bag_test.go b/utils/bag/unique_bag_test.go index d15ecbf3a5cf..1562b5c9f04c 100644 --- a/utils/bag/unique_bag_test.go +++ b/utils/bag/unique_bag_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag diff --git a/utils/beacon/beacon.go b/utils/beacon/beacon.go index 47e41032677e..38ac6df5b0f5 100644 --- a/utils/beacon/beacon.go +++ b/utils/beacon/beacon.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon diff --git a/utils/beacon/set.go b/utils/beacon/set.go index 243f8399a915..8b6970b55421 100644 --- a/utils/beacon/set.go +++ b/utils/beacon/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index 2dc240404988..976d0582e3ff 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package beacon @@ -16,9 +16,9 @@ import ( func TestSet(t *testing.T) { require := require.New(t) - id0 := ids.NodeID{0} - id1 := ids.NodeID{1} - id2 := ids.NodeID{2} + id0 := ids.BuildTestNodeID([]byte{0}) + id1 := ids.BuildTestNodeID([]byte{1}) + id2 := ids.BuildTestNodeID([]byte{2}) ip0 := ips.IPPort{ IP: net.IPv4zero, diff --git a/utils/bimap/bimap.go b/utils/bimap/bimap.go new file mode 100644 index 000000000000..d0651ff36cd2 --- /dev/null +++ b/utils/bimap/bimap.go @@ -0,0 +1,141 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bimap + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/ava-labs/avalanchego/utils" +) + +var ( + _ json.Marshaler = (*BiMap[int, int])(nil) + _ json.Unmarshaler = (*BiMap[int, int])(nil) + + nullBytes = []byte("null") + errNotBijective = errors.New("map not bijective") +) + +type Entry[K, V any] struct { + Key K + Value V +} + +// BiMap is a bi-directional map. +type BiMap[K, V comparable] struct { + keyToValue map[K]V + valueToKey map[V]K +} + +// New creates a new empty bimap. +func New[K, V comparable]() *BiMap[K, V] { + return &BiMap[K, V]{ + keyToValue: make(map[K]V), + valueToKey: make(map[V]K), + } +} + +// Put the key value pair into the map. If either [key] or [val] was previously +// in the map, the previous entries will be removed and returned. +// +// Note: Unlike normal maps, it's possible that Put removes 0, 1, or 2 existing +// entries to ensure that mappings are one-to-one. +func (m *BiMap[K, V]) Put(key K, val V) []Entry[K, V] { + var removed []Entry[K, V] + oldVal, oldValDeleted := m.DeleteKey(key) + if oldValDeleted { + removed = append(removed, Entry[K, V]{ + Key: key, + Value: oldVal, + }) + } + oldKey, oldKeyDeleted := m.DeleteValue(val) + if oldKeyDeleted { + removed = append(removed, Entry[K, V]{ + Key: oldKey, + Value: val, + }) + } + m.keyToValue[key] = val + m.valueToKey[val] = key + return removed +} + +// GetKey that maps to the provided value. +func (m *BiMap[K, V]) GetKey(val V) (K, bool) { + key, ok := m.valueToKey[val] + return key, ok +} + +// GetValue that is mapped to the provided key. +func (m *BiMap[K, V]) GetValue(key K) (V, bool) { + val, ok := m.keyToValue[key] + return val, ok +} + +// HasKey returns true if [key] is in the map. +func (m *BiMap[K, _]) HasKey(key K) bool { + _, ok := m.keyToValue[key] + return ok +} + +// HasValue returns true if [val] is in the map. +func (m *BiMap[_, V]) HasValue(val V) bool { + _, ok := m.valueToKey[val] + return ok +} + +// DeleteKey removes [key] from the map and returns the value it mapped to. +func (m *BiMap[K, V]) DeleteKey(key K) (V, bool) { + val, ok := m.keyToValue[key] + if !ok { + return utils.Zero[V](), false + } + delete(m.keyToValue, key) + delete(m.valueToKey, val) + return val, true +} + +// DeleteValue removes [val] from the map and returns the key that mapped to it. +func (m *BiMap[K, V]) DeleteValue(val V) (K, bool) { + key, ok := m.valueToKey[val] + if !ok { + return utils.Zero[K](), false + } + delete(m.keyToValue, key) + delete(m.valueToKey, val) + return key, true +} + +// Len return the number of entries in this map. 
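Before the Len accessor below, it may help to see the one-to-one invariant from the Put doc comment in action. A small usage sketch with hypothetical values, using only the BiMap methods introduced above:

    package main

    import (
        "fmt"

        "github.com/ava-labs/avalanchego/utils/bimap"
    )

    func main() {
        m := bimap.New[string, int]()
        m.Put("a", 1)
        m.Put("b", 2)

        // Re-mapping value 2 under a new key evicts the old ("b", 2) pairing so
        // the mapping stays one-to-one; Put returns the evicted entries.
        removed := m.Put("c", 2)

        key, _ := m.GetKey(2)
        fmt.Println(removed, key, m.Len()) // [{b 2}] c 2
    }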
+func (m *BiMap[K, V]) Len() int { + return len(m.keyToValue) +} + +func (m *BiMap[K, V]) MarshalJSON() ([]byte, error) { + return json.Marshal(m.keyToValue) +} + +func (m *BiMap[K, V]) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, nullBytes) { + return nil + } + var keyToValue map[K]V + if err := json.Unmarshal(b, &keyToValue); err != nil { + return err + } + valueToKey := make(map[V]K, len(keyToValue)) + for k, v := range keyToValue { + valueToKey[v] = k + } + if len(keyToValue) != len(valueToKey) { + return errNotBijective + } + + m.keyToValue = keyToValue + m.valueToKey = valueToKey + return nil +} diff --git a/utils/bimap/bimap_test.go b/utils/bimap/bimap_test.go new file mode 100644 index 000000000000..9b4433a51c70 --- /dev/null +++ b/utils/bimap/bimap_test.go @@ -0,0 +1,356 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bimap + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBiMapPut(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + key int + value int + expectedRemoved []Entry[int, int] + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + value: 2, + expectedRemoved: nil, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + value: 3, + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 3, + }, + valueToKey: map[int]int{ + 3: 1, + }, + }, + }, + { + name: "value removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 3, + value: 2, + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 3: 2, + }, + valueToKey: map[int]int{ + 2: 3, + }, + }, + }, + { + name: "key and value removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + 3: 4, + }, + valueToKey: map[int]int{ + 2: 1, + 4: 3, + }, + }, + key: 1, + value: 4, + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + { + Key: 3, + Value: 4, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 4, + }, + valueToKey: map[int]int{ + 4: 1, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + removed := test.state.Put(test.key, test.value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapHasValueAndGetKey(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, 2)) + + tests := []struct { + name string + value int + expectedKey int + expectedExists bool + }{ + { + name: "fetch unknown", + value: 3, + expectedKey: 0, + expectedExists: false, + }, + { + name: "fetch known value", + value: 2, + expectedKey: 1, + expectedExists: true, + }, + { + name: "fetch known key", + value: 1, + expectedKey: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasValue(test.value) + require.Equal(test.expectedExists, exists) + + key, exists := m.GetKey(test.value) + 
require.Equal(test.expectedKey, key) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestBiMapHasKeyAndGetValue(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, 2)) + + tests := []struct { + name string + key int + expectedValue int + expectedExists bool + }{ + { + name: "fetch unknown", + key: 3, + expectedValue: 0, + expectedExists: false, + }, + { + name: "fetch known key", + key: 1, + expectedValue: 2, + expectedExists: true, + }, + { + name: "fetch known value", + key: 2, + expectedValue: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasKey(test.key) + require.Equal(test.expectedExists, exists) + + value, exists := m.GetValue(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestBiMapDeleteKey(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + key int + expectedValue int + expectedRemoved bool + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + expectedValue: 0, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + expectedValue: 2, + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + value, removed := test.state.DeleteKey(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapDeleteValue(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + value int + expectedKey int + expectedRemoved bool + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + value: 1, + expectedKey: 0, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + value: 2, + expectedKey: 1, + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + key, removed := test.state.DeleteValue(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapLen(t *testing.T) { + require := require.New(t) + + m := New[int, int]() + require.Zero(m.Len()) + + m.Put(1, 2) + require.Equal(1, m.Len()) + + m.Put(2, 3) + require.Equal(2, m.Len()) + + m.Put(1, 3) + require.Equal(1, m.Len()) + + m.DeleteKey(1) + require.Zero(m.Len()) +} + +func TestBiMapJSON(t *testing.T) { + require := require.New(t) + + expectedMap := New[int, int]() + expectedMap.Put(1, 2) + expectedMap.Put(2, 3) + + jsonBytes, err := json.Marshal(expectedMap) + require.NoError(err) + + expectedJSONBytes := []byte(`{"1":2,"2":3}`) + require.Equal(expectedJSONBytes, jsonBytes) + + var unmarshalledMap BiMap[int, int] + require.NoError(json.Unmarshal(jsonBytes, &unmarshalledMap)) + require.Equal(expectedMap, &unmarshalledMap) +} + +func TestBiMapInvalidJSON(t *testing.T) { + require := require.New(t) + + invalidJSONBytes := []byte(`{"1":2,"2":2}`) + var 
unmarshalledMap BiMap[int, int] + err := json.Unmarshal(invalidJSONBytes, &unmarshalledMap) + require.ErrorIs(err, errNotBijective) +} diff --git a/utils/bloom/bloom_filter.go b/utils/bloom/bloom_filter.go deleted file mode 100644 index 498c57d3552f..000000000000 --- a/utils/bloom/bloom_filter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package bloom - -import ( - "errors" - "sync" - - "github.com/spaolacci/murmur3" - - streakKnife "github.com/holiman/bloomfilter/v2" -) - -var errMaxBytes = errors.New("too large") - -type Filter interface { - // Add adds to filter, assumed thread safe - Add(...[]byte) - - // Check checks filter, assumed thread safe - Check([]byte) bool -} - -func New(maxN uint64, p float64, maxBytes uint64) (Filter, error) { - neededBytes := bytesSteakKnifeFilter(maxN, p) - if neededBytes > maxBytes { - return nil, errMaxBytes - } - return newSteakKnifeFilter(maxN, p) -} - -type steakKnifeFilter struct { - lock sync.RWMutex - filter *streakKnife.Filter -} - -func bytesSteakKnifeFilter(maxN uint64, p float64) uint64 { - m := streakKnife.OptimalM(maxN, p) - k := streakKnife.OptimalK(m, maxN) - - // This is pulled from bloomFilter.newBits and bloomfilter.newRandKeys. The - // calculation is the size of the bitset which would be created from this - // filter. - mSize := (m + 63) / 64 - totalSize := mSize + k - - return totalSize * 8 // 8 == sizeof(uint64)) -} - -func newSteakKnifeFilter(maxN uint64, p float64) (Filter, error) { - m := streakKnife.OptimalM(maxN, p) - k := streakKnife.OptimalK(m, maxN) - - filter, err := streakKnife.New(m, k) - return &steakKnifeFilter{filter: filter}, err -} - -func (f *steakKnifeFilter) Add(bl ...[]byte) { - f.lock.Lock() - defer f.lock.Unlock() - - for _, b := range bl { - h := murmur3.New64() - _, _ = h.Write(b) - f.filter.Add(h) - } -} - -func (f *steakKnifeFilter) Check(b []byte) bool { - f.lock.RLock() - defer f.lock.RUnlock() - - h := murmur3.New64() - _, _ = h.Write(b) - return f.filter.Contains(h) -} diff --git a/utils/bloom/filter.go b/utils/bloom/filter.go new file mode 100644 index 000000000000..7a0e3026087e --- /dev/null +++ b/utils/bloom/filter.go @@ -0,0 +1,147 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "math/bits" + "sync" +) + +const ( + minHashes = 1 + maxHashes = 16 // Supports a false positive probability of 2^-16 when using optimal size values + minEntries = 1 + + bitsPerByte = 8 + bytesPerUint64 = 8 + hashRotation = 17 +) + +var ( + errInvalidNumHashes = errors.New("invalid num hashes") + errTooFewHashes = errors.New("too few hashes") + errTooManyHashes = errors.New("too many hashes") + errTooFewEntries = errors.New("too few entries") +) + +type Filter struct { + // numBits is always equal to [bitsPerByte * len(entries)] + numBits uint64 + + lock sync.RWMutex + hashSeeds []uint64 + entries []byte + count int +} + +// New creates a new Filter with the specified number of hashes and bytes for +// entries. The returned bloom filter is safe for concurrent usage. 
+func New(numHashes, numEntries int) (*Filter, error) { + if numEntries < minEntries { + return nil, errTooFewEntries + } + + hashSeeds, err := newHashSeeds(numHashes) + if err != nil { + return nil, err + } + + return &Filter{ + numBits: uint64(numEntries * bitsPerByte), + hashSeeds: hashSeeds, + entries: make([]byte, numEntries), + count: 0, + }, nil +} + +func (f *Filter) Add(hash uint64) { + f.lock.Lock() + defer f.lock.Unlock() + + _ = 1 % f.numBits // hint to the compiler that numBits is not 0 + for _, seed := range f.hashSeeds { + hash = bits.RotateLeft64(hash, hashRotation) ^ seed + index := hash % f.numBits + byteIndex := index / bitsPerByte + bitIndex := index % bitsPerByte + f.entries[byteIndex] |= 1 << bitIndex + } + f.count++ +} + +// Count returns the number of elements that have been added to the bloom +// filter. +func (f *Filter) Count() int { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.count +} + +func (f *Filter) Contains(hash uint64) bool { + f.lock.RLock() + defer f.lock.RUnlock() + + return contains(f.hashSeeds, f.entries, hash) +} + +func (f *Filter) Marshal() []byte { + f.lock.RLock() + defer f.lock.RUnlock() + + return marshal(f.hashSeeds, f.entries) +} + +func newHashSeeds(count int) ([]uint64, error) { + switch { + case count < minHashes: + return nil, fmt.Errorf("%w: %d < %d", errTooFewHashes, count, minHashes) + case count > maxHashes: + return nil, fmt.Errorf("%w: %d > %d", errTooManyHashes, count, maxHashes) + } + + bytes := make([]byte, count*bytesPerUint64) + if _, err := rand.Reader.Read(bytes); err != nil { + return nil, err + } + + seeds := make([]uint64, count) + for i := range seeds { + seeds[i] = binary.BigEndian.Uint64(bytes[i*bytesPerUint64:]) + } + return seeds, nil +} + +func contains(hashSeeds []uint64, entries []byte, hash uint64) bool { + var ( + numBits = bitsPerByte * uint64(len(entries)) + _ = 1 % numBits // hint to the compiler that numBits is not 0 + accumulator byte = 1 + ) + for seedIndex := 0; seedIndex < len(hashSeeds) && accumulator != 0; seedIndex++ { + hash = bits.RotateLeft64(hash, hashRotation) ^ hashSeeds[seedIndex] + index := hash % numBits + byteIndex := index / bitsPerByte + bitIndex := index % bitsPerByte + accumulator &= entries[byteIndex] >> bitIndex + } + return accumulator != 0 +} + +func marshal(hashSeeds []uint64, entries []byte) []byte { + numHashes := len(hashSeeds) + entriesOffset := 1 + numHashes*bytesPerUint64 + + bytes := make([]byte, entriesOffset+len(entries)) + bytes[0] = byte(numHashes) + for i, seed := range hashSeeds { + binary.BigEndian.PutUint64(bytes[1+i*bytesPerUint64:], seed) + } + copy(bytes[entriesOffset:], entries) + return bytes +} diff --git a/utils/bloom/filter_test.go b/utils/bloom/filter_test.go new file mode 100644 index 000000000000..856797f8ab50 --- /dev/null +++ b/utils/bloom/filter_test.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
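Before the tests, a compact usage sketch of the Filter API defined above. The hash values here are arbitrary uint64s; in practice they come from the Hash helper added later in this diff. Sizes are illustrative rather than tuned:

    package main

    import (
        "fmt"

        "github.com/ava-labs/avalanchego/utils/bloom"
    )

    func main() {
        // 8 hash seeds over 1 KiB of entry bytes; see OptimalParameters further
        // down for deriving these from an expected count and false-positive target.
        f, err := bloom.New(8, 1024)
        if err != nil {
            panic(err)
        }

        f.Add(0xCAFEF00D) // callers supply pre-hashed uint64 values
        fmt.Println(f.Contains(0xCAFEF00D)) // true
        fmt.Println(f.Contains(0xDEADBEEF)) // false with high probability
        fmt.Println(f.Count())              // 1

        raw := f.Marshal() // 1 byte num hashes + 8 bytes per seed + entry bytes
        fmt.Println(len(raw)) // 1 + 8*8 + 1024 = 1089
    }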
+ +package bloom + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestNewErrors(t *testing.T) { + tests := []struct { + numHashes int + numEntries int + err error + }{ + { + numHashes: 0, + numEntries: 1, + err: errTooFewHashes, + }, + { + numHashes: 17, + numEntries: 1, + err: errTooManyHashes, + }, + { + numHashes: 8, + numEntries: 0, + err: errTooFewEntries, + }, + } + for _, test := range tests { + t.Run(test.err.Error(), func(t *testing.T) { + _, err := New(test.numHashes, test.numEntries) + require.ErrorIs(t, err, test.err) + }) + } +} + +func TestNormalUsage(t *testing.T) { + require := require.New(t) + + toAdd := make([]uint64, 1024) + for i := range toAdd { + toAdd[i] = rand.Uint64() //#nosec G404 + } + + initialNumHashes, initialNumBytes := OptimalParameters(1024, 0.01) + filter, err := New(initialNumHashes, initialNumBytes) + require.NoError(err) + + for i, elem := range toAdd { + filter.Add(elem) + for _, elem := range toAdd[:i] { + require.True(filter.Contains(elem)) + } + } + + require.Equal(len(toAdd), filter.Count()) + + filterBytes := filter.Marshal() + parsedFilter, err := Parse(filterBytes) + require.NoError(err) + + for _, elem := range toAdd { + require.True(parsedFilter.Contains(elem)) + } + + parsedFilterBytes := parsedFilter.Marshal() + require.Equal(filterBytes, parsedFilterBytes) +} + +func BenchmarkAdd(b *testing.B) { + f, err := New(8, 16*units.KiB) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Add(1) + } +} + +func BenchmarkMarshal(b *testing.B) { + f, err := New(OptimalParameters(10_000, .01)) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Marshal() + } +} diff --git a/utils/bloom/hasher.go b/utils/bloom/hasher.go new file mode 100644 index 000000000000..d5e3f5a6f5ec --- /dev/null +++ b/utils/bloom/hasher.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "crypto/sha256" + "encoding/binary" +) + +func Add(f *Filter, key, salt []byte) { + f.Add(Hash(key, salt)) +} + +func Contains(c Checker, key, salt []byte) bool { + return c.Contains(Hash(key, salt)) +} + +type Checker interface { + Contains(hash uint64) bool +} + +func Hash(key, salt []byte) uint64 { + hash := sha256.New() + // sha256.Write never returns errors + _, _ = hash.Write(key) + _, _ = hash.Write(salt) + + output := make([]byte, 0, sha256.Size) + return binary.BigEndian.Uint64(hash.Sum(output)) +} diff --git a/utils/bloom/hasher_test.go b/utils/bloom/hasher_test.go new file mode 100644 index 000000000000..b262f1dbb5dc --- /dev/null +++ b/utils/bloom/hasher_test.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
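The Hash helper above is simply the first eight bytes of a SHA-256 over the key followed by the salt, read big-endian. A sketch of the equivalent manual computation (assuming the package is imported as bloom), useful when producing compatible hashes outside this package:

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"

        "github.com/ava-labs/avalanchego/utils/bloom"
    )

    func main() {
        key := []byte("hello world?")
        salt := []byte("so salty")

        // sha256(key || salt), truncated to its first 8 bytes, big-endian.
        sum := sha256.Sum256(append(append([]byte{}, key...), salt...))
        manual := binary.BigEndian.Uint64(sum[:8])

        fmt.Println(manual == bloom.Hash(key, salt)) // true
    }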
+ +package bloom + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestCollisionResistance(t *testing.T) { + require := require.New(t) + + f, err := New(8, 16*units.KiB) + require.NoError(err) + + Add(f, []byte("hello world?"), []byte("so salty")) + collision := Contains(f, []byte("hello world!"), []byte("so salty")) + require.False(collision) +} + +func BenchmarkHash(b *testing.B) { + key := ids.GenerateTestID() + salt := ids.GenerateTestID() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Hash(key[:], salt[:]) + } +} diff --git a/utils/bloom/metrics.go b/utils/bloom/metrics.go new file mode 100644 index 000000000000..7e33edc5c069 --- /dev/null +++ b/utils/bloom/metrics.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils" +) + +// Metrics is a collection of commonly useful metrics when using a long-lived +// bloom filter. +type Metrics struct { + Count prometheus.Gauge + NumHashes prometheus.Gauge + NumEntries prometheus.Gauge + MaxCount prometheus.Gauge + ResetCount prometheus.Counter +} + +func NewMetrics( + namespace string, + registerer prometheus.Registerer, +) (*Metrics, error) { + m := &Metrics{ + Count: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of additions that have been performed to the bloom", + }), + NumHashes: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "hashes", + Help: "Number of hashes in the bloom", + }), + NumEntries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "entries", + Help: "Number of bytes allocated to slots in the bloom", + }), + MaxCount: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "max_count", + Help: "Maximum number of additions that should be performed to the bloom before resetting", + }), + ResetCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "reset_count", + Help: "Number times the bloom has been reset", + }), + } + err := utils.Err( + registerer.Register(m.Count), + registerer.Register(m.NumHashes), + registerer.Register(m.NumEntries), + registerer.Register(m.MaxCount), + registerer.Register(m.ResetCount), + ) + return m, err +} + +// Reset the metrics to align with the provided bloom filter and max count. +func (m *Metrics) Reset(newFilter *Filter, maxCount int) { + m.Count.Set(float64(newFilter.Count())) + m.NumHashes.Set(float64(len(newFilter.hashSeeds))) + m.NumEntries.Set(float64(len(newFilter.entries))) + m.MaxCount.Set(float64(maxCount)) + m.ResetCount.Inc() +} diff --git a/utils/bloom/optimal.go b/utils/bloom/optimal.go new file mode 100644 index 000000000000..fc434ca57987 --- /dev/null +++ b/utils/bloom/optimal.go @@ -0,0 +1,115 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "math" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +const ln2Squared = math.Ln2 * math.Ln2 + +// OptimalParameters calculates the optimal [numHashes] and [numEntries] that +// should be allocated for a bloom filter which will contain [count] and target +// [falsePositiveProbability]. 
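Plugging the conventional Bloom-filter formulas (bits m = -n * ln(p) / (ln 2)^2, hashes k = (m/n) * ln 2) into the helpers below for a filter sized for 10,000 additions at a 1% false-positive target reproduces the values used in the tests further down (7 hashes, 11,982 entry bytes). A sketch of the arithmetic, which tracks OptimalEntries/OptimalHashes up to their rounding and clamping details:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        count, p := 10_000.0, 0.01

        // m = -n * ln(p) / (ln 2)^2  => required bits
        bits := -count * math.Log(p) / (math.Ln2 * math.Ln2) // ~95,850 bits
        entries := math.Ceil(bits / 8)                       // 11,982 bytes

        // k = (m/n) * ln 2  => hash count minimizing the false-positive rate
        hashes := math.Ceil(entries * 8 * math.Ln2 / count) // 7

        fmt.Println(int(entries), int(hashes)) // 11982 7
    }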
+func OptimalParameters(count int, falsePositiveProbability float64) (int, int) { + numEntries := OptimalEntries(count, falsePositiveProbability) + numHashes := OptimalHashes(numEntries, count) + return numHashes, numEntries +} + +// OptimalHashes calculates the number of hashes which will minimize the false +// positive probability of a bloom filter with [numEntries] after [count] +// additions. +// +// It is guaranteed to return a value in the range [minHashes, maxHashes]. +// +// ref: https://en.wikipedia.org/wiki/Bloom_filter +func OptimalHashes(numEntries, count int) int { + switch { + case numEntries < minEntries: + return minHashes + case count <= 0: + return maxHashes + } + + numHashes := math.Ceil(float64(numEntries) * bitsPerByte * math.Ln2 / float64(count)) + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int. To avoid + // this undefined behavior, we explicitly check against MaxInt here. + // + // ref: https://go.dev/ref/spec#Conversions + if numHashes >= maxHashes { + return maxHashes + } + return safemath.Max(int(numHashes), minHashes) +} + +// OptimalEntries calculates the optimal number of entries to use when creating +// a new Bloom filter when targenting a size of [count] with +// [falsePositiveProbability] assuming that the optimal number of hashes is +// used. +// +// It is guaranteed to return a value in the range [minEntries, MaxInt]. +// +// ref: https://en.wikipedia.org/wiki/Bloom_filter +func OptimalEntries(count int, falsePositiveProbability float64) int { + switch { + case count <= 0: + return minEntries + case falsePositiveProbability >= 1: + return minEntries + case falsePositiveProbability <= 0: + return math.MaxInt + } + + entriesInBits := -float64(count) * math.Log(falsePositiveProbability) / ln2Squared + entries := (entriesInBits + bitsPerByte - 1) / bitsPerByte + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int. To avoid + // this undefined behavior, we explicitly check against MaxInt here. + // + // ref: https://go.dev/ref/spec#Conversions + if entries >= math.MaxInt { + return math.MaxInt + } + return safemath.Max(int(entries), minEntries) +} + +// EstimateCount estimates the number of additions a bloom filter with +// [numHashes] and [numEntries] must have to reach [falsePositiveProbability]. +// This is derived by inversing a lower-bound on the probability of false +// positives. For values where numBits >> numHashes, the predicted probability +// is fairly accurate. +// +// It is guaranteed to return a value in the range [0, MaxInt]. +// +// ref: https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=903775 +func EstimateCount(numHashes, numEntries int, falsePositiveProbability float64) int { + switch { + case numHashes < minHashes: + return 0 + case numEntries < minEntries: + return 0 + case falsePositiveProbability <= 0: + return 0 + case falsePositiveProbability >= 1: + return math.MaxInt + } + + invNumHashes := 1 / float64(numHashes) + numBits := float64(numEntries * 8) + exp := 1 - math.Pow(falsePositiveProbability, invNumHashes) + count := math.Ceil(-math.Log(exp) * numBits * invNumHashes) + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int. To avoid + // this undefined behavior, we explicitly check against MaxInt here. 
+ // + // ref: https://go.dev/ref/spec#Conversions + if count >= math.MaxInt { + return math.MaxInt + } + return int(count) +} diff --git a/utils/bloom/optimal_test.go b/utils/bloom/optimal_test.go new file mode 100644 index 000000000000..b52356d5b307 --- /dev/null +++ b/utils/bloom/optimal_test.go @@ -0,0 +1,203 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +const largestFloat64LessThan1 float64 = 1 - 1e-16 + +func TestOptimalHashes(t *testing.T) { + tests := []struct { + numEntries int + count int + expectedHashes int + }{ + { // invalid params + numEntries: 0, + count: 1024, + expectedHashes: minHashes, + }, + { // invalid params + numEntries: 1024, + count: 0, + expectedHashes: maxHashes, + }, + { + numEntries: math.MaxInt, + count: 1, + expectedHashes: maxHashes, + }, + { + numEntries: 1, + count: math.MaxInt, + expectedHashes: minHashes, + }, + { + numEntries: 1024, + count: 1024, + expectedHashes: 6, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d", test.numEntries, test.count), func(t *testing.T) { + hashes := OptimalHashes(test.numEntries, test.count) + require.Equal(t, test.expectedHashes, hashes) + }) + } +} + +func TestOptimalEntries(t *testing.T) { + tests := []struct { + count int + falsePositiveProbability float64 + expectedEntries int + }{ + { // invalid params + count: 0, + falsePositiveProbability: .5, + expectedEntries: minEntries, + }, + { // invalid params + count: 1, + falsePositiveProbability: 0, + expectedEntries: math.MaxInt, + }, + { // invalid params + count: 1, + falsePositiveProbability: 1, + expectedEntries: minEntries, + }, + { + count: math.MaxInt, + falsePositiveProbability: math.SmallestNonzeroFloat64, + expectedEntries: math.MaxInt, + }, + { + count: 1024, + falsePositiveProbability: largestFloat64LessThan1, + expectedEntries: minEntries, + }, + { + count: 1024, + falsePositiveProbability: .01, + expectedEntries: 1227, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%f", test.count, test.falsePositiveProbability), func(t *testing.T) { + entries := OptimalEntries(test.count, test.falsePositiveProbability) + require.Equal(t, test.expectedEntries, entries) + }) + } +} + +func TestEstimateEntries(t *testing.T) { + tests := []struct { + numHashes int + numEntries int + falsePositiveProbability float64 + expectedEntries int + }{ + { // invalid params + numHashes: 0, + numEntries: 2_048, + falsePositiveProbability: .5, + expectedEntries: 0, + }, + { // invalid params + numHashes: 1, + numEntries: 0, + falsePositiveProbability: .5, + expectedEntries: 0, + }, + { // invalid params + numHashes: 1, + numEntries: 1, + falsePositiveProbability: 2, + expectedEntries: math.MaxInt, + }, + { // invalid params + numHashes: 1, + numEntries: 1, + falsePositiveProbability: -1, + expectedEntries: 0, + }, + { + numHashes: 8, + numEntries: 2_048, + falsePositiveProbability: 0, + expectedEntries: 0, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: .01, + expectedEntries: 9_993, + }, + { // params from OptimalParameters(100_000, .001) + numHashes: 10, + numEntries: 179_720, + falsePositiveProbability: .001, + expectedEntries: 100_000, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: .05, + expectedEntries: 14_449, + }, + { // params 
from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: 1, + expectedEntries: math.MaxInt, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: math.SmallestNonzeroFloat64, + expectedEntries: 0, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: largestFloat64LessThan1, + expectedEntries: math.MaxInt, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%f", test.numHashes, test.numEntries, test.falsePositiveProbability), func(t *testing.T) { + entries := EstimateCount(test.numHashes, test.numEntries, test.falsePositiveProbability) + require.Equal(t, test.expectedEntries, entries) + }) + } +} + +func FuzzOptimalHashes(f *testing.F) { + f.Fuzz(func(t *testing.T, numEntries, count int) { + hashes := OptimalHashes(numEntries, count) + require.GreaterOrEqual(t, hashes, minHashes) + require.LessOrEqual(t, hashes, maxHashes) + }) +} + +func FuzzOptimalEntries(f *testing.F) { + f.Fuzz(func(t *testing.T, count int, falsePositiveProbability float64) { + entries := OptimalEntries(count, falsePositiveProbability) + require.GreaterOrEqual(t, entries, minEntries) + }) +} + +func FuzzEstimateEntries(f *testing.F) { + f.Fuzz(func(t *testing.T, numHashes, numEntries int, falsePositiveProbability float64) { + entries := EstimateCount(numHashes, numEntries, falsePositiveProbability) + require.GreaterOrEqual(t, entries, 0) + }) +} diff --git a/utils/bloom/read_filter.go b/utils/bloom/read_filter.go new file mode 100644 index 000000000000..075d77ed7a38 --- /dev/null +++ b/utils/bloom/read_filter.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "encoding/binary" + "fmt" +) + +var ( + EmptyFilter = &ReadFilter{ + hashSeeds: make([]uint64, minHashes), + entries: make([]byte, minEntries), + } + FullFilter = &ReadFilter{ + hashSeeds: make([]uint64, minHashes), + entries: make([]byte, minEntries), + } +) + +func init() { + for i := range FullFilter.entries { + FullFilter.entries[i] = 0xFF + } +} + +type ReadFilter struct { + hashSeeds []uint64 + entries []byte +} + +// Parse [bytes] into a read-only bloom filter. +func Parse(bytes []byte) (*ReadFilter, error) { + if len(bytes) == 0 { + return nil, errInvalidNumHashes + } + numHashes := bytes[0] + entriesOffset := 1 + int(numHashes)*bytesPerUint64 + switch { + case numHashes < minHashes: + return nil, fmt.Errorf("%w: %d < %d", errTooFewHashes, numHashes, minHashes) + case numHashes > maxHashes: + return nil, fmt.Errorf("%w: %d > %d", errTooManyHashes, numHashes, maxHashes) + case len(bytes) < entriesOffset+minEntries: // numEntries = len(bytes) - entriesOffset + return nil, errTooFewEntries + } + + f := &ReadFilter{ + hashSeeds: make([]uint64, numHashes), + entries: bytes[entriesOffset:], + } + for i := range f.hashSeeds { + f.hashSeeds[i] = binary.BigEndian.Uint64(bytes[1+i*bytesPerUint64:]) + } + return f, nil +} + +func (f *ReadFilter) Contains(hash uint64) bool { + return contains(f.hashSeeds, f.entries, hash) +} + +func (f *ReadFilter) Marshal() []byte { + return marshal(f.hashSeeds, f.entries) +} diff --git a/utils/bloom/read_filter_test.go b/utils/bloom/read_filter_test.go new file mode 100644 index 000000000000..8ea83db092ca --- /dev/null +++ b/utils/bloom/read_filter_test.go @@ -0,0 +1,112 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func NewMaliciousFilter(numHashes, numEntries int) *Filter { + f := &Filter{ + numBits: uint64(numEntries * bitsPerByte), + hashSeeds: make([]uint64, numHashes), + entries: make([]byte, numEntries), + count: 0, + } + for i := range f.entries { + f.entries[i] = math.MaxUint8 + } + return f +} + +func TestParseErrors(t *testing.T) { + tests := []struct { + bytes []byte + err error + }{ + { + bytes: nil, + err: errInvalidNumHashes, + }, + { + bytes: NewMaliciousFilter(0, 1).Marshal(), + err: errTooFewHashes, + }, + { + bytes: NewMaliciousFilter(17, 1).Marshal(), + err: errTooManyHashes, + }, + { + bytes: NewMaliciousFilter(1, 0).Marshal(), + err: errTooFewEntries, + }, + { + bytes: []byte{ + 0x01, // num hashes = 1 + }, + err: errTooFewEntries, + }, + } + for _, test := range tests { + t.Run(test.err.Error(), func(t *testing.T) { + _, err := Parse(test.bytes) + require.ErrorIs(t, err, test.err) + }) + } +} + +func BenchmarkParse(b *testing.B) { + f, err := New(OptimalParameters(10_000, .01)) + require.NoError(b, err) + bytes := f.Marshal() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = Parse(bytes) + } +} + +func BenchmarkContains(b *testing.B) { + f := NewMaliciousFilter(maxHashes, 1) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Contains(1) + } +} + +func FuzzParseThenMarshal(f *testing.F) { + f.Fuzz(func(t *testing.T, bytes []byte) { + f, err := Parse(bytes) + if err != nil { + return + } + + marshalledBytes := marshal(f.hashSeeds, f.entries) + require.Equal(t, bytes, marshalledBytes) + }) +} + +func FuzzMarshalThenParse(f *testing.F) { + f.Fuzz(func(t *testing.T, numHashes int, entries []byte) { + require := require.New(t) + + hashSeeds, err := newHashSeeds(numHashes) + if err != nil { + return + } + if len(entries) < minEntries { + return + } + + marshalledBytes := marshal(hashSeeds, entries) + rf, err := Parse(marshalledBytes) + require.NoError(err) + require.Equal(hashSeeds, rf.hashSeeds) + require.Equal(entries, rf.entries) + }) +} diff --git a/utils/buffer/bounded_nonblocking_queue.go b/utils/buffer/bounded_nonblocking_queue.go index 0b5d5f945d52..f8b0030e9687 100644 --- a/utils/buffer/bounded_nonblocking_queue.go +++ b/utils/buffer/bounded_nonblocking_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/utils/buffer/bounded_nonblocking_queue_test.go b/utils/buffer/bounded_nonblocking_queue_test.go index e6a6fdac3e49..323ea92589be 100644 --- a/utils/buffer/bounded_nonblocking_queue_test.go +++ b/utils/buffer/bounded_nonblocking_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/utils/buffer/unbounded_blocking_deque.go b/utils/buffer/unbounded_blocking_deque.go index 078d8d908ee8..a6c7fb66d6e1 100644 --- a/utils/buffer/unbounded_blocking_deque.go +++ b/utils/buffer/unbounded_blocking_deque.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
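For reference, the byte layout shared by marshal (in filter.go above) and Parse (in read_filter.go above) is: one byte giving the number of hash seeds, then eight big-endian bytes per seed, then the raw entry bytes. A sketch that builds the smallest encoding Parse accepts by hand and round-trips it (the seed value is arbitrary):

    package main

    import (
        "encoding/binary"
        "fmt"

        "github.com/ava-labs/avalanchego/utils/bloom"
    )

    func main() {
        raw := make([]byte, 1+8+1) // 1 hash seed, 1 entry byte: the minimum accepted
        raw[0] = 1                 // number of hash seeds
        binary.BigEndian.PutUint64(raw[1:], 0x0123456789ABCDEF) // the seed itself
        // raw[9] stays zero: a single, empty entry byte.

        rf, err := bloom.Parse(raw)
        if err != nil {
            panic(err)
        }
        fmt.Println(rf.Contains(42))               // false: no bits are set
        fmt.Println(len(rf.Marshal()) == len(raw)) // true: round-trips byte for byte
    }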
package buffer diff --git a/utils/buffer/unbounded_blocking_deque_test.go b/utils/buffer/unbounded_blocking_deque_test.go index 054d3a2e6bed..1f22db9916b9 100644 --- a/utils/buffer/unbounded_blocking_deque_test.go +++ b/utils/buffer/unbounded_blocking_deque_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/utils/buffer/unbounded_deque.go b/utils/buffer/unbounded_deque.go index 336f0869c907..873f33f14817 100644 --- a/utils/buffer/unbounded_deque.go +++ b/utils/buffer/unbounded_deque.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/utils/buffer/unbounded_deque_test.go b/utils/buffer/unbounded_deque_test.go index 5b759da1c0e9..dfdac4a53412 100644 --- a/utils/buffer/unbounded_deque_test.go +++ b/utils/buffer/unbounded_deque_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/utils/bytes.go b/utils/bytes.go index c025c4915c9e..a32f353cf75e 100644 --- a/utils/bytes.go +++ b/utils/bytes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/utils/cb58/cb58.go b/utils/cb58/cb58.go index 4d9cbd6f7449..27d8265cd2f9 100644 --- a/utils/cb58/cb58.go +++ b/utils/cb58/cb58.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cb58 diff --git a/utils/cb58/cb58_test.go b/utils/cb58/cb58_test.go index 858c0b8783ba..9d28c6f90fa4 100644 --- a/utils/cb58/cb58_test.go +++ b/utils/cb58/cb58_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cb58 diff --git a/utils/compare/compare.go b/utils/compare/compare.go deleted file mode 100644 index 13ec52f386cb..000000000000 --- a/utils/compare/compare.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package compare - -// Returns true iff the slices have the same elements, regardless of order. -func UnsortedEquals[T comparable](a, b []T) bool { - if len(a) != len(b) { - return false - } - m := make(map[T]int, len(a)) - for _, v := range a { - m[v]++ - } - for _, v := range b { - switch count := m[v]; count { - case 0: - // There were more instances of [v] in [b] than [a]. - return false - case 1: - delete(m, v) - default: - m[v] = count - 1 - } - } - return len(m) == 0 -} diff --git a/utils/compare/compare_test.go b/utils/compare/compare_test.go deleted file mode 100644 index e46bc838f72b..000000000000 --- a/utils/compare/compare_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package compare - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUnsortedEquals(t *testing.T) { - require := require.New(t) - - require.True(UnsortedEquals([]int{}, []int{})) - require.True(UnsortedEquals(nil, []int{})) - require.True(UnsortedEquals([]int{}, nil)) - require.False(UnsortedEquals([]int{1}, nil)) - require.False(UnsortedEquals(nil, []int{1})) - require.True(UnsortedEquals([]int{1}, []int{1})) - require.False(UnsortedEquals([]int{1, 2}, []int{})) - require.False(UnsortedEquals([]int{1, 2}, []int{1})) - require.False(UnsortedEquals([]int{1}, []int{1, 2})) - require.True(UnsortedEquals([]int{2, 1}, []int{1, 2})) - require.True(UnsortedEquals([]int{1, 2}, []int{2, 1})) -} diff --git a/utils/compression/compressor.go b/utils/compression/compressor.go index f0848357a882..c8624f9baf84 100644 --- a/utils/compression/compressor.go +++ b/utils/compression/compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/compression/compressor_test.go b/utils/compression/compressor_test.go index 0467c2c1b234..f4f024e550b9 100644 --- a/utils/compression/compressor_test.go +++ b/utils/compression/compressor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/compression/gzip_compressor.go b/utils/compression/gzip_compressor.go index a17c46f6d6a3..da0b941a47a1 100644 --- a/utils/compression/gzip_compressor.go +++ b/utils/compression/gzip_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression @@ -21,6 +21,7 @@ var ( ErrMsgTooLarge = errors.New("msg too large to be compressed") ) +// TODO: Remove once v1.11.x is out. type gzipCompressor struct { maxSize int64 gzipWriterPool sync.Pool diff --git a/utils/compression/no_compressor.go b/utils/compression/no_compressor.go index 1eb4237d9766..3c444c71d993 100644 --- a/utils/compression/no_compressor.go +++ b/utils/compression/no_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/compression/no_compressor_test.go b/utils/compression/no_compressor_test.go index 95000667658c..3b99a101814d 100644 --- a/utils/compression/no_compressor_test.go +++ b/utils/compression/no_compressor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/compression/type.go b/utils/compression/type.go index fd58a21f70fa..09b4d64c33ea 100644 --- a/utils/compression/type.go +++ b/utils/compression/type.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression @@ -14,7 +14,7 @@ type Type byte const ( TypeNone Type = iota + 1 - TypeGzip + TypeGzip // Remove once v1.11.x is out. 
TypeZstd ) diff --git a/utils/compression/type_test.go b/utils/compression/type_test.go index 13d6b313aa48..eacad3bad598 100644 --- a/utils/compression/type_test.go +++ b/utils/compression/type_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/compression/zstd_compressor.go b/utils/compression/zstd_compressor.go index eafc1071845f..b374fa850ee6 100644 --- a/utils/compression/zstd_compressor.go +++ b/utils/compression/zstd_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/utils/constants/acps.go b/utils/constants/acps.go new file mode 100644 index 000000000000..6774828cde31 --- /dev/null +++ b/utils/constants/acps.go @@ -0,0 +1,19 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package constants + +import "github.com/ava-labs/avalanchego/utils/set" + +// CurrentACPs is the set of ACPs that are currently, at the time of release, +// marked as implementable. +// +// See: https://github.com/avalanche-foundation/ACPs/tree/main#readme +var CurrentACPs = set.Of[uint32]( + 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md + 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md + 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md + 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md + 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md + 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md +) diff --git a/utils/constants/aliases.go b/utils/constants/aliases.go index dd94bd363925..dd8388246d39 100644 --- a/utils/constants/aliases.go +++ b/utils/constants/aliases.go @@ -1,12 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants -const ( - // ChainAliasPrefix denotes a prefix for an alias that belongs to a blockchain ID. - ChainAliasPrefix string = "bc" - - // VMAliasPrefix denotes a prefix for an alias that belongs to a VM ID. - VMAliasPrefix string = "vm" -) +// ChainAliasPrefix denotes a prefix for an alias that belongs to a blockchain ID. +const ChainAliasPrefix string = "bc" diff --git a/utils/constants/application.go b/utils/constants/application.go index 3a42e0edfa35..56da9b0a2d14 100644 --- a/utils/constants/application.go +++ b/utils/constants/application.go @@ -8,14 +8,14 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package constants // Variables to be exported // Can be overwritten with -X during build step -var ( +const ( // PlatformName exports the name of the platform PlatformName = "camino" diff --git a/utils/constants/memory.go b/utils/constants/memory.go index c8740ceba6f7..cca6ee7af0f9 100644 --- a/utils/constants/memory.go +++ b/utils/constants/memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/utils/constants/network_ids.go b/utils/constants/network_ids.go index 473ff6dd00a1..11cfa6d50b42 100644 --- a/utils/constants/network_ids.go +++ b/utils/constants/network_ids.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/utils/constants/network_ids_test.go b/utils/constants/network_ids_test.go index dc969ad157a1..8f81593dbe2e 100644 --- a/utils/constants/network_ids_test.go +++ b/utils/constants/network_ids_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/utils/constants/networking.go b/utils/constants/networking.go index d26f3db1070d..7a4ea89b8241 100644 --- a/utils/constants/networking.go +++ b/utils/constants/networking.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants @@ -32,6 +32,8 @@ const ( DefaultNetworkPeerListNonValidatorGossipSize = 0 DefaultNetworkPeerListPeersGossipSize = 10 DefaultNetworkPeerListGossipFreq = time.Minute + DefaultNetworkPeerListPullGossipFreq = 2 * time.Second + DefaultNetworkPeerListBloomResetFreq = time.Minute // Inbound Connection Throttling DefaultInboundConnUpgradeThrottlerCooldown = 10 * time.Second @@ -71,12 +73,12 @@ const ( DefaultBenchlistMinFailingDuration = 2*time.Minute + 30*time.Second // Router - DefaultAcceptedFrontierGossipFrequency = 10 * time.Second DefaultConsensusAppConcurrency = 2 DefaultConsensusShutdownTimeout = time.Minute + DefaultFrontierPollFrequency = 100 * time.Millisecond DefaultConsensusGossipAcceptedFrontierValidatorSize = 0 DefaultConsensusGossipAcceptedFrontierNonValidatorSize = 0 - DefaultConsensusGossipAcceptedFrontierPeerSize = 15 + DefaultConsensusGossipAcceptedFrontierPeerSize = 1 DefaultConsensusGossipOnAcceptValidatorSize = 0 DefaultConsensusGossipOnAcceptNonValidatorSize = 0 DefaultConsensusGossipOnAcceptPeerSize = 10 diff --git a/utils/constants/vm_ids.go b/utils/constants/vm_ids.go index 4fb887c425f2..9fda498f1f31 100644 --- a/utils/constants/vm_ids.go +++ b/utils/constants/vm_ids.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package constants diff --git a/utils/context.go b/utils/context.go index 9ff300186881..453c45e948a4 100644 --- a/utils/context.go +++ b/utils/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/utils/crypto/bls/bls_benchmark_test.go b/utils/crypto/bls/bls_benchmark_test.go index cd3568005764..b9648b43c04e 100644 --- a/utils/crypto/bls/bls_benchmark_test.go +++ b/utils/crypto/bls/bls_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/bls_test.go b/utils/crypto/bls/bls_test.go index f3bb05004376..e8a4a45bb97d 100644 --- a/utils/crypto/bls/bls_test.go +++ b/utils/crypto/bls/bls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/public.go b/utils/crypto/bls/public.go index 8d8237f83d5e..2c3cca7a0181 100644 --- a/utils/crypto/bls/public.go +++ b/utils/crypto/bls/public.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/public_test.go b/utils/crypto/bls/public_test.go index 9cd886400b2d..4465b014cff4 100644 --- a/utils/crypto/bls/public_test.go +++ b/utils/crypto/bls/public_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/secret.go b/utils/crypto/bls/secret.go index 3f385624520c..049938bdaf8e 100644 --- a/utils/crypto/bls/secret.go +++ b/utils/crypto/bls/secret.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/secret_test.go b/utils/crypto/bls/secret_test.go index c01540ac4f98..d3d46e1aa737 100644 --- a/utils/crypto/bls/secret_test.go +++ b/utils/crypto/bls/secret_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/signature.go b/utils/crypto/bls/signature.go index cafba33c48e6..0d0d029b796e 100644 --- a/utils/crypto/bls/signature.go +++ b/utils/crypto/bls/signature.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/utils/crypto/bls/signature_test.go b/utils/crypto/bls/signature_test.go index caf613fc18df..3d43282c487a 100644 --- a/utils/crypto/bls/signature_test.go +++ b/utils/crypto/bls/signature_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls diff --git a/utils/crypto/keychain/keychain.go b/utils/crypto/keychain/keychain.go index 5899bb40382a..47d39b59f07c 100644 --- a/utils/crypto/keychain/keychain.go +++ b/utils/crypto/keychain/keychain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keychain diff --git a/utils/crypto/keychain/keychain_test.go b/utils/crypto/keychain/keychain_test.go index 73aa476122db..1d1dd86b6055 100644 --- a/utils/crypto/keychain/keychain_test.go +++ b/utils/crypto/keychain/keychain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keychain diff --git a/utils/crypto/keychain/ledger.go b/utils/crypto/keychain/ledger.go index d709ed19c939..955eb4480e24 100644 --- a/utils/crypto/keychain/ledger.go +++ b/utils/crypto/keychain/ledger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keychain diff --git a/utils/crypto/keychain/mock_ledger.go b/utils/crypto/keychain/mock_ledger.go index 8f270bfbc032..b082631c416e 100644 --- a/utils/crypto/keychain/mock_ledger.go +++ b/utils/crypto/keychain/mock_ledger.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/crypto/keychain (interfaces: Ledger) +// +// Generated by this command: +// +// mockgen -package=keychain -destination=utils/crypto/keychain/mock_ledger.go github.com/ava-labs/avalanchego/utils/crypto/keychain Ledger +// // Package keychain is a generated GoMock package. package keychain @@ -48,7 +50,7 @@ func (m *MockLedger) Address(arg0 string, arg1 uint32) (ids.ShortID, error) { } // Address indicates an expected call of Address. -func (mr *MockLedgerMockRecorder) Address(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Address(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockLedger)(nil).Address), arg0, arg1) } @@ -63,7 +65,7 @@ func (m *MockLedger) Addresses(arg0 []uint32) ([]ids.ShortID, error) { } // Addresses indicates an expected call of Addresses. -func (mr *MockLedgerMockRecorder) Addresses(arg0 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Addresses(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addresses", reflect.TypeOf((*MockLedger)(nil).Addresses), arg0) } @@ -92,7 +94,7 @@ func (m *MockLedger) Sign(arg0 []byte, arg1 []uint32) ([][]byte, error) { } // Sign indicates an expected call of Sign. -func (mr *MockLedgerMockRecorder) Sign(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Sign(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockLedger)(nil).Sign), arg0, arg1) } @@ -107,7 +109,7 @@ func (m *MockLedger) SignHash(arg0 []byte, arg1 []uint32) ([][]byte, error) { } // SignHash indicates an expected call of SignHash. 
-func (mr *MockLedgerMockRecorder) SignHash(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) SignHash(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignHash", reflect.TypeOf((*MockLedger)(nil).SignHash), arg0, arg1) } diff --git a/utils/crypto/ledger/ledger.go b/utils/crypto/ledger/ledger.go index 37de44fec4ea..70f6d4f07b84 100644 --- a/utils/crypto/ledger/ledger.go +++ b/utils/crypto/ledger/ledger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ledger @@ -8,6 +8,8 @@ import ( ledger "github.com/ava-labs/ledger-avalanche/go" + bip32 "github.com/tyler-smith/go-bip32" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/utils/hashing" @@ -15,7 +17,7 @@ import ( ) const ( - rootPath = "m/44'/9000'/0'" + rootPath = "m/44'/9000'/0'" // BIP44: m / purpose' / coin_type' / account' ledgerBufferLimit = 8192 ledgerPathSize = 9 ) @@ -26,6 +28,7 @@ var _ keychain.Ledger = (*Ledger)(nil) // provides Avalanche-specific access. type Ledger struct { device *ledger.LedgerAvalanche + epk *bip32.Key } func New() (keychain.Ledger, error) { @@ -40,21 +43,37 @@ func addressPath(index uint32) string { } func (l *Ledger) Address(hrp string, addressIndex uint32) (ids.ShortID, error) { - _, hash, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") + resp, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") if err != nil { return ids.ShortEmpty, err } - return ids.ToShortID(hash) + return ids.ToShortID(resp.Hash) } func (l *Ledger) Addresses(addressIndices []uint32) ([]ids.ShortID, error) { + if l.epk == nil { + pk, chainCode, err := l.device.GetExtPubKey(rootPath, false, "", "") + if err != nil { + return nil, err + } + l.epk = &bip32.Key{ + Key: pk, + ChainCode: chainCode, + } + } + // derivation path rootPath/0 (BIP44 change level, when set to 0, known as external chain) + externalChain, err := l.epk.NewChildKey(0) + if err != nil { + return nil, err + } addresses := make([]ids.ShortID, len(addressIndices)) - for i, v := range addressIndices { - _, hash, err := l.device.GetPubKey(addressPath(v), false, "", "") + for i, addressIndex := range addressIndices { + // derivation path rootPath/0/v (BIP44 address index level) + address, err := externalChain.NewChildKey(addressIndex) if err != nil { return nil, err } - copy(addresses[i][:], hash) + copy(addresses[i][:], hashing.PubkeyBytesToAddress(address.Key)) } return addresses, nil } diff --git a/utils/crypto/ledger/ledger_test.go b/utils/crypto/ledger/ledger_test.go index 118dc8758d1b..160b26365f84 100644 --- a/utils/crypto/ledger/ledger_test.go +++ b/utils/crypto/ledger/ledger_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
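The ledger.Addresses change above stops querying the device once per address index: it fetches an extended public key for rootPath a single time and then derives the rootPath/0/index children locally with go-bip32. Below is a minimal sketch of that derivation, assuming the tyler-smith/go-bip32 API the diff imports; a fixed seed stands in for the device-provided extended public key, so this is illustrative only, not the PR's code.

```go
package main

import (
	"encoding/hex"
	"fmt"

	bip32 "github.com/tyler-smith/go-bip32"
)

func main() {
	// Illustrative stand-in for the extended key that ledger.go fetches
	// once from the device via GetExtPubKey(rootPath, ...).
	seed := make([]byte, 32)
	root, err := bip32.NewMasterKey(seed)
	if err != nil {
		panic(err)
	}

	// BIP44 change level 0 (the external chain), mirroring epk.NewChildKey(0).
	externalChain, err := root.NewChildKey(0)
	if err != nil {
		panic(err)
	}

	// Derive a few address-index children locally, with no device round trips.
	for index := uint32(0); index < 3; index++ {
		child, err := externalChain.NewChildKey(index)
		if err != nil {
			panic(err)
		}
		fmt.Printf("index %d pubkey %s\n", index, hex.EncodeToString(child.PublicKey().Key))
	}
}
```

The trade-off is one GetExtPubKey call up front in exchange for deriving arbitrarily many addresses off-device, which is what lets the loop in Addresses drop its per-index GetPubKey calls.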
package ledger diff --git a/utils/crypto/secp256k1/camino_secp256k1.go b/utils/crypto/secp256k1/camino_secp256k1.go index 0f50c421a179..68ed08a32320 100644 --- a/utils/crypto/secp256k1/camino_secp256k1.go +++ b/utils/crypto/secp256k1/camino_secp256k1.go @@ -4,6 +4,7 @@ package secp256k1 import ( + "crypto" rsa "crypto/rsa" x509 "crypto/x509" "crypto/x509/pkix" @@ -20,9 +21,10 @@ import ( var oidLocalKeyID = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 21} var ( - errWrongCertType = errors.New("wrong certificate type") - errNoSignature = errors.New("failed to extract signature from certificate") - errRecoverFailed = errors.New("failed to recover public key") + errNoSignature = errors.New("failed to extract signature from certificate") + errRecoverFailed = errors.New("failed to recover public key") + errNotRSAPublicKey = errors.New("certificate public key is not rsa public key") + ErrWrongExtensionType = errors.New("wrong extension type") ) // Takes a RSA privateKey and builds using it's hash an secp256k1 private key. @@ -49,26 +51,34 @@ func SignRsaPublicKey(privKey *secp256k1.PrivateKey, pubKey *rsa.PublicKey) *pki // This is the reverse what has been done in RsaPrivateKeyToSecp256PrivateKey // It returns the marshalled public key func RecoverSecp256PublicKey(cert *x509.Certificate) ([]byte, error) { - // Recover RSA public key from certificate - rPubKey := cert.PublicKey.(*rsa.PublicKey) - if rPubKey == nil { - return nil, errWrongCertType - } - - // Locate the signature in certificate - var signature []byte for _, ext := range cert.Extensions { if ext.Id.Equal(oidLocalKeyID) { - signature = ext.Value - break + return recoverSecp256PublicKeyFromExtension(&ext, cert.PublicKey) //nolint:gosec } } - if signature == nil { + return nil, errNoSignature +} + +func RecoverSecp256PublicKeyFromExtension(ext *pkix.Extension, publicKey crypto.PublicKey) ([]byte, error) { + if !ext.Id.Equal(oidLocalKeyID) { + return nil, ErrWrongExtensionType + } + + return recoverSecp256PublicKeyFromExtension(ext, publicKey) +} + +func recoverSecp256PublicKeyFromExtension(ext *pkix.Extension, publicKey crypto.PublicKey) ([]byte, error) { + if ext.Value == nil { return nil, errNoSignature } - data := hashing.ComputeHash256(x509.MarshalPKCS1PublicKey(rPubKey)) - sPubKey, _, err := ecdsa.RecoverCompact(signature, data) + rsaPubKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return nil, errNotRSAPublicKey + } + + data := hashing.ComputeHash256(x509.MarshalPKCS1PublicKey(rsaPubKey)) + sPubKey, _, err := ecdsa.RecoverCompact(ext.Value, data) if err != nil { return nil, errRecoverFailed } diff --git a/utils/crypto/secp256k1/rfc6979_test.go b/utils/crypto/secp256k1/rfc6979_test.go index 5d9ee8b4f033..7efc019a3429 100644 --- a/utils/crypto/secp256k1/rfc6979_test.go +++ b/utils/crypto/secp256k1/rfc6979_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 diff --git a/utils/crypto/secp256k1/secp256k1.go b/utils/crypto/secp256k1/secp256k1.go index 022cce2861a0..93ef887bf71d 100644 --- a/utils/crypto/secp256k1/secp256k1.go +++ b/utils/crypto/secp256k1/secp256k1.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
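The RecoverSecp256PublicKey rework above first locates the oidLocalKeyID extension and then fails cleanly when the certificate key is not RSA. The extension lookup itself needs only the standard library; the sketch below shows that step in isolation (extensionSignature and the fabricated certificate are hypothetical, not part of the diff).

```go
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"errors"
	"fmt"
)

// oidLocalKeyID mirrors the OID that camino_secp256k1.go searches for.
var oidLocalKeyID = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 21}

var errNoSignature = errors.New("failed to extract signature from certificate")

// extensionSignature shows the lookup RecoverSecp256PublicKey performs before
// recovering the key: scan the extensions for oidLocalKeyID and return its value.
func extensionSignature(cert *x509.Certificate) ([]byte, error) {
	for _, ext := range cert.Extensions {
		if ext.Id.Equal(oidLocalKeyID) && ext.Value != nil {
			return ext.Value, nil
		}
	}
	return nil, errNoSignature
}

func main() {
	// A fabricated certificate carrying only the extension of interest.
	cert := &x509.Certificate{
		Extensions: []pkix.Extension{{Id: oidLocalKeyID, Value: []byte{0xde, 0xad}}},
	}
	sig, err := extensionSignature(cert)
	fmt.Println(sig, err)
}
```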
package secp256k1 @@ -220,7 +220,7 @@ func (k *PrivateKey) String() string { } func (k *PrivateKey) MarshalJSON() ([]byte, error) { - return []byte("\"" + k.String() + "\""), nil + return []byte(`"` + k.String() + `"`), nil } func (k *PrivateKey) MarshalText() ([]byte, error) { diff --git a/utils/crypto/secp256k1/secp256k1_benchmark_test.go b/utils/crypto/secp256k1/secp256k1_benchmark_test.go index 1d55f38f7d86..ca4f98e38fb5 100644 --- a/utils/crypto/secp256k1/secp256k1_benchmark_test.go +++ b/utils/crypto/secp256k1/secp256k1_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 diff --git a/utils/crypto/secp256k1/secp256k1_test.go b/utils/crypto/secp256k1/secp256k1_test.go index a2074dff5229..39a915b9f9b3 100644 --- a/utils/crypto/secp256k1/secp256k1_test.go +++ b/utils/crypto/secp256k1/secp256k1_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 diff --git a/utils/crypto/secp256k1/test_keys.go b/utils/crypto/secp256k1/test_keys.go index 3122f2617ddf..4ceb567469b2 100644 --- a/utils/crypto/secp256k1/test_keys.go +++ b/utils/crypto/secp256k1/test_keys.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 diff --git a/utils/dynamicip/ifconfig_resolver.go b/utils/dynamicip/ifconfig_resolver.go index 0bbabcb58612..36c8d5adf04c 100644 --- a/utils/dynamicip/ifconfig_resolver.go +++ b/utils/dynamicip/ifconfig_resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/no_updater.go b/utils/dynamicip/no_updater.go index 5c9e38bd54a1..e3e7c6155bd0 100644 --- a/utils/dynamicip/no_updater.go +++ b/utils/dynamicip/no_updater.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/opendns_resolver.go b/utils/dynamicip/opendns_resolver.go index 3bda76c46404..5c39c95535fc 100644 --- a/utils/dynamicip/opendns_resolver.go +++ b/utils/dynamicip/opendns_resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/resolver.go b/utils/dynamicip/resolver.go index b3a341cd2121..45ad3778bc01 100644 --- a/utils/dynamicip/resolver.go +++ b/utils/dynamicip/resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/resolver_test.go b/utils/dynamicip/resolver_test.go index e5e53d40f9f3..6af72a98a50a 100644 --- a/utils/dynamicip/resolver_test.go +++ b/utils/dynamicip/resolver_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/updater.go b/utils/dynamicip/updater.go index 87c99e6db0c5..9a59c9fd25e0 100644 --- a/utils/dynamicip/updater.go +++ b/utils/dynamicip/updater.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/dynamicip/updater_test.go b/utils/dynamicip/updater_test.go index 98ce26b4a189..66c9a21c4c6a 100644 --- a/utils/dynamicip/updater_test.go +++ b/utils/dynamicip/updater_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/utils/error.go b/utils/error.go index b58c60cd001a..0a6a9f323e03 100644 --- a/utils/error.go +++ b/utils/error.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/utils/filesystem/io.go b/utils/filesystem/io.go index 939e635a2ca9..28a0c4aa1e32 100644 --- a/utils/filesystem/io.go +++ b/utils/filesystem/io.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/utils/filesystem/mock_file.go b/utils/filesystem/mock_file.go index 92b219393765..7b133025621b 100644 --- a/utils/filesystem/mock_file.go +++ b/utils/filesystem/mock_file.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/utils/filesystem/mock_io.go b/utils/filesystem/mock_io.go index 9ec15c548e39..06b27dd18dd0 100644 --- a/utils/filesystem/mock_io.go +++ b/utils/filesystem/mock_io.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/filesystem (interfaces: Reader) +// +// Generated by this command: +// +// mockgen -package=filesystem -destination=utils/filesystem/mock_io.go github.com/ava-labs/avalanchego/utils/filesystem Reader +// // Package filesystem is a generated GoMock package. package filesystem @@ -47,7 +49,7 @@ func (m *MockReader) ReadDir(arg0 string) ([]fs.DirEntry, error) { } // ReadDir indicates an expected call of ReadDir. -func (mr *MockReaderMockRecorder) ReadDir(arg0 interface{}) *gomock.Call { +func (mr *MockReaderMockRecorder) ReadDir(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*MockReader)(nil).ReadDir), arg0) } diff --git a/utils/filesystem/rename.go b/utils/filesystem/rename.go index 3ab7c147d355..578c46fb6c9e 100644 --- a/utils/filesystem/rename.go +++ b/utils/filesystem/rename.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package filesystem diff --git a/utils/filesystem/rename_test.go b/utils/filesystem/rename_test.go index 305a65092727..53c8a503bcf6 100644 --- a/utils/filesystem/rename_test.go +++ b/utils/filesystem/rename_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/utils/formatting/address/address.go b/utils/formatting/address/address.go index 321fe692bf57..97d4e0552969 100644 --- a/utils/formatting/address/address.go +++ b/utils/formatting/address/address.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package address diff --git a/utils/formatting/address/converter.go b/utils/formatting/address/converter.go index 8a5812a5bb2c..f043ab6a3489 100644 --- a/utils/formatting/address/converter.go +++ b/utils/formatting/address/converter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package address diff --git a/utils/formatting/encoding.go b/utils/formatting/encoding.go index 20ab4df30157..742800fee86e 100644 --- a/utils/formatting/encoding.go +++ b/utils/formatting/encoding.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting @@ -69,7 +69,7 @@ func (enc Encoding) MarshalJSON() ([]byte, error) { if !enc.valid() { return nil, errInvalidEncoding } - return []byte("\"" + enc.String() + "\""), nil + return []byte(`"` + enc.String() + `"`), nil } func (enc *Encoding) UnmarshalJSON(b []byte) error { diff --git a/utils/formatting/encoding_benchmark_test.go b/utils/formatting/encoding_benchmark_test.go index 598ed39310ca..879933418e3e 100644 --- a/utils/formatting/encoding_benchmark_test.go +++ b/utils/formatting/encoding_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/utils/formatting/encoding_test.go b/utils/formatting/encoding_test.go index 29f6c1d5df39..6623e325e9cf 100644 --- a/utils/formatting/encoding_test.go +++ b/utils/formatting/encoding_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/utils/formatting/int_format.go b/utils/formatting/int_format.go index 6cd8c870a43d..7c26655f2aba 100644 --- a/utils/formatting/int_format.go +++ b/utils/formatting/int_format.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/utils/formatting/int_format_test.go b/utils/formatting/int_format_test.go index febf23bca4a2..aa5dce1dc8db 100644 --- a/utils/formatting/int_format_test.go +++ b/utils/formatting/int_format_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/utils/formatting/prefixed_stringer.go b/utils/formatting/prefixed_stringer.go index 15fe720398a8..3c82cddd88a7 100644 --- a/utils/formatting/prefixed_stringer.go +++ b/utils/formatting/prefixed_stringer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/utils/hashing/consistent/hashable.go b/utils/hashing/consistent/hashable.go index df4a08d0ba44..a51ce4dfc817 100644 --- a/utils/hashing/consistent/hashable.go +++ b/utils/hashing/consistent/hashable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent diff --git a/utils/hashing/consistent/ring.go b/utils/hashing/consistent/ring.go index 1ade42ff359f..c99dd276047f 100644 --- a/utils/hashing/consistent/ring.go +++ b/utils/hashing/consistent/ring.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent diff --git a/utils/hashing/consistent/ring_test.go b/utils/hashing/consistent/ring_test.go index ec9e69098b1a..a53e166657b6 100644 --- a/utils/hashing/consistent/ring_test.go +++ b/utils/hashing/consistent/ring_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent @@ -181,7 +181,7 @@ func TestGetMapsToClockwiseNode(t *testing.T) { ring, hasher := setupTest(t, 1) // setup expected calls - calls := make([]*gomock.Call, len(test.ringNodes)+1) + calls := make([]any, len(test.ringNodes)+1) for i, key := range test.ringNodes { calls[i] = hasher.EXPECT().Hash(getHashKey(key.ConsistentHashKey(), 0)).Return(key.hash).Times(1) diff --git a/utils/hashing/hasher.go b/utils/hashing/hasher.go index 7519dfbb69ec..be74c160d00f 100644 --- a/utils/hashing/hasher.go +++ b/utils/hashing/hasher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hashing diff --git a/utils/hashing/hashing.go b/utils/hashing/hashing.go index f2c79e235a64..0d09fd457247 100644 --- a/utils/hashing/hashing.go +++ b/utils/hashing/hashing.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hashing diff --git a/utils/hashing/mock_hasher.go b/utils/hashing/mock_hasher.go index 26b4130c5d72..c2d5ea4b3918 100644 --- a/utils/hashing/mock_hasher.go +++ b/utils/hashing/mock_hasher.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/hashing (interfaces: Hasher) +// +// Generated by this command: +// +// mockgen -package=hashing -destination=utils/hashing/mock_hasher.go github.com/ava-labs/avalanchego/utils/hashing Hasher +// // Package hashing is a generated GoMock package. 
package hashing @@ -45,7 +47,7 @@ func (m *MockHasher) Hash(arg0 []byte) uint64 { } // Hash indicates an expected call of Hash. -func (mr *MockHasherMockRecorder) Hash(arg0 interface{}) *gomock.Call { +func (mr *MockHasherMockRecorder) Hash(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hash", reflect.TypeOf((*MockHasher)(nil).Hash), arg0) } diff --git a/utils/heap/map.go b/utils/heap/map.go index dbe06c06446e..1162e95fd15f 100644 --- a/utils/heap/map.go +++ b/utils/heap/map.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/heap/map_test.go b/utils/heap/map_test.go index cc774a5a50df..64e3e4e29132 100644 --- a/utils/heap/map_test.go +++ b/utils/heap/map_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/heap/queue.go b/utils/heap/queue.go index fc3bebaa0b8d..62687635f93e 100644 --- a/utils/heap/queue.go +++ b/utils/heap/queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/heap/queue_test.go b/utils/heap/queue_test.go index e7481eddbbe3..66e3417178bd 100644 --- a/utils/heap/queue_test.go +++ b/utils/heap/queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/heap/set.go b/utils/heap/set.go index 15fab421b278..e1865f1e64f7 100644 --- a/utils/heap/set.go +++ b/utils/heap/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/heap/set_test.go b/utils/heap/set_test.go index fd93f996d5ff..d475226118ee 100644 --- a/utils/heap/set_test.go +++ b/utils/heap/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package heap diff --git a/utils/ips/claimed_ip_port.go b/utils/ips/claimed_ip_port.go index 4ba4a6085e79..fdd31e44229e 100644 --- a/utils/ips/claimed_ip_port.go +++ b/utils/ips/claimed_ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips @@ -6,16 +6,14 @@ package ips import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/wrappers" ) -// Can't import these from wrappers package due to circular import. 
const ( - intLen = 4 - longLen = 8 - ipLen = 18 - idLen = 32 // Certificate length, signature length, IP, timestamp, tx ID - baseIPCertDescLen = 2*intLen + ipLen + longLen + idLen + baseIPCertDescLen = 2*wrappers.IntLen + IPPortLen + wrappers.LongLen + ids.IDLen + preimageLen = ids.IDLen + wrappers.LongLen ) // A self contained proof that a peer is claiming ownership of an IPPort at a @@ -32,12 +30,36 @@ type ClaimedIPPort struct { // actually claimed by the peer in question, and not by a malicious peer // trying to get us to dial bogus IPPorts. Signature []byte - // The txID that added this peer into the validator set - TxID ids.ID + // NodeID derived from the peer certificate. + NodeID ids.NodeID + // GossipID derived from the nodeID and timestamp. + GossipID ids.ID } -// Returns the length of the byte representation of this ClaimedIPPort. -func (i *ClaimedIPPort) BytesLen() int { - // See wrappers.PackPeerTrackInfo. +func NewClaimedIPPort( + cert *staking.Certificate, + ipPort IPPort, + timestamp uint64, + signature []byte, +) *ClaimedIPPort { + ip := &ClaimedIPPort{ + Cert: cert, + IPPort: ipPort, + Timestamp: timestamp, + Signature: signature, + NodeID: cert.NodeID, + } + + packer := wrappers.Packer{ + Bytes: make([]byte, preimageLen), + } + packer.PackFixedBytes(ip.NodeID[:]) + packer.PackLong(timestamp) + ip.GossipID = hashing.ComputeHash256Array(packer.Bytes) + return ip +} + +// Returns the approximate size of the binary representation of this ClaimedIPPort. +func (i *ClaimedIPPort) Size() int { return baseIPCertDescLen + len(i.Cert.Raw) + len(i.Signature) } diff --git a/utils/ips/dynamic_ip_port.go b/utils/ips/dynamic_ip_port.go index 3f30dc0a24b5..0b83ab5924f1 100644 --- a/utils/ips/dynamic_ip_port.go +++ b/utils/ips/dynamic_ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/utils/ips/ip_port.go b/utils/ips/ip_port.go index 3ca5bfe176d4..a60e6300ed49 100644 --- a/utils/ips/ip_port.go +++ b/utils/ips/ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips @@ -12,7 +12,10 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -const nullStr = "null" +const ( + IPPortLen = 16 + wrappers.ShortLen + nullStr = "null" +) var ( errMissingQuotes = errors.New("first and last characters should be quotes") diff --git a/utils/ips/ip_test.go b/utils/ips/ip_test.go index 30a72017e6da..903f26a2d070 100644 --- a/utils/ips/ip_test.go +++ b/utils/ips/ip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/utils/ips/lookup.go b/utils/ips/lookup.go index 8ae3de470ff0..cdf9176f9568 100644 --- a/utils/ips/lookup.go +++ b/utils/ips/lookup.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/utils/ips/lookup_test.go b/utils/ips/lookup_test.go index 52c0e5eda860..9fecccc54593 100644 --- a/utils/ips/lookup_test.go +++ b/utils/ips/lookup_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
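NewClaimedIPPort above derives GossipID by packing the NodeID bytes and the timestamp into a fixed-size preimage and hashing it once. The sketch below mirrors that computation with the same wrappers and hashing helpers the diff calls; ids.GenerateTestNodeID stands in for a real peer NodeID.

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/hashing"
	"github.com/ava-labs/avalanchego/utils/wrappers"
)

func main() {
	nodeID := ids.GenerateTestNodeID()
	timestamp := uint64(1700000000)

	// Preimage buffer sized ids.IDLen + LongLen, as in preimageLen above:
	// the 20-byte NodeID is packed at the front, the 8-byte timestamp after it.
	packer := wrappers.Packer{
		Bytes: make([]byte, ids.IDLen+wrappers.LongLen),
	}
	packer.PackFixedBytes(nodeID[:])
	packer.PackLong(timestamp)

	// One SHA-256 over the packed preimage yields the GossipID.
	gossipID := hashing.ComputeHash256Array(packer.Bytes)
	fmt.Println("gossip ID:", ids.ID(gossipID))
}
```

Because the preimage depends only on the NodeID and the timestamp, a newer claim from the same peer hashes to a different GossipID, which is what makes the ID usable for gossip deduplication.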
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/utils/json/codec.go b/utils/json/codec.go index 5871d67fd793..0bf51dcee2c2 100644 --- a/utils/json/codec.go +++ b/utils/json/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/utils/json/float32.go b/utils/json/float32.go index a20ee2bde525..ca35a760e3be 100644 --- a/utils/json/float32.go +++ b/utils/json/float32.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Float32 float32 func (f Float32) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatFloat(float64(f), byte('f'), 4, 32) + "\""), nil + return []byte(`"` + strconv.FormatFloat(float64(f), byte('f'), 4, 32) + `"`), nil } func (f *Float32) UnmarshalJSON(b []byte) error { diff --git a/utils/json/float32_test.go b/utils/json/float32_test.go index 3d336927ced5..519ca7f4d561 100644 --- a/utils/json/float32_test.go +++ b/utils/json/float32_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/utils/json/float64.go b/utils/json/float64.go index 4d31459ecf19..80fb8ae738da 100644 --- a/utils/json/float64.go +++ b/utils/json/float64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Float64 float64 func (f Float64) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatFloat(float64(f), byte('f'), 4, 64) + "\""), nil + return []byte(`"` + strconv.FormatFloat(float64(f), byte('f'), 4, 64) + `"`), nil } func (f *Float64) UnmarshalJSON(b []byte) error { diff --git a/utils/json/uint16.go b/utils/json/uint16.go index c2c8b6da378a..03e0f133d1a9 100644 --- a/utils/json/uint16.go +++ b/utils/json/uint16.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Uint16 uint16 func (u Uint16) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint16) UnmarshalJSON(b []byte) error { diff --git a/utils/json/uint32.go b/utils/json/uint32.go index 0bd0f28544d1..bae5b8857496 100644 --- a/utils/json/uint32.go +++ b/utils/json/uint32.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package json @@ -8,7 +8,7 @@ import "strconv" type Uint32 uint32 func (u Uint32) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint32) UnmarshalJSON(b []byte) error { diff --git a/utils/json/uint64.go b/utils/json/uint64.go index ba3189039d67..60bc99887439 100644 --- a/utils/json/uint64.go +++ b/utils/json/uint64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Uint64 uint64 func (u Uint64) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint64) UnmarshalJSON(b []byte) error { diff --git a/utils/json/uint8.go b/utils/json/uint8.go index c43f2786daf6..da2ca5270a83 100644 --- a/utils/json/uint8.go +++ b/utils/json/uint8.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Uint8 uint8 func (u Uint8) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint8) UnmarshalJSON(b []byte) error { diff --git a/utils/linkedhashmap/iterator.go b/utils/linkedhashmap/iterator.go index 306e41e872fd..a2869aac2a54 100644 --- a/utils/linkedhashmap/iterator.go +++ b/utils/linkedhashmap/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap diff --git a/utils/linkedhashmap/linkedhashmap.go b/utils/linkedhashmap/linkedhashmap.go index 12e9569c3391..9ae5b83ad7ae 100644 --- a/utils/linkedhashmap/linkedhashmap.go +++ b/utils/linkedhashmap/linkedhashmap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
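The MarshalJSON edits in utils/json and utils/formatting only swap escaped double quotes for raw string literals; the emitted bytes are identical. A quick standard-library check of that equivalence:

```go
package main

import "fmt"

func main() {
	s := "42"
	escaped := []byte("\"" + s + "\"") // old form with escaped quotes
	raw := []byte(`"` + s + `"`)       // new form with a raw string literal
	fmt.Println(string(escaped) == string(raw)) // true: identical JSON output
}
```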
package linkedhashmap @@ -17,7 +17,7 @@ var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) type Hashmap[K, V any] interface { Put(key K, val V) Get(key K) (val V, exists bool) - Delete(key K) + Delete(key K) (deleted bool) Len() int } @@ -63,11 +63,11 @@ func (lh *linkedHashmap[K, V]) Get(key K) (V, bool) { return lh.get(key) } -func (lh *linkedHashmap[K, V]) Delete(key K) { +func (lh *linkedHashmap[K, V]) Delete(key K) bool { lh.lock.Lock() defer lh.lock.Unlock() - lh.delete(key) + return lh.delete(key) } func (lh *linkedHashmap[K, V]) Len() int { @@ -114,11 +114,13 @@ func (lh *linkedHashmap[K, V]) get(key K) (V, bool) { return utils.Zero[V](), false } -func (lh *linkedHashmap[K, V]) delete(key K) { - if e, ok := lh.entryMap[key]; ok { +func (lh *linkedHashmap[K, V]) delete(key K) bool { + e, ok := lh.entryMap[key] + if ok { lh.entryList.Remove(e) delete(lh.entryMap, key) } + return ok } func (lh *linkedHashmap[K, V]) len() int { diff --git a/utils/linkedhashmap/linkedhashmap_test.go b/utils/linkedhashmap/linkedhashmap_test.go index 8bd7239ed5d9..372bd24baa4c 100644 --- a/utils/linkedhashmap/linkedhashmap_test.go +++ b/utils/linkedhashmap/linkedhashmap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap @@ -62,7 +62,7 @@ func TestLinkedHashmap(t *testing.T) { require.Equal(key1, rkey1, "wrong key") require.Equal(1, val1, "wrong value") - lh.Delete(key0) + require.True(lh.Delete(key0)) require.Equal(1, lh.Len(), "wrong hashmap length") _, exists = lh.Get(key0) @@ -132,7 +132,7 @@ func TestIterator(t *testing.T) { // Should be empty require.False(iter.Next()) // Delete id1 - lh.Delete(id1) + require.True(lh.Delete(id1)) iter = lh.NewIterator() require.NotNil(iter) // Should immediately be exhausted @@ -169,8 +169,8 @@ func TestIterator(t *testing.T) { iter := lh.NewIterator() require.True(iter.Next()) require.True(iter.Next()) - lh.Delete(id1) - lh.Delete(id2) + require.True(lh.Delete(id1)) + require.True(lh.Delete(id2)) require.True(iter.Next()) require.Equal(id3, iter.Key()) require.Equal(3, iter.Value()) diff --git a/utils/logging/color.go b/utils/logging/color.go index 323d1d1373f5..8fb7a8b6a29d 100644 --- a/utils/logging/color.go +++ b/utils/logging/color.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/config.go b/utils/logging/config.go index baeb666d753f..06d7f8ca92c4 100644 --- a/utils/logging/config.go +++ b/utils/logging/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/factory.go b/utils/logging/factory.go index b3426257956f..e6bb90272282 100644 --- a/utils/logging/factory.go +++ b/utils/logging/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging diff --git a/utils/logging/format.go b/utils/logging/format.go index 5ea9de28f087..53313c3dad8f 100644 --- a/utils/logging/format.go +++ b/utils/logging/format.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/level.go b/utils/logging/level.go index 173c606c44e2..7c1696b3202f 100644 --- a/utils/logging/level.go +++ b/utils/logging/level.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/log.go b/utils/logging/log.go index 006c487d608f..b9fc8f796ce7 100644 --- a/utils/logging/log.go +++ b/utils/logging/log.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging import ( "io" + "os" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -67,7 +68,9 @@ func (l *log) Write(p []byte) (int, error) { // TODO: return errors here func (l *log) Stop() { for _, wc := range l.wrappedCores { - _ = wc.Writer.Close() + if wc.Writer != os.Stdout && wc.Writer != os.Stderr { + _ = wc.Writer.Close() + } } } diff --git a/utils/logging/log_test.go b/utils/logging/log_test.go index c968747ba726..cd7396b6ac6a 100644 --- a/utils/logging/log_test.go +++ b/utils/logging/log_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/logger.go b/utils/logging/logger.go index 50d035e02fa8..2ca95bff104c 100644 --- a/utils/logging/logger.go +++ b/utils/logging/logger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/logging/mock_logger.go b/utils/logging/mock_logger.go deleted file mode 100644 index ba1079d30a09..000000000000 --- a/utils/logging/mock_logger.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/utils/logging (interfaces: Logger) - -// Package logging is a generated GoMock package. -package logging - -import ( - reflect "reflect" - - gomock "go.uber.org/mock/gomock" - zapcore "go.uber.org/zap/zapcore" -) - -// MockLogger is a mock of Logger interface. -type MockLogger struct { - ctrl *gomock.Controller - recorder *MockLoggerMockRecorder -} - -// MockLoggerMockRecorder is the mock recorder for MockLogger. -type MockLoggerMockRecorder struct { - mock *MockLogger -} - -// NewMockLogger creates a new mock instance. -func NewMockLogger(ctrl *gomock.Controller) *MockLogger { - mock := &MockLogger{ctrl: ctrl} - mock.recorder = &MockLoggerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLogger) EXPECT() *MockLoggerMockRecorder { - return m.recorder -} - -// Debug mocks base method. 
-func (m *MockLogger) Debug(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Debug", varargs...) -} - -// Debug indicates an expected call of Debug. -func (mr *MockLoggerMockRecorder) Debug(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...) -} - -// Enabled mocks base method. -func (m *MockLogger) Enabled(arg0 Level) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockLoggerMockRecorder) Enabled(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockLogger)(nil).Enabled), arg0) -} - -// Error mocks base method. -func (m *MockLogger) Error(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockLoggerMockRecorder) Error(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...) -} - -// Fatal mocks base method. -func (m *MockLogger) Fatal(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Fatal", varargs...) -} - -// Fatal indicates an expected call of Fatal. -func (mr *MockLoggerMockRecorder) Fatal(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fatal", reflect.TypeOf((*MockLogger)(nil).Fatal), varargs...) -} - -// Info mocks base method. -func (m *MockLogger) Info(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockLoggerMockRecorder) Info(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...) -} - -// RecoverAndExit mocks base method. -func (m *MockLogger) RecoverAndExit(arg0, arg1 func()) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndExit", arg0, arg1) -} - -// RecoverAndExit indicates an expected call of RecoverAndExit. -func (mr *MockLoggerMockRecorder) RecoverAndExit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndExit", reflect.TypeOf((*MockLogger)(nil).RecoverAndExit), arg0, arg1) -} - -// RecoverAndPanic mocks base method. -func (m *MockLogger) RecoverAndPanic(arg0 func()) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndPanic", arg0) -} - -// RecoverAndPanic indicates an expected call of RecoverAndPanic. 
-func (mr *MockLoggerMockRecorder) RecoverAndPanic(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndPanic", reflect.TypeOf((*MockLogger)(nil).RecoverAndPanic), arg0) -} - -// SetLevel mocks base method. -func (m *MockLogger) SetLevel(arg0 Level) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLevel", arg0) -} - -// SetLevel indicates an expected call of SetLevel. -func (mr *MockLoggerMockRecorder) SetLevel(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLevel", reflect.TypeOf((*MockLogger)(nil).SetLevel), arg0) -} - -// Stop mocks base method. -func (m *MockLogger) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") -} - -// Stop indicates an expected call of Stop. -func (mr *MockLoggerMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockLogger)(nil).Stop)) -} - -// StopOnPanic mocks base method. -func (m *MockLogger) StopOnPanic() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StopOnPanic") -} - -// StopOnPanic indicates an expected call of StopOnPanic. -func (mr *MockLoggerMockRecorder) StopOnPanic() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopOnPanic", reflect.TypeOf((*MockLogger)(nil).StopOnPanic)) -} - -// Trace mocks base method. -func (m *MockLogger) Trace(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Trace", varargs...) -} - -// Trace indicates an expected call of Trace. -func (mr *MockLoggerMockRecorder) Trace(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trace", reflect.TypeOf((*MockLogger)(nil).Trace), varargs...) -} - -// Verbo mocks base method. -func (m *MockLogger) Verbo(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Verbo", varargs...) -} - -// Verbo indicates an expected call of Verbo. -func (mr *MockLoggerMockRecorder) Verbo(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verbo", reflect.TypeOf((*MockLogger)(nil).Verbo), varargs...) -} - -// Warn mocks base method. -func (m *MockLogger) Warn(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Warn", varargs...) -} - -// Warn indicates an expected call of Warn. -func (mr *MockLoggerMockRecorder) Warn(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...) -} - -// Write mocks base method. -func (m *MockLogger) Write(arg0 []byte) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", arg0) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Write indicates an expected call of Write. 
-func (mr *MockLoggerMockRecorder) Write(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockLogger)(nil).Write), arg0) -} diff --git a/utils/logging/sanitize.go b/utils/logging/sanitize.go index 05b24ff96be2..18fc4021d965 100644 --- a/utils/logging/sanitize.go +++ b/utils/logging/sanitize.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -12,7 +12,7 @@ import ( type sanitizedString string func (s sanitizedString) String() string { - return strings.ReplaceAll(string(s), "\n", "\\n") + return strings.ReplaceAll(string(s), "\n", `\n`) } // UserString constructs a field with the given key and the value stripped of @@ -29,7 +29,7 @@ func (s sanitizedStrings) String() string { if i != 0 { _, _ = strs.WriteString(", ") } - _, _ = strs.WriteString(strings.ReplaceAll(str, "\n", "\\n")) + _, _ = strs.WriteString(strings.ReplaceAll(str, "\n", `\n`)) } return strs.String() } diff --git a/utils/logging/test_log.go b/utils/logging/test_log.go index 7df7df276a15..a8a85dc78e07 100644 --- a/utils/logging/test_log.go +++ b/utils/logging/test_log.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/utils/math/averager.go b/utils/math/averager.go index 14147d7ef54f..8573fbc80bb7 100644 --- a/utils/math/averager.go +++ b/utils/math/averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/averager_heap.go b/utils/math/averager_heap.go index 070593f0eeb8..57d046786a99 100644 --- a/utils/math/averager_heap.go +++ b/utils/math/averager_heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/averager_heap_test.go b/utils/math/averager_heap_test.go index 0586eb77947e..94f160a3a388 100644 --- a/utils/math/averager_heap_test.go +++ b/utils/math/averager_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/continuous_averager.go b/utils/math/continuous_averager.go index e60832f23a58..7bc892576b9f 100644 --- a/utils/math/continuous_averager.go +++ b/utils/math/continuous_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/continuous_averager_benchmark_test.go b/utils/math/continuous_averager_benchmark_test.go index 7a8d30a3736c..3ebee526997d 100644 --- a/utils/math/continuous_averager_benchmark_test.go +++ b/utils/math/continuous_averager_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math diff --git a/utils/math/continuous_averager_test.go b/utils/math/continuous_averager_test.go index 16f0f6913b90..c169f3903066 100644 --- a/utils/math/continuous_averager_test.go +++ b/utils/math/continuous_averager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/meter/continuous_meter.go b/utils/math/meter/continuous_meter.go index 4bd3f000524e..378248a15027 100644 --- a/utils/math/meter/continuous_meter.go +++ b/utils/math/meter/continuous_meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/utils/math/meter/factory.go b/utils/math/meter/factory.go index 3fe1c3bb04b4..a4d3722e8f3c 100644 --- a/utils/math/meter/factory.go +++ b/utils/math/meter/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/utils/math/meter/meter.go b/utils/math/meter/meter.go index 07c03e37017a..e9ec6782b8c6 100644 --- a/utils/math/meter/meter.go +++ b/utils/math/meter/meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/utils/math/meter/meter_benchmark_test.go b/utils/math/meter/meter_benchmark_test.go index 65f3dcfa56e0..80ed1ad9573c 100644 --- a/utils/math/meter/meter_benchmark_test.go +++ b/utils/math/meter/meter_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/utils/math/meter/meter_test.go b/utils/math/meter/meter_test.go index 9bffb77d9d1e..cbe1bd806b61 100644 --- a/utils/math/meter/meter_test.go +++ b/utils/math/meter/meter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/utils/math/safe_math.go b/utils/math/safe_math.go index 834547589806..8397327414a7 100644 --- a/utils/math/safe_math.go +++ b/utils/math/safe_math.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go index b4ef771eb45e..fc89d2f7f639 100644 --- a/utils/math/safe_math_test.go +++ b/utils/math/safe_math_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/utils/math/sync_averager.go b/utils/math/sync_averager.go index cbe8ba107f7a..92210ab4ca46 100644 --- a/utils/math/sync_averager.go +++ b/utils/math/sync_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package math diff --git a/utils/maybe/maybe.go b/utils/maybe/maybe.go index b4dfc7f9a452..fd50b41534c7 100644 --- a/utils/maybe/maybe.go +++ b/utils/maybe/maybe.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package maybe diff --git a/utils/maybe/maybe_test.go b/utils/maybe/maybe_test.go index 60b599000d0a..93d5fc32e85a 100644 --- a/utils/maybe/maybe_test.go +++ b/utils/maybe/maybe_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package maybe diff --git a/utils/metric/api_interceptor.go b/utils/metric/api_interceptor.go index ab8e4fd8d70c..67ae1a936742 100644 --- a/utils/metric/api_interceptor.go +++ b/utils/metric/api_interceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metric diff --git a/utils/metric/averager.go b/utils/metric/averager.go index da7f0aa5bd48..0f461a567ee7 100644 --- a/utils/metric/averager.go +++ b/utils/metric/averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metric diff --git a/utils/metric/namespace.go b/utils/metric/namespace.go new file mode 100644 index 000000000000..4371bb1dc077 --- /dev/null +++ b/utils/metric/namespace.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metric + +import "strings" + +func AppendNamespace(prefix, suffix string) string { + switch { + case len(prefix) == 0: + return suffix + case len(suffix) == 0: + return prefix + default: + return strings.Join([]string{prefix, suffix}, "_") + } +} diff --git a/utils/metric/namespace_test.go b/utils/metric/namespace_test.go new file mode 100644 index 000000000000..b1daf8ec11b1 --- /dev/null +++ b/utils/metric/namespace_test.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metric + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAppendNamespace(t *testing.T) { + tests := []struct { + prefix string + suffix string + expected string + }{ + { + prefix: "avalanchego", + suffix: "isgreat", + expected: "avalanchego_isgreat", + }, + { + prefix: "", + suffix: "sucks", + expected: "sucks", + }, + { + prefix: "sucks", + suffix: "", + expected: "sucks", + }, + { + prefix: "", + suffix: "", + expected: "", + }, + } + for _, test := range tests { + t.Run(strings.Join([]string{test.prefix, test.suffix}, "_"), func(t *testing.T) { + namespace := AppendNamespace(test.prefix, test.suffix) + require.Equal(t, test.expected, namespace) + }) + } +} diff --git a/utils/password/hash.go b/utils/password/hash.go index 19c6f731f414..fc304bd50118 100644 --- a/utils/password/hash.go +++ b/utils/password/hash.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package password diff --git a/utils/password/hash_test.go b/utils/password/hash_test.go index c2d87a9744c4..07f56d0304f3 100644 --- a/utils/password/hash_test.go +++ b/utils/password/hash_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/utils/password/password.go b/utils/password/password.go index 1efe3ac8a4f9..fa4d240eed2e 100644 --- a/utils/password/password.go +++ b/utils/password/password.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/utils/password/password_test.go b/utils/password/password_test.go index 0b445fbf8545..2c4f534c857a 100644 --- a/utils/password/password_test.go +++ b/utils/password/password_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/utils/perms/chmod.go b/utils/perms/chmod.go index 5b4ff4a3b02c..a5a8710b97b8 100644 --- a/utils/perms/chmod.go +++ b/utils/perms/chmod.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/utils/perms/create.go b/utils/perms/create.go index 8d91baea0683..123637e13443 100644 --- a/utils/perms/create.go +++ b/utils/perms/create.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/utils/perms/perms.go b/utils/perms/perms.go index e89dcc949984..0bb633d900f7 100644 --- a/utils/perms/perms.go +++ b/utils/perms/perms.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/utils/perms/write_file.go b/utils/perms/write_file.go index 4671c8bc66fe..f716459ab678 100644 --- a/utils/perms/write_file.go +++ b/utils/perms/write_file.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/utils/profiler/continuous.go b/utils/profiler/continuous.go index 548e88779b22..1f698395fd25 100644 --- a/utils/profiler/continuous.go +++ b/utils/profiler/continuous.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler @@ -37,7 +37,7 @@ type continuousProfiler struct { func NewContinuous(dir string, freq time.Duration, maxNumFiles int) ContinuousProfiler { return &continuousProfiler{ - profiler: new(dir), + profiler: newProfiler(dir), freq: freq, maxNumFiles: maxNumFiles, closer: make(chan struct{}), diff --git a/utils/profiler/profiler.go b/utils/profiler/profiler.go index c35606e7df91..00a540cc9766 100644 --- a/utils/profiler/profiler.go +++ b/utils/profiler/profiler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler @@ -23,6 +23,8 @@ const ( ) var ( + _ Profiler = (*profiler)(nil) + errCPUProfilerRunning = errors.New("cpu profiler already running") errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist") ) @@ -53,10 +55,10 @@ type profiler struct { } func New(dir string) Profiler { - return new(dir) + return newProfiler(dir) } -func new(dir string) *profiler { +func newProfiler(dir string) *profiler { return &profiler{ dir: dir, cpuProfileName: filepath.Join(dir, cpuProfileFile), diff --git a/utils/profiler/profiler_test.go b/utils/profiler/profiler_test.go index 2d7d864aca62..17ae695e38c2 100644 --- a/utils/profiler/profiler_test.go +++ b/utils/profiler/profiler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler diff --git a/utils/resource/metrics.go b/utils/resource/metrics.go index e20458c42fb1..3ce87ade258c 100644 --- a/utils/resource/metrics.go +++ b/utils/resource/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/utils/resource/mock_user.go b/utils/resource/mock_user.go index 64b156ac1c1f..d333f2c58e4b 100644 --- a/utils/resource/mock_user.go +++ b/utils/resource/mock_user.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/resource (interfaces: User) +// +// Generated by this command: +// +// mockgen -package=resource -destination=utils/resource/mock_user.go github.com/ava-labs/avalanchego/utils/resource User +// // Package resource is a generated GoMock package. package resource diff --git a/utils/resource/no_usage.go b/utils/resource/no_usage.go index 8a10d11ced2a..baa42437fc11 100644 --- a/utils/resource/no_usage.go +++ b/utils/resource/no_usage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/utils/resource/usage.go b/utils/resource/usage.go index d5a06d990c85..1eedbee04c14 100644 --- a/utils/resource/usage.go +++ b/utils/resource/usage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/utils/resource/usage_test.go b/utils/resource/usage_test.go index 5c1df7814f6a..b0ee74ec07a1 100644 --- a/utils/resource/usage_test.go +++ b/utils/resource/usage_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/utils/rpc/json.go b/utils/rpc/json.go index 72ddb3aac8a7..9b87661ea529 100644 --- a/utils/rpc/json.go +++ b/utils/rpc/json.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
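The blank-identifier assignment added to profiler.go is the standard compile-time assertion idiom. A self-contained sketch of the same pattern with hypothetical names (not the profiler API itself):

package main

type Closer interface {
	Close() error
}

type fileCloser struct{}

func (*fileCloser) Close() error { return nil }

// Compile-time assertion: the build breaks if *fileCloser ever stops
// implementing Closer, at zero runtime cost. This is what the added
// `_ Profiler = (*profiler)(nil)` line does for the profiler type.
var _ Closer = (*fileCloser)(nil)

func main() {}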
package rpc diff --git a/utils/rpc/options.go b/utils/rpc/options.go index ce79bc25920e..79c32c72b152 100644 --- a/utils/rpc/options.go +++ b/utils/rpc/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc diff --git a/utils/rpc/requester.go b/utils/rpc/requester.go index 49f3ffa0ef6d..6f2e312f66da 100644 --- a/utils/rpc/requester.go +++ b/utils/rpc/requester.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc diff --git a/utils/sampler/rand.go b/utils/sampler/rand.go index 7ec14e6b275a..ce62d4a90ffc 100644 --- a/utils/sampler/rand.go +++ b/utils/sampler/rand.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -21,26 +21,15 @@ func newRNG() *rng { return &rng{rng: source} } -func Seed(seed int64) { - globalRNG.Seed(seed) -} - -type source interface { - Seed(uint64) - Uint64() uint64 -} - type rng struct { lock sync.Mutex - rng source + rng Source } -// Seed uses the provided seed value to initialize the generator to a -// deterministic state. -func (r *rng) Seed(seed int64) { - r.lock.Lock() - r.rng.Seed(uint64(seed)) - r.lock.Unlock() +type Source interface { + // Uint64 returns a random number in [0, MaxUint64] and advances the + // generator's state. + Uint64() uint64 } // Uint64Inclusive returns a pseudo-random number in [0,n]. diff --git a/utils/sampler/rand_test.go b/utils/sampler/rand_test.go index 362093a695ac..febffa60a4ec 100644 --- a/utils/sampler/rand_test.go +++ b/utils/sampler/rand_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/require" "github.com/thepudds/fzgen/fuzzer" + + "gonum.org/v1/gonum/mathext/prng" ) type testSource struct { @@ -208,3 +210,21 @@ func FuzzRNG(f *testing.F) { require.Len(stdSource.nums, len(source.nums)) }) } + +func BenchmarkSeed32(b *testing.B) { + source := prng.NewMT19937() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + source.Seed(0) + } +} + +func BenchmarkSeed64(b *testing.B) { + source := prng.NewMT19937_64() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + source.Seed(0) + } +} diff --git a/utils/sampler/uniform.go b/utils/sampler/uniform.go index 65c97f40b1a8..5ae9a21d8822 100644 --- a/utils/sampler/uniform.go +++ b/utils/sampler/uniform.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -11,14 +11,22 @@ type Uniform interface { // negative the implementation may panic. 
Sample(length int) ([]uint64, error) - Seed(int64) - ClearSeed() - Reset() Next() (uint64, error) } // NewUniform returns a new sampler func NewUniform() Uniform { - return &uniformReplacer{} + return &uniformReplacer{ + rng: globalRNG, + } +} + +// NewDeterministicUniform returns a new sampler +func NewDeterministicUniform(source Source) Uniform { + return &uniformReplacer{ + rng: &rng{ + rng: source, + }, + } } diff --git a/utils/sampler/uniform_benchmark_test.go b/utils/sampler/uniform_benchmark_test.go index 51d180b33db9..915fe45cc749 100644 --- a/utils/sampler/uniform_benchmark_test.go +++ b/utils/sampler/uniform_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/uniform_best.go b/utils/sampler/uniform_best.go index 9ce1ed7f7187..21f7870d5bdc 100644 --- a/utils/sampler/uniform_best.go +++ b/utils/sampler/uniform_best.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -29,8 +29,12 @@ type uniformBest struct { func NewBestUniform(expectedSampleSize int) Uniform { return &uniformBest{ samplers: []Uniform{ - &uniformReplacer{}, - &uniformResample{}, + &uniformReplacer{ + rng: globalRNG, + }, + &uniformResample{ + rng: globalRNG, + }, }, maxSampleSize: expectedSampleSize, benchmarkIterations: 100, diff --git a/utils/sampler/uniform_replacer.go b/utils/sampler/uniform_replacer.go index 9ac1811c4b47..98d3e5acbe0d 100644 --- a/utils/sampler/uniform_replacer.go +++ b/utils/sampler/uniform_replacer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -27,15 +27,12 @@ func (m defaultMap) get(key uint64, defaultVal uint64) uint64 { // Sampling is performed in O(count) time and O(count) space. type uniformReplacer struct { rng *rng - seededRNG *rng length uint64 drawn defaultMap drawsCount uint64 } func (s *uniformReplacer) Initialize(length uint64) { - s.rng = globalRNG - s.seededRNG = newRNG() s.length = length s.drawn = make(defaultMap) s.drawsCount = 0 @@ -55,15 +52,6 @@ func (s *uniformReplacer) Sample(count int) ([]uint64, error) { return results, nil } -func (s *uniformReplacer) Seed(seed int64) { - s.rng = s.seededRNG - s.rng.Seed(seed) -} - -func (s *uniformReplacer) ClearSeed() { - s.rng = globalRNG -} - func (s *uniformReplacer) Reset() { maps.Clear(s.drawn) s.drawsCount = 0 diff --git a/utils/sampler/uniform_resample.go b/utils/sampler/uniform_resample.go index 8f09e95f777c..1e48d51fa421 100644 --- a/utils/sampler/uniform_resample.go +++ b/utils/sampler/uniform_resample.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -15,15 +15,12 @@ import "golang.org/x/exp/maps" // // Sampling is performed in O(count) time and O(count) space. 
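With Seed and ClearSeed removed, determinism is now requested at construction time by injecting a Source. A minimal sketch using the gonum MT19937_64 generator that the new benchmarks above exercise; the seed and sample range are illustrative values:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mathext/prng"

	"github.com/ava-labs/avalanchego/utils/sampler"
)

func main() {
	// Seed the generator itself; the sampler no longer exposes Seed.
	source := prng.NewMT19937_64()
	source.Seed(2024)

	// Any type with a Uint64() uint64 method satisfies sampler.Source.
	s := sampler.NewDeterministicUniform(source)
	s.Initialize(100)

	// Three distinct values in [0, 100), reproducible for a given seed.
	values, err := s.Sample(3)
	if err != nil {
		panic(err)
	}
	fmt.Println(values)
}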
type uniformResample struct { - rng *rng - seededRNG *rng - length uint64 - drawn map[uint64]struct{} + rng *rng + length uint64 + drawn map[uint64]struct{} } func (s *uniformResample) Initialize(length uint64) { - s.rng = globalRNG - s.seededRNG = newRNG() s.length = length s.drawn = make(map[uint64]struct{}) } @@ -42,15 +39,6 @@ func (s *uniformResample) Sample(count int) ([]uint64, error) { return results, nil } -func (s *uniformResample) Seed(seed int64) { - s.rng = s.seededRNG - s.rng.Seed(seed) -} - -func (s *uniformResample) ClearSeed() { - s.rng = globalRNG -} - func (s *uniformResample) Reset() { maps.Clear(s.drawn) } diff --git a/utils/sampler/uniform_test.go b/utils/sampler/uniform_test.go index e5b00af31c26..451a26625424 100644 --- a/utils/sampler/uniform_test.go +++ b/utils/sampler/uniform_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -19,12 +19,16 @@ var ( sampler Uniform }{ { - name: "replacer", - sampler: &uniformReplacer{}, + name: "replacer", + sampler: &uniformReplacer{ + rng: globalRNG, + }, }, { - name: "resampler", - sampler: &uniformResample{}, + name: "resampler", + sampler: &uniformResample{ + rng: globalRNG, + }, }, { name: "best", @@ -156,53 +160,3 @@ func UniformLazilySample(t *testing.T, s Uniform) { s.Reset() } } - -func TestSeeding(t *testing.T) { - require := require.New(t) - - s1 := NewBestUniform(30) - s2 := NewBestUniform(30) - - s1.Initialize(50) - s2.Initialize(50) - - s1.Seed(0) - - s1.Reset() - s1Val, err := s1.Next() - require.NoError(err) - - s2.Seed(1) - s2.Reset() - - s1.Seed(0) - v, err := s2.Next() - require.NoError(err) - require.NotEqual(s1Val, v) - - s1.ClearSeed() - - _, err = s1.Next() - require.NoError(err) -} - -func TestSeedingProducesTheSame(t *testing.T) { - require := require.New(t) - - s := NewBestUniform(30) - - s.Initialize(50) - - s.Seed(0) - s.Reset() - - val0, err := s.Next() - require.NoError(err) - - s.Seed(0) - s.Reset() - - val1, err := s.Next() - require.NoError(err) - require.Equal(val0, val1) -} diff --git a/utils/sampler/weighted.go b/utils/sampler/weighted.go index 69212d4351c2..2296da08e97a 100644 --- a/utils/sampler/weighted.go +++ b/utils/sampler/weighted.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/weighted_array.go b/utils/sampler/weighted_array.go index 0db1dda17af9..e13788422b85 100644 --- a/utils/sampler/weighted_array.go +++ b/utils/sampler/weighted_array.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -19,8 +19,8 @@ type weightedArrayElement struct { } // Note that this sorts in order of decreasing weight. 
-func (e weightedArrayElement) Less(other weightedArrayElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedArrayElement) Compare(other weightedArrayElement) int { + return utils.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a modified binary search over the provided diff --git a/utils/sampler/weighted_array_test.go b/utils/sampler/weighted_array_test.go index e10583633436..866a0c7d2f2c 100644 --- a/utils/sampler/weighted_array_test.go +++ b/utils/sampler/weighted_array_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedArrayElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedArrayElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedArrayElement{ - cumulativeWeight: 1, +func TestWeightedArrayElementCompare(t *testing.T) { + tests := []struct { + a weightedArrayElement + b weightedArrayElement + expected int + }{ + { + a: weightedArrayElement{}, + b: weightedArrayElement{}, + expected: 0, + }, + { + a: weightedArrayElement{ + cumulativeWeight: 1, + }, + b: weightedArrayElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedArrayElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/utils/sampler/weighted_benchmark_test.go b/utils/sampler/weighted_benchmark_test.go index 22c1a5f7def7..897e001935a1 100644 --- a/utils/sampler/weighted_benchmark_test.go +++ b/utils/sampler/weighted_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/weighted_best.go b/utils/sampler/weighted_best.go index 276a9b475a9d..59bf60019144 100644 --- a/utils/sampler/weighted_best.go +++ b/utils/sampler/weighted_best.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/weighted_heap.go b/utils/sampler/weighted_heap.go index 4b7fb84df482..866c23893fc2 100644 --- a/utils/sampler/weighted_heap.go +++ b/utils/sampler/weighted_heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -19,17 +19,16 @@ type weightedHeapElement struct { index int } -func (e weightedHeapElement) Less(other weightedHeapElement) bool { +// Compare the elements. Weight is in decreasing order. Index is in increasing +// order. +func (e weightedHeapElement) Compare(other weightedHeapElement) int { // By accounting for the initial index of the weights, this results in a // stable sort. 
We do this rather than using `sort.Stable` because of the // reported change in performance of the sort used. - if e.weight > other.weight { - return true + if weightCmp := utils.Compare(other.weight, e.weight); weightCmp != 0 { + return weightCmp } - if e.weight < other.weight { - return false - } - return e.index < other.index + return utils.Compare(e.index, other.index) } // Sampling is performed by executing a search over a tree of elements in the diff --git a/utils/sampler/weighted_heap_test.go b/utils/sampler/weighted_heap_test.go index 3187c14fa10a..03aa94dfa2f9 100644 --- a/utils/sampler/weighted_heap_test.go +++ b/utils/sampler/weighted_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -23,57 +23,44 @@ func TestWeightedHeapInitialize(t *testing.T) { } } -func TestWeightedHeapElementLess(t *testing.T) { +func TestWeightedHeapElementCompare(t *testing.T) { type test struct { name string elt1 weightedHeapElement elt2 weightedHeapElement - expected bool + expected int } tests := []test{ { name: "all same", elt1: weightedHeapElement{}, elt2: weightedHeapElement{}, - expected: false, + expected: 0, }, { - name: "first lower weight", + name: "lower weight", elt1: weightedHeapElement{}, elt2: weightedHeapElement{ weight: 1, }, - expected: false, + expected: 1, }, { - name: "first higher weight", - elt1: weightedHeapElement{ - weight: 1, - }, - elt2: weightedHeapElement{}, - expected: true, - }, - { - name: "first higher index", + name: "higher index", elt1: weightedHeapElement{ index: 1, }, elt2: weightedHeapElement{}, - expected: false, - }, - { - name: "second higher index", - elt1: weightedHeapElement{}, - elt2: weightedHeapElement{ - index: 1, - }, - expected: true, + expected: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.elt1.Less(tt.elt2)) + require := require.New(t) + + require.Equal(tt.expected, tt.elt1.Compare(tt.elt2)) + require.Equal(-tt.expected, tt.elt2.Compare(tt.elt1)) }) } } diff --git a/utils/sampler/weighted_linear.go b/utils/sampler/weighted_linear.go index a5d0e3b16711..496613aea6dc 100644 --- a/utils/sampler/weighted_linear.go +++ b/utils/sampler/weighted_linear.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -19,8 +19,8 @@ type weightedLinearElement struct { } // Note that this sorts in order of decreasing cumulative weight. -func (e weightedLinearElement) Less(other weightedLinearElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedLinearElement) Compare(other weightedLinearElement) int { + return utils.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a linear search over the provided elements diff --git a/utils/sampler/weighted_linear_test.go b/utils/sampler/weighted_linear_test.go index b34035017b4a..dd86679de627 100644 --- a/utils/sampler/weighted_linear_test.go +++ b/utils/sampler/weighted_linear_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedLinearElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedLinearElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedLinearElement{ - cumulativeWeight: 1, +func TestWeightedLinearElementCompare(t *testing.T) { + tests := []struct { + a weightedLinearElement + b weightedLinearElement + expected int + }{ + { + a: weightedLinearElement{}, + b: weightedLinearElement{}, + expected: 0, + }, + { + a: weightedLinearElement{ + cumulativeWeight: 1, + }, + b: weightedLinearElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedLinearElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/utils/sampler/weighted_test.go b/utils/sampler/weighted_test.go index d67d86251c69..ea08230d175a 100644 --- a/utils/sampler/weighted_test.go +++ b/utils/sampler/weighted_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/weighted_uniform.go b/utils/sampler/weighted_uniform.go index bff76ead34e4..22dbb6b5ebd5 100644 --- a/utils/sampler/weighted_uniform.go +++ b/utils/sampler/weighted_uniform.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/utils/sampler/weighted_without_replacement.go b/utils/sampler/weighted_without_replacement.go index a6039a65d82c..a6b619928742 100644 --- a/utils/sampler/weighted_without_replacement.go +++ b/utils/sampler/weighted_without_replacement.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -9,15 +9,12 @@ package sampler type WeightedWithoutReplacement interface { Initialize(weights []uint64) error Sample(count int) ([]int, error) - - Seed(int64) - ClearSeed() } // NewWeightedWithoutReplacement returns a new sampler -func NewDeterministicWeightedWithoutReplacement() WeightedWithoutReplacement { +func NewDeterministicWeightedWithoutReplacement(source Source) WeightedWithoutReplacement { return &weightedWithoutReplacementGeneric{ - u: NewUniform(), + u: NewDeterministicUniform(source), w: NewDeterministicWeighted(), } } diff --git a/utils/sampler/weighted_without_replacement_benchmark_test.go b/utils/sampler/weighted_without_replacement_benchmark_test.go index 03459a5e757b..58becf9d2311 100644 --- a/utils/sampler/weighted_without_replacement_benchmark_test.go +++ b/utils/sampler/weighted_without_replacement_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
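The deterministic weighted-without-replacement constructor now threads the same Source through to its internal uniform sampler. A minimal sketch under that interface (the weights and seed are illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mathext/prng"

	"github.com/ava-labs/avalanchego/utils/sampler"
)

func main() {
	source := prng.NewMT19937_64()
	source.Seed(1)

	w := sampler.NewDeterministicWeightedWithoutReplacement(source)
	if err := w.Initialize([]uint64{10, 20, 70}); err != nil {
		panic(err)
	}

	// Two distinct indices, biased towards the heavier weights and
	// reproducible for a given seed.
	indices, err := w.Sample(2)
	if err != nil {
		panic(err)
	}
	fmt.Println(indices)
}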
package sampler diff --git a/utils/sampler/weighted_without_replacement_generic.go b/utils/sampler/weighted_without_replacement_generic.go index 17bd8a648185..c45d64d0b2b0 100644 --- a/utils/sampler/weighted_without_replacement_generic.go +++ b/utils/sampler/weighted_without_replacement_generic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -41,11 +41,3 @@ func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, error) { } return indices, nil } - -func (s *weightedWithoutReplacementGeneric) Seed(seed int64) { - s.u.Seed(seed) -} - -func (s *weightedWithoutReplacementGeneric) ClearSeed() { - s.u.ClearSeed() -} diff --git a/utils/sampler/weighted_without_replacement_test.go b/utils/sampler/weighted_without_replacement_test.go index fe2dadaeb5c6..48952d5cac14 100644 --- a/utils/sampler/weighted_without_replacement_test.go +++ b/utils/sampler/weighted_without_replacement_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -23,7 +23,9 @@ var ( { name: "generic with replacer and best", sampler: &weightedWithoutReplacementGeneric{ - u: &uniformReplacer{}, + u: &uniformReplacer{ + rng: globalRNG, + }, w: &weightedBest{ samplers: []Weighted{ &weightedArray{}, diff --git a/utils/set/bits.go b/utils/set/bits.go index 344c8dff6781..a6e74fb6c18e 100644 --- a/utils/set/bits.go +++ b/utils/set/bits.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/bits_64.go b/utils/set/bits_64.go index eed00afdedc6..d67c405ac922 100644 --- a/utils/set/bits_64.go +++ b/utils/set/bits_64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/bits_64_test.go b/utils/set/bits_64_test.go index 87374b4f297a..b31bf9979730 100644 --- a/utils/set/bits_64_test.go +++ b/utils/set/bits_64_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/bits_test.go b/utils/set/bits_test.go index 5541395a5aa4..12efb343acd6 100644 --- a/utils/set/bits_test.go +++ b/utils/set/bits_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/sampleable_set.go b/utils/set/sampleable_set.go index 0d22f40fbf8a..becd228fac43 100644 --- a/utils/set/sampleable_set.go +++ b/utils/set/sampleable_set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package set @@ -176,7 +176,7 @@ func (s *SampleableSet[_]) MarshalJSON() ([]byte, error) { } } // Sort for determinism - utils.SortBytes(elementBytes) + slices.SortFunc(elementBytes, bytes.Compare) // Build the JSON var ( diff --git a/utils/set/sampleable_set_test.go b/utils/set/sampleable_set_test.go index 5f19c13db9fe..31bd07712db5 100644 --- a/utils/set/sampleable_set_test.go +++ b/utils/set/sampleable_set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/set.go b/utils/set/set.go index 29eb8fe11bd4..84cb5d46cd95 100644 --- a/utils/set/set.go +++ b/utils/set/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set @@ -9,6 +9,7 @@ import ( stdjson "encoding/json" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" @@ -118,27 +119,6 @@ func (s Set[T]) List() []T { return maps.Keys(s) } -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. -func (s Set[T]) CappedList(size int) []T { - if size < 0 { - return nil - } - if l := s.Len(); l < size { - size = l - } - i := 0 - elts := make([]T, size) - for elt := range s { - if i >= size { - break - } - elts[i] = elt - i++ - } - return elts -} - // Equals returns true if the sets contain the same elements func (s Set[T]) Equals(other Set[T]) bool { return maps.Equal(s, other) @@ -182,7 +162,7 @@ func (s Set[_]) MarshalJSON() ([]byte, error) { i++ } // Sort for determinism - utils.SortBytes(eltBytes) + slices.SortFunc(eltBytes, bytes.Compare) // Build the JSON var ( @@ -205,7 +185,7 @@ func (s Set[_]) MarshalJSON() ([]byte, error) { return jsonBuf.Bytes(), errs.Err } -// Returns an element. If the set is empty, returns false +// Returns a random element. If the set is empty, returns false func (s *Set[T]) Peek() (T, bool) { for elt := range *s { return elt, true diff --git a/utils/set/set_benchmark_test.go b/utils/set/set_benchmark_test.go index c762b72cba01..300b8c8c0a71 100644 --- a/utils/set/set_benchmark_test.go +++ b/utils/set/set_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/utils/set/set_test.go b/utils/set/set_test.go index bcba36944adf..3b0a7e1822f8 100644 --- a/utils/set/set_test.go +++ b/utils/set/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package set @@ -87,35 +87,6 @@ func TestOf(t *testing.T) { } } -func TestSetCappedList(t *testing.T) { - require := require.New(t) - s := Set[int]{} - - id := 0 - - require.Empty(s.CappedList(0)) - - s.Add(id) - - require.Empty(s.CappedList(0)) - require.Len(s.CappedList(1), 1) - require.Equal(s.CappedList(1)[0], id) - require.Len(s.CappedList(2), 1) - require.Equal(s.CappedList(2)[0], id) - - id2 := 1 - s.Add(id2) - - require.Empty(s.CappedList(0)) - require.Len(s.CappedList(1), 1) - require.Len(s.CappedList(2), 2) - require.Len(s.CappedList(3), 2) - gotList := s.CappedList(2) - require.Contains(gotList, id) - require.Contains(gotList, id2) - require.NotEqual(gotList[0], gotList[1]) -} - func TestSetClear(t *testing.T) { require := require.New(t) diff --git a/utils/setmap/setmap.go b/utils/setmap/setmap.go new file mode 100644 index 000000000000..a2924894dac5 --- /dev/null +++ b/utils/setmap/setmap.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package setmap + +import ( + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" +) + +type Entry[K any, V comparable] struct { + Key K + Set set.Set[V] +} + +// SetMap is a map to a set where all sets are non-overlapping. +type SetMap[K, V comparable] struct { + keyToSet map[K]set.Set[V] + valueToKey map[V]K +} + +// New creates a new empty setmap. +func New[K, V comparable]() *SetMap[K, V] { + return &SetMap[K, V]{ + keyToSet: make(map[K]set.Set[V]), + valueToKey: make(map[V]K), + } +} + +// Put the new entry into the map. Removes and returns: +// * The existing entry for [key]. +// * Existing entries where the set overlaps with the [set]. +func (m *SetMap[K, V]) Put(key K, set set.Set[V]) []Entry[K, V] { + removed := m.DeleteOverlapping(set) + if removedSet, ok := m.DeleteKey(key); ok { + removed = append(removed, Entry[K, V]{ + Key: key, + Set: removedSet, + }) + } + + m.keyToSet[key] = set + for val := range set { + m.valueToKey[val] = key + } + return removed +} + +// GetKey that maps to the provided value. +func (m *SetMap[K, V]) GetKey(val V) (K, bool) { + key, ok := m.valueToKey[val] + return key, ok +} + +// GetSet that is mapped to by the provided key. +func (m *SetMap[K, V]) GetSet(key K) (set.Set[V], bool) { + val, ok := m.keyToSet[key] + return val, ok +} + +// HasKey returns true if [key] is in the map. +func (m *SetMap[K, _]) HasKey(key K) bool { + _, ok := m.keyToSet[key] + return ok +} + +// HasValue returns true if [val] is in a set in the map. +func (m *SetMap[_, V]) HasValue(val V) bool { + _, ok := m.valueToKey[val] + return ok +} + +// HasOverlap returns true if [set] overlaps with any of the sets in the map. +func (m *SetMap[_, V]) HasOverlap(set set.Set[V]) bool { + if set.Len() < len(m.valueToKey) { + for val := range set { + if _, ok := m.valueToKey[val]; ok { + return true + } + } + } else { + for val := range m.valueToKey { + if set.Contains(val) { + return true + } + } + } + return false +} + +// DeleteKey removes [key] from the map and returns the set it mapped to. +func (m *SetMap[K, V]) DeleteKey(key K) (set.Set[V], bool) { + set, ok := m.keyToSet[key] + if !ok { + return nil, false + } + + delete(m.keyToSet, key) + for val := range set { + delete(m.valueToKey, val) + } + return set, true +} + +// DeleteValue removes and returns the entry that contained [val]. 
+func (m *SetMap[K, V]) DeleteValue(val V) (K, set.Set[V], bool) { + key, ok := m.valueToKey[val] + if !ok { + return utils.Zero[K](), nil, false + } + set, _ := m.DeleteKey(key) + return key, set, true +} + +// DeleteOverlapping removes and returns all the entries where the set overlaps +// with [set]. +func (m *SetMap[K, V]) DeleteOverlapping(set set.Set[V]) []Entry[K, V] { + var removed []Entry[K, V] + for val := range set { + if k, removedSet, ok := m.DeleteValue(val); ok { + removed = append(removed, Entry[K, V]{ + Key: k, + Set: removedSet, + }) + } + } + return removed +} + +// Len return the number of sets in the map. +func (m *SetMap[K, V]) Len() int { + return len(m.keyToSet) +} + +// LenValues return the total number of values across all sets in the map. +func (m *SetMap[K, V]) LenValues() int { + return len(m.valueToKey) +} diff --git a/utils/setmap/setmap_test.go b/utils/setmap/setmap_test.go new file mode 100644 index 000000000000..f3e70985451f --- /dev/null +++ b/utils/setmap/setmap_test.go @@ -0,0 +1,450 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package setmap + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestSetMapPut(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + key int + value set.Set[int] + expectedRemoved []Entry[int, int] + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + value: set.Of(2), + expectedRemoved: nil, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + value: set.Of(3), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(3), + }, + valueToKey: map[int]int{ + 3: 1, + }, + }, + }, + { + name: "value removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 3, + value: set.Of(2), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 3: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 3, + }, + }, + }, + { + name: "key and value removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + 3: set.Of(4), + }, + valueToKey: map[int]int{ + 2: 1, + 4: 3, + }, + }, + key: 1, + value: set.Of(4), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + { + Key: 3, + Set: set.Of(4), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(4), + }, + valueToKey: map[int]int{ + 4: 1, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + removed := test.state.Put(test.key, test.value) + require.ElementsMatch(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapHasValueAndGetKeyAndSetOverlaps(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + + tests := []struct { + name string + value int + expectedKey int + expectedExists bool + }{ + { + name: "fetch unknown", 
+ value: 3, + expectedKey: 0, + expectedExists: false, + }, + { + name: "fetch known value", + value: 2, + expectedKey: 1, + expectedExists: true, + }, + { + name: "fetch known key", + value: 1, + expectedKey: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasValue(test.value) + require.Equal(test.expectedExists, exists) + + key, exists := m.GetKey(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestSetMapHasOverlap(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + require.Empty(t, m.Put(2, set.Of(3, 4))) + + tests := []struct { + name string + set set.Set[int] + expectedOverlaps bool + }{ + { + name: "small fetch unknown", + set: set.Of(5), + expectedOverlaps: false, + }, + { + name: "large fetch unknown", + set: set.Of(5, 6, 7, 8), + expectedOverlaps: false, + }, + { + name: "small fetch known", + set: set.Of(3), + expectedOverlaps: true, + }, + { + name: "large fetch known", + set: set.Of(3, 5, 6, 7, 8), + expectedOverlaps: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + overlaps := m.HasOverlap(test.set) + require.Equal(t, test.expectedOverlaps, overlaps) + }) + } +} + +func TestSetMapHasKeyAndGetSet(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + + tests := []struct { + name string + key int + expectedValue set.Set[int] + expectedExists bool + }{ + { + name: "fetch unknown", + key: 3, + expectedValue: nil, + expectedExists: false, + }, + { + name: "fetch known key", + key: 1, + expectedValue: set.Of(2), + expectedExists: true, + }, + { + name: "fetch known value", + key: 2, + expectedValue: nil, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasKey(test.key) + require.Equal(test.expectedExists, exists) + + value, exists := m.GetSet(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestSetMapDeleteKey(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + key int + expectedValue set.Set[int] + expectedRemoved bool + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + expectedValue: nil, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + expectedValue: set.Of(2), + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + value, removed := test.state.DeleteKey(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapDeleteValue(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + value int + expectedKey int + expectedSet set.Set[int] + expectedRemoved bool + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + value: 1, + expectedKey: 0, + expectedSet: nil, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: 
map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + value: 2, + expectedKey: 1, + expectedSet: set.Of(2), + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + key, set, removed := test.state.DeleteValue(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedSet, set) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapDeleteOverlapping(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + set set.Set[int] + expectedRemoved []Entry[int, int] + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + set: set.Of(1), + expectedRemoved: nil, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + set: set.Of(2), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: New[int, int](), + }, + { + name: "multiple keys removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2, 3), + 2: set.Of(4), + }, + valueToKey: map[int]int{ + 2: 1, + 3: 1, + 4: 2, + }, + }, + set: set.Of(2, 4), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2, 3), + }, + { + Key: 2, + Set: set.Of(4), + }, + }, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + removed := test.state.DeleteOverlapping(test.set) + require.ElementsMatch(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapLen(t *testing.T) { + require := require.New(t) + + m := New[int, int]() + require.Zero(m.Len()) + require.Zero(m.LenValues()) + + m.Put(1, set.Of(2)) + require.Equal(1, m.Len()) + require.Equal(1, m.LenValues()) + + m.Put(2, set.Of(3, 4)) + require.Equal(2, m.Len()) + require.Equal(3, m.LenValues()) + + m.Put(1, set.Of(4, 5)) + require.Equal(1, m.Len()) + require.Equal(2, m.LenValues()) + + m.DeleteKey(1) + require.Zero(m.Len()) + require.Zero(m.LenValues()) +} diff --git a/utils/sorting.go b/utils/sorting.go index a448b8f5e7ee..070375811ee3 100644 --- a/utils/sorting.go +++ b/utils/sorting.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils @@ -12,32 +12,23 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -// TODO can we handle sorting where the Less function relies on a codec? +// TODO can we handle sorting where the Compare function relies on a codec? type Sortable[T any] interface { - Less(T) bool + Compare(T) int } // Sorts the elements of [s]. func Sort[T Sortable[T]](s []T) { - slices.SortFunc(s, T.Less) + slices.SortFunc(s, T.Compare) } // Sorts the elements of [s] based on their hashes. func SortByHash[T ~[]byte](s []T) { - slices.SortFunc(s, func(i, j T) bool { + slices.SortFunc(s, func(i, j T) int { iHash := hashing.ComputeHash256(i) jHash := hashing.ComputeHash256(j) - return bytes.Compare(iHash, jHash) == -1 - }) -} - -// Sorts a 2D byte slice. -// Each byte slice is not sorted internally; the byte slices are sorted relative -// to one another. 
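A minimal usage sketch of the setmap package introduced above: it maintains a key-to-set mapping in which the sets never overlap, so putting an entry that conflicts with an existing key or value evicts and returns the old entries. Keys and values here are illustrative:

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils/set"
	"github.com/ava-labs/avalanchego/utils/setmap"
)

func main() {
	m := setmap.New[string, int]()

	m.Put("a", set.Of(1, 2))
	m.Put("b", set.Of(3))

	// "c" claims value 3, so the entry for "b" is removed and returned.
	removed := m.Put("c", set.Of(3, 4))
	fmt.Println(len(removed))   // 1
	fmt.Println(m.HasKey("b"))  // false
	fmt.Println(m.LenValues())  // 4 (values 1, 2, 3, 4 across two sets)
}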
-func SortBytes[T ~[]byte](s []T) { - slices.SortFunc(s, func(i, j T) bool { - return bytes.Compare(i, j) == -1 + return bytes.Compare(iHash, jHash) }) } @@ -54,7 +45,7 @@ func IsSortedBytes[T ~[]byte](s []T) bool { // Returns true iff the elements in [s] are unique and sorted. func IsSortedAndUnique[T Sortable[T]](s []T) bool { for i := 0; i < len(s)-1; i++ { - if !s[i].Less(s[i+1]) { + if s[i].Compare(s[i+1]) >= 0 { return false } } @@ -88,15 +79,20 @@ func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { return true } -// Returns true iff the elements in [s] are unique. -func IsUnique[T comparable](s []T) bool { - // Can't use set.Set because it'd be a circular import. - asMap := make(map[T]struct{}, len(s)) - for _, elt := range s { - if _, ok := asMap[elt]; ok { - return false - } - asMap[elt] = struct{}{} +// Compare returns +// +// -1 if x is less than y, +// 0 if x equals y, +// 1 if x is greater than y. +// +// TODO: Remove after updating to go1.21. +func Compare[T constraints.Ordered](x, y T) int { + switch { + case x < y: + return -1 + case x > y: + return 1 + default: + return 0 } - return true } diff --git a/utils/sorting_test.go b/utils/sorting_test.go index 714fd7d87ec6..acab335034ed 100644 --- a/utils/sorting_test.go +++ b/utils/sorting_test.go @@ -1,12 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils import ( - "math/rand" "testing" - "time" "github.com/stretchr/testify/require" ) @@ -15,8 +13,8 @@ var _ Sortable[sortable] = sortable(0) type sortable int -func (s sortable) Less(other sortable) bool { - return s < other +func (s sortable) Compare(other sortable) int { + return Compare(s, other) } func TestSortSliceSortable(t *testing.T) { @@ -59,23 +57,6 @@ func TestSortSliceSortable(t *testing.T) { require.Equal([]sortable{1, 2, 3}, s) } -func TestSortBytesIsSortedBytes(t *testing.T) { - require := require.New(t) - - seed := time.Now().UnixNano() - t.Log("Seed: ", seed) - rand := rand.New(rand.NewSource(seed)) //#nosec G404 - - slices := make([][]byte, 1024) - for j := 0; j < len(slices); j++ { - slices[j] = make([]byte, 32) - _, _ = rand.Read(slices[j]) - } - require.False(IsSortedBytes(slices)) - SortBytes(slices) - require.True(IsSortedBytes(slices)) -} - func TestIsSortedAndUniqueSortable(t *testing.T) { require := require.New(t) @@ -104,31 +85,6 @@ func TestIsSortedAndUniqueSortable(t *testing.T) { require.False(IsSortedAndUnique(s)) } -func TestIsUnique(t *testing.T) { - require := require.New(t) - - var s []int - require.True(IsUnique(s)) - - s = []int{} - require.True(IsUnique(s)) - - s = []int{1} - require.True(IsUnique(s)) - - s = []int{1, 2} - require.True(IsUnique(s)) - - s = []int{1, 1} - require.False(IsUnique(s)) - - s = []int{2, 1} - require.True(IsUnique(s)) - - s = []int{1, 2, 1} - require.False(IsUnique(s)) -} - func TestSortByHash(t *testing.T) { require := require.New(t) diff --git a/utils/stacktrace.go b/utils/stacktrace.go index d68ee4ea69cf..d0e0de56dc9d 100644 --- a/utils/stacktrace.go +++ b/utils/stacktrace.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
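The Sortable interface now uses the three-way Compare convention (negative, zero, positive) instead of Less. A minimal sketch of a type written against the new convention, using the utils.Compare helper added above; the height type is an illustrative example, not part of the codebase:

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils"
)

type height uint64

// Compare delegates to the generic helper for ordered types.
func (h height) Compare(other height) int {
	return utils.Compare(h, other)
}

func main() {
	hs := []height{3, 1, 2, 2}
	utils.Sort(hs)
	fmt.Println(hs) // [1 2 2 3]

	// Duplicate elements compare equal, so the slice is sorted but not unique.
	fmt.Println(utils.IsSortedAndUnique(hs)) // false
}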
package utils diff --git a/utils/storage/storage_common.go b/utils/storage/storage_common.go index cf1fbd3b895f..6fa5692cb86a 100644 --- a/utils/storage/storage_common.go +++ b/utils/storage/storage_common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package storage diff --git a/utils/storage/storage_unix.go b/utils/storage/storage_unix.go index ae75ed4e833f..247bc2448f36 100644 --- a/utils/storage/storage_unix.go +++ b/utils/storage/storage_unix.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build !windows diff --git a/utils/storage/storage_windows.go b/utils/storage/storage_windows.go index e9242e2d2f1e..2514717c8bea 100644 --- a/utils/storage/storage_windows.go +++ b/utils/storage/storage_windows.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build windows diff --git a/utils/timer/adaptive_timeout_manager.go b/utils/timer/adaptive_timeout_manager.go index a6d00654c064..493769018ba2 100644 --- a/utils/timer/adaptive_timeout_manager.go +++ b/utils/timer/adaptive_timeout_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/timer/adaptive_timeout_manager_test.go b/utils/timer/adaptive_timeout_manager_test.go index ec9964bd5a7f..40b4186011f4 100644 --- a/utils/timer/adaptive_timeout_manager_test.go +++ b/utils/timer/adaptive_timeout_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/timer/eta.go b/utils/timer/eta.go index 490574864087..6af353fd1883 100644 --- a/utils/timer/eta.go +++ b/utils/timer/eta.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/timer/meter.go b/utils/timer/meter.go index c78376e1c62f..e0459e92ee2f 100644 --- a/utils/timer/meter.go +++ b/utils/timer/meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/timer/mockable/clock.go b/utils/timer/mockable/clock.go index 23857a22efac..753da957ec97 100644 --- a/utils/timer/mockable/clock.go +++ b/utils/timer/mockable/clock.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mockable diff --git a/utils/timer/mockable/clock_test.go b/utils/timer/mockable/clock_test.go index eee8922e9e58..a15e71efdcfb 100644 --- a/utils/timer/mockable/clock_test.go +++ b/utils/timer/mockable/clock_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package mockable diff --git a/utils/timer/staged_timer.go b/utils/timer/staged_timer.go deleted file mode 100644 index eec885ee63d9..000000000000 --- a/utils/timer/staged_timer.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import "time" - -// NewStagedTimer returns a timer that will execute [f] -// when a timeout occurs and execute an additional timeout after -// the returned duration if [f] returns true and some duration. -// -// Deprecated: NewStagedTimer exists for historical compatibility -// and should not be used. -func NewStagedTimer(f func() (time.Duration, bool)) *Timer { - t := NewTimer(nil) - t.handler = func() { - delay, repeat := f() - if repeat { - t.SetTimeoutIn(delay) - } - } - return t -} diff --git a/utils/timer/staged_timer_test.go b/utils/timer/staged_timer_test.go deleted file mode 100644 index bd83ef206078..000000000000 --- a/utils/timer/staged_timer_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestSingleStagedTimer(t *testing.T) { - wg := sync.WaitGroup{} - wg.Add(1) - ticks := 1 - i := 0 - timer := NewStagedTimer(func() (time.Duration, bool) { - defer wg.Done() - i++ - return 0, false - }) - go timer.Dispatch() - - timer.SetTimeoutIn(time.Millisecond) - wg.Wait() - require.Equal(t, i, ticks) -} - -func TestMultiStageTimer(t *testing.T) { - wg := sync.WaitGroup{} - ticks := 3 - wg.Add(ticks) - - i := 0 - timer := NewStagedTimer(func() (time.Duration, bool) { - defer wg.Done() - i++ - return time.Millisecond, i < ticks - }) - go timer.Dispatch() - - timer.SetTimeoutIn(time.Millisecond) - wg.Wait() - require.Equal(t, i, ticks) -} diff --git a/utils/timer/timer.go b/utils/timer/timer.go index 1b5914fefc76..b4d3453477d9 100644 --- a/utils/timer/timer.go +++ b/utils/timer/timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/timer/timer_test.go b/utils/timer/timer_test.go index 228b19f28d32..83994f963bf3 100644 --- a/utils/timer/timer_test.go +++ b/utils/timer/timer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/utils/ulimit/ulimit_bsd.go b/utils/ulimit/ulimit_bsd.go index 191b788286d2..bb4c5e150e6c 100644 --- a/utils/ulimit/ulimit_bsd.go +++ b/utils/ulimit/ulimit_bsd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build freebsd @@ -10,6 +10,8 @@ import ( "fmt" "syscall" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/utils/logging" ) diff --git a/utils/ulimit/ulimit_darwin.go b/utils/ulimit/ulimit_darwin.go index 9eaab72bd0f6..224d8faf056e 100644 --- a/utils/ulimit/ulimit_darwin.go +++ b/utils/ulimit/ulimit_darwin.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
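Since the deprecated NewStagedTimer is deleted rather than replaced, callers that still need a repeating timeout can re-arm a plain Timer from its own handler, which is exactly what the deleted wrapper did internally. A minimal sketch under that assumption (the interval and repeat count are illustrative):

package main

import (
	"sync"
	"time"

	"github.com/ava-labs/avalanchego/utils/timer"
)

// repeatEvery fires f up to n times, d apart, by re-arming the timer from
// its handler, mirroring the deleted NewStagedTimer behaviour.
func repeatEvery(d time.Duration, n int, f func()) *timer.Timer {
	var (
		t     *timer.Timer
		fired int
	)
	t = timer.NewTimer(func() {
		f()
		fired++
		if fired < n {
			t.SetTimeoutIn(d)
		}
	})
	go t.Dispatch()
	t.SetTimeoutIn(d)
	return t
}

func main() {
	var wg sync.WaitGroup
	wg.Add(3)
	repeatEvery(time.Millisecond, 3, wg.Done)
	wg.Wait()
}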
// See the file LICENSE for licensing terms. //go:build darwin diff --git a/utils/ulimit/ulimit_unix.go b/utils/ulimit/ulimit_unix.go index 898b361cef92..8b23ab701b53 100644 --- a/utils/ulimit/ulimit_unix.go +++ b/utils/ulimit/ulimit_unix.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build linux || netbsd || openbsd diff --git a/utils/ulimit/ulimit_windows.go b/utils/ulimit/ulimit_windows.go index 7646d6f10d1f..82a88273735b 100644 --- a/utils/ulimit/ulimit_windows.go +++ b/utils/ulimit/ulimit_windows.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build windows diff --git a/utils/units/avax.go b/utils/units/avax.go index 341fd8bea8ad..bbd664928b33 100644 --- a/utils/units/avax.go +++ b/utils/units/avax.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/utils/units/bytes.go b/utils/units/bytes.go index 93678e957a46..42d2526ae257 100644 --- a/utils/units/bytes.go +++ b/utils/units/bytes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/utils/window/window.go b/utils/window/window.go index 245941467983..86dba5b717df 100644 --- a/utils/window/window.go +++ b/utils/window/window.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package window diff --git a/utils/window/window_test.go b/utils/window/window_test.go index d8cf20f870bb..332d20b3b329 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package window diff --git a/utils/wrappers/closers.go b/utils/wrappers/closers.go index d366e928cba0..b16e4baa2831 100644 --- a/utils/wrappers/closers.go +++ b/utils/wrappers/closers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers diff --git a/utils/wrappers/errors.go b/utils/wrappers/errors.go index 641734da16c0..d887ffb4d20a 100644 --- a/utils/wrappers/errors.go +++ b/utils/wrappers/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index 221083095cd4..0ee7fd60e4ad 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package wrappers diff --git a/utils/wrappers/packing_test.go b/utils/wrappers/packing_test.go index 2372e1ed3cfb..bb3e7fe61d38 100644 --- a/utils/wrappers/packing_test.go +++ b/utils/wrappers/packing_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers diff --git a/utils/zero.go b/utils/zero.go index 9a9173cf3403..c691ed2e653c 100644 --- a/utils/zero.go +++ b/utils/zero.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/version/application.go b/version/application.go index bc545e54fa4b..2be9d838a89e 100644 --- a/version/application.go +++ b/version/application.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -6,9 +6,11 @@ package version import ( "errors" "fmt" - "sync/atomic" + "sync" ) +const LegacyAppName = "avalanche" + var ( errDifferentMajor = errors.New("different major version") @@ -16,29 +18,30 @@ var ( ) type Application struct { - Major int `json:"major" yaml:"major"` - Minor int `json:"minor" yaml:"minor"` - Patch int `json:"patch" yaml:"patch"` + Name string `json:"name" yaml:"name"` + Major int `json:"major" yaml:"major"` + Minor int `json:"minor" yaml:"minor"` + Patch int `json:"patch" yaml:"patch"` - str atomic.Value + makeStrOnce sync.Once + str string } // The only difference here between Application and Semantic is that Application -// prepends "avalanche/" rather than "v". +// prepends the client name rather than "v". func (a *Application) String() string { - strIntf := a.str.Load() - if strIntf != nil { - return strIntf.(string) - } + a.makeStrOnce.Do(a.initString) + return a.str +} - str := fmt.Sprintf( - "avalanche/%d.%d.%d", +func (a *Application) initString() { + a.str = fmt.Sprintf( + "%s/%d.%d.%d", + a.Name, a.Major, a.Minor, a.Patch, ) - a.str.Store(str) - return str } func (a *Application) Compatible(o *Application) error { diff --git a/version/application_test.go b/version/application_test.go index 0423e91918e5..deade1816e22 100644 --- a/version/application_test.go +++ b/version/application_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
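The new Application.String shown above replaces the atomic.Value cache with a sync.Once and prefixes the version with the configurable client name instead of a hard-coded "avalanche/". A minimal standalone sketch of the same pattern, using an illustrative type rather than the real version.Application:

package main

import (
	"fmt"
	"sync"
)

type app struct {
	name                string
	major, minor, patch int

	makeStrOnce sync.Once
	str         string
}

// String formats the version string at most once and caches it for later calls.
func (a *app) String() string {
	a.makeStrOnce.Do(func() {
		a.str = fmt.Sprintf("%s/%d.%d.%d", a.name, a.major, a.minor, a.patch)
	})
	return a.str
}

func main() {
	v := &app{name: "avalanchego", major: 1, minor: 2, patch: 3}
	fmt.Println(v.String()) // avalanchego/1.2.3
}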
package version @@ -14,6 +14,7 @@ func TestNewDefaultApplication(t *testing.T) { require := require.New(t) v := &Application{ + Name: LegacyAppName, Major: 1, Minor: 2, Patch: 3, @@ -33,11 +34,13 @@ func TestComparingVersions(t *testing.T) { }{ { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -47,11 +50,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 4, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -61,11 +66,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 4, @@ -75,11 +82,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -89,11 +98,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 3, @@ -103,11 +114,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 2, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -117,11 +130,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 2, Minor: 2, Patch: 3, diff --git a/version/compatibility.go b/version/compatibility.go index 9b1e8b05f657..89f71933b2ae 100644 --- a/version/compatibility.go +++ b/version/compatibility.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/version/compatibility.json b/version/compatibility.json index 6b226af6cd92..88be72adc7c0 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,5 +1,11 @@ { + "31": [ + "v1.1.19", + "v1.1.18" + ], "30": [ + "v1.1.17", + "v1.1.16", "v1.1.15" ], "29": [ diff --git a/version/compatibility_test.go b/version/compatibility_test.go index 6e9fbf311af4..50c5dd7b4ab8 100644 --- a/version/compatibility_test.go +++ b/version/compatibility_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
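The compatibility.json hunk above adds protocol 31 and extends protocol 30: the file maps each RPCChainVMProtocol version to the node versions that speak it, and init() loads it into RPCChainVMProtocolCompatibility. A rough sketch of reading that shape (the map type here is illustrative; the real code parses the version strings further):

package main

import (
	"encoding/json"
	"fmt"
)

// compatibilityJSON mirrors the shape of version/compatibility.json after this change.
const compatibilityJSON = `{
	"31": ["v1.1.19", "v1.1.18"],
	"30": ["v1.1.17", "v1.1.16", "v1.1.15"]
}`

func main() {
	var compat map[string][]string
	if err := json.Unmarshal([]byte(compatibilityJSON), &compat); err != nil {
		panic(err)
	}
	fmt.Println(compat["31"]) // [v1.1.19 v1.1.18]
}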
package version @@ -23,17 +23,20 @@ import ( func TestCompatibility(t *testing.T) { v := &Application{ + Name: Client, Major: 1, Minor: 4, Patch: 3, } minCompatable := &Application{ + Name: Client, Major: 1, Minor: 4, Patch: 0, } minCompatableTime := time.Unix(9000, 0) prevMinCompatable := &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 0, @@ -54,6 +57,7 @@ func TestCompatibility(t *testing.T) { }{ { peer: &Application{ + Name: LegacyAppName, Major: 1, Minor: 5, Patch: 0, @@ -62,6 +66,16 @@ func TestCompatibility(t *testing.T) { }, { peer: &Application{ + Name: Client, + Major: 1, + Minor: 5, + Patch: 0, + }, + time: minCompatableTime, + }, + { + peer: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 5, @@ -70,6 +84,7 @@ func TestCompatibility(t *testing.T) { }, { peer: &Application{ + Name: Client, Major: 0, Minor: 1, Patch: 0, @@ -79,6 +94,7 @@ func TestCompatibility(t *testing.T) { }, { peer: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 5, @@ -88,6 +104,7 @@ func TestCompatibility(t *testing.T) { }, { peer: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 5, @@ -97,6 +114,7 @@ func TestCompatibility(t *testing.T) { }, { peer: &Application{ + Name: Client, Major: 1, Minor: 1, Patch: 5, diff --git a/version/constants.go b/version/constants.go index 7674cc8cecb2..d4c3bc5f3aec 100644 --- a/version/constants.go +++ b/version/constants.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -19,12 +19,17 @@ import ( _ "embed" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" ) -// RPCChainVMProtocol should be bumped anytime changes are made which require -// the plugin vm to upgrade to latest avalanchego release to be compatible. -const RPCChainVMProtocol uint = 30 +const ( + Client = "avalanchego" + // RPCChainVMProtocol should be bumped anytime changes are made which + // require the plugin vm to upgrade to latest avalanchego release to be + // compatible. 
+ RPCChainVMProtocol uint = 31 +) // These are globals that describe network upgrades and node versions var ( @@ -35,19 +40,22 @@ var ( Current = &Semantic{ Major: 1, Minor: 1, - Patch: 15, + Patch: 19, } CurrentApp = &Application{ + Name: Client, Major: Current.Major, Minor: Current.Minor, Patch: Current.Patch, } MinimumCompatibleVersion = &Application{ + Name: Client, Major: 1, Minor: 1, Patch: 0, } PrevMinimumCompatibleVersion = &Application{ + Name: Client, Major: 1, Minor: 0, Patch: 0, @@ -76,6 +84,16 @@ var ( DefaultUpgradeTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + ApricotPhase1Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC), + } + + ApricotPhase2Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2021, time.May, 5, 14, 0, 0, 0, time.UTC), + } + ApricotPhase3Times = map[uint32]time.Time{ constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2021, time.August, 16, 19, 0, 0, 0, time.UTC), @@ -89,13 +107,17 @@ var ( constants.MainnetID: 793005, constants.FujiID: 47437, } - ApricotPhase4DefaultMinPChainHeight uint64 ApricotPhase5Times = map[uint32]time.Time{ constants.MainnetID: time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2021, time.November, 24, 15, 0, 0, 0, time.UTC), } + ApricotPhasePre6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 5, 1, 30, 0, 0, time.UTC), + constants.FujiID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), + } + SunrisePhase0Times = map[uint32]time.Time{} SunrisePhase0DefaultTime = time.Date(2022, time.May, 16, 8, 0, 0, 0, time.UTC) @@ -104,6 +126,11 @@ var ( constants.FujiID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), } + ApricotPhasePost6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC), + } + BanffTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC), @@ -129,9 +156,10 @@ var ( constants.MainnetID: time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC), } + CortinaXChainStopVertexID map[uint32]ids.ID // TODO: update this before release - DTimes = map[uint32]time.Time{ + DurangoTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), constants.FujiID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), } @@ -156,6 +184,43 @@ func init() { } RPCChainVMProtocolCompatibility[rpcChainVMProtocol] = versions } + + // The mainnet stop vertex is well known. It can be verified on any fully + // synced node by looking at the parentID of the genesis block. + // + // Ref: https://subnets.avax.network/x-chain/block/0 + mainnetXChainStopVertexID, err := ids.FromString("jrGWDh5Po9FMj54depyunNixpia5PN4aAYxfmNzU8n752Rjga") + if err != nil { + panic(err) + } + + // The fuji stop vertex is well known. It can be verified on any fully + // synced node by looking at the parentID of the genesis block. 
+ // + // Ref: https://subnets-test.avax.network/x-chain/block/0 + fujiXChainStopVertexID, err := ids.FromString("2D1cmbiG36BqQMRyHt4kFhWarmatA1ighSpND3FeFgz3vFVtCZ") + if err != nil { + panic(err) + } + + CortinaXChainStopVertexID = map[uint32]ids.ID{ + constants.MainnetID: mainnetXChainStopVertexID, + constants.FujiID: fujiXChainStopVertexID, + } +} + +func GetApricotPhase1Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhase1Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + +func GetApricotPhase2Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhase2Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime } func GetApricotPhase3Time(networkID uint32) time.Time { @@ -172,15 +237,15 @@ func GetApricotPhase4Time(networkID uint32) time.Time { return DefaultUpgradeTime } -func GetApricotPhase4MinPChainHeight(networkID uint32) uint64 { - if minHeight, exists := ApricotPhase4MinPChainHeight[networkID]; exists { - return minHeight +func GetApricotPhase5Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhase5Times[networkID]; exists { + return upgradeTime } - return ApricotPhase4DefaultMinPChainHeight + return DefaultUpgradeTime } -func GetApricotPhase5Time(networkID uint32) time.Time { - if upgradeTime, exists := ApricotPhase5Times[networkID]; exists { +func GetApricotPhasePre6Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhasePre6Times[networkID]; exists { return upgradeTime } return DefaultUpgradeTime @@ -200,6 +265,13 @@ func GetApricotPhase6Time(networkID uint32) time.Time { return DefaultUpgradeTime } +func GetApricotPhasePost6Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhasePost6Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + func GetBanffTime(networkID uint32) time.Time { if upgradeTime, exists := BanffTimes[networkID]; exists { return upgradeTime @@ -228,8 +300,8 @@ func GetCortinaTime(networkID uint32) time.Time { return DefaultUpgradeTime } -func GetDTime(networkID uint32) time.Time { - if upgradeTime, exists := DTimes[networkID]; exists { +func GetDurangoTime(networkID uint32) time.Time { + if upgradeTime, exists := DurangoTimes[networkID]; exists { return upgradeTime } return DefaultUpgradeTime diff --git a/version/constants_test.go b/version/constants_test.go index 5e409dd91767..aea7ef493510 100644 --- a/version/constants_test.go +++ b/version/constants_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/version/parser.go b/version/parser.go index debb636a92cb..abc150450099 100644 --- a/version/parser.go +++ b/version/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
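Each accessor added or renamed above (GetApricotPhase1Time, GetApricotPhase2Time, GetApricotPhasePre6Time, GetApricotPhasePost6Time, GetDurangoTime) follows the same lookup-with-default pattern. A generic sketch of that pattern; the getUpgradeTime helper is hypothetical, the diff keeps one exported function per upgrade:

package main

import (
	"fmt"
	"time"
)

const mainnetID uint32 = 1

// defaultUpgradeTime plays the role of DefaultUpgradeTime: the fallback for
// networks with no explicit activation time configured.
var defaultUpgradeTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)

// getUpgradeTime returns the network-specific activation time if one exists,
// otherwise the default.
func getUpgradeTime(times map[uint32]time.Time, networkID uint32) time.Time {
	if upgradeTime, ok := times[networkID]; ok {
		return upgradeTime
	}
	return defaultUpgradeTime
}

func main() {
	apricotPhase1Times := map[uint32]time.Time{
		mainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC),
	}
	fmt.Println(getUpgradeTime(apricotPhase1Times, mainnetID)) // configured mainnet time
	fmt.Println(getUpgradeTime(apricotPhase1Times, 12345))     // falls back to the default
}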
package version @@ -34,18 +34,21 @@ func Parse(s string) (*Semantic, error) { }, nil } -func ParseApplication(s string) (*Application, error) { - if !strings.HasPrefix(s, "avalanche/") { +// TODO: Remove after v1.11.x is activated +func ParseLegacyApplication(s string) (*Application, error) { + prefix := fmt.Sprintf("%s/", LegacyAppName) + if !strings.HasPrefix(s, prefix) { return nil, fmt.Errorf("%w: %q", errMissingApplicationPrefix, s) } - s = s[10:] + s = s[len(prefix):] major, minor, patch, err := parseVersions(s) if err != nil { return nil, err } return &Application{ + Name: Client, // Convert the legacy name to the current client name Major: major, Minor: minor, Patch: patch, diff --git a/version/parser_test.go b/version/parser_test.go index 3bc8b5e30589..42adb764c9c2 100644 --- a/version/parser_test.go +++ b/version/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -65,12 +65,13 @@ func TestParse(t *testing.T) { } } -func TestParseApplication(t *testing.T) { - v, err := ParseApplication("avalanche/1.2.3") +func TestParseLegacyApplication(t *testing.T) { + v, err := ParseLegacyApplication("avalanche/1.2.3") require.NoError(t, err) require.NotNil(t, v) - require.Equal(t, "avalanche/1.2.3", v.String()) + require.Equal(t, "avalanchego/1.2.3", v.String()) + require.Equal(t, "avalanchego", v.Name) require.Equal(t, 1, v.Major) require.Equal(t, 2, v.Minor) require.Equal(t, 3, v.Patch) @@ -85,6 +86,10 @@ func TestParseApplication(t *testing.T) { version: "", expectedErr: errMissingApplicationPrefix, }, + { + version: "avalanchego/v1.2.3", + expectedErr: errMissingApplicationPrefix, + }, { version: "avalanche/", expectedErr: errMissingVersions, @@ -108,7 +113,7 @@ func TestParseApplication(t *testing.T) { } for _, test := range tests { t.Run(test.version, func(t *testing.T) { - _, err := ParseApplication(test.version) + _, err := ParseLegacyApplication(test.version) require.ErrorIs(t, err, test.expectedErr) }) } diff --git a/version/string.go b/version/string.go index 1d8986dbd4ec..92303b682874 100644 --- a/version/string.go +++ b/version/string.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/version/version.go b/version/version.go index 81acdc42e77b..b8fe119b370c 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -28,7 +28,7 @@ type Semantic struct { } // The only difference here between Semantic and Application is that Semantic -// prepends "v" rather than "avalanche/". +// prepends "v" rather than the client name. func (s *Semantic) String() string { strIntf := s.str.Load() if strIntf != nil { diff --git a/version/version_test.go b/version/version_test.go index d66c1212958c..69c494c88650 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
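ParseLegacyApplication above keeps accepting the old "avalanche/<major>.<minor>.<patch>" wire format but reports the result under the new client name, which is what the updated test asserts ("avalanche/1.2.3" parses, String() yields "avalanchego/1.2.3"). A simplified standalone sketch of that prefix handling; the real parser delegates to parseVersions and returns typed errors:

package main

import (
	"fmt"
	"strings"
)

const (
	legacyAppName = "avalanche"
	client        = "avalanchego"
)

// parseLegacy strips the legacy prefix, parses the three version components,
// and reports the parsed version under the current client name.
func parseLegacy(s string) (name string, major, minor, patch int, err error) {
	prefix := legacyAppName + "/"
	if !strings.HasPrefix(s, prefix) {
		return "", 0, 0, 0, fmt.Errorf("missing application prefix: %q", s)
	}
	if _, err := fmt.Sscanf(strings.TrimPrefix(s, prefix), "%d.%d.%d", &major, &minor, &patch); err != nil {
		return "", 0, 0, 0, err
	}
	return client, major, minor, patch, nil
}

func main() {
	fmt.Println(parseLegacy("avalanche/1.2.3"))    // avalanchego 1 2 3 <nil>
	fmt.Println(parseLegacy("avalanchego/v1.2.3")) // rejected: wrong prefix, as in the new test case
}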
// See the file LICENSE for licensing terms. package version diff --git a/vms/avm/block/block.go b/vms/avm/block/block.go index 331c310274e1..376062c0387e 100644 --- a/vms/avm/block/block.go +++ b/vms/avm/block/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/avm/block/block_test.go b/vms/avm/block/block_test.go index 568eef5f5851..6100f1d6d987 100644 --- a/vms/avm/block/block_test.go +++ b/vms/avm/block/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -28,9 +28,12 @@ var ( func TestInvalidBlock(t *testing.T) { require := require.New(t) - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) _, err = parser.ParseBlock(nil) @@ -41,9 +44,12 @@ func TestStandardBlocks(t *testing.T) { // check standard block can be built and parsed require := require.New(t) - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) blkTimestamp := time.Now() diff --git a/vms/avm/block/builder/builder.go b/vms/avm/block/builder/builder.go index 77bbf38a6a35..80e734812a6c 100644 --- a/vms/avm/block/builder/builder.go +++ b/vms/avm/block/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/block" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -82,7 +82,7 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { nextTimestamp = preferredTimestamp } - stateDiff, err := states.NewDiff(preferredID, b.manager) + stateDiff, err := state.NewDiff(preferredID, b.manager) if err != nil { return nil, err } @@ -93,15 +93,19 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { remainingSize = targetBlockSize ) for { - tx := b.mempool.Peek(remainingSize) - if tx == nil { + tx, exists := b.mempool.Peek() + // Invariant: [mempool.MaxTxSize] < [targetBlockSize]. This guarantees + // that we will only stop building a block once there are no + // transactions in the mempool or the block is at least + // [targetBlockSize - mempool.MaxTxSize] bytes full. + if !exists || len(tx.Bytes()) > remainingSize { break } - b.mempool.Remove([]*txs.Tx{tx}) + b.mempool.Remove(tx) // Invariant: [tx] has already been syntactically verified. 
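The rewritten BuildBlock loop above no longer asks the mempool for a transaction that fits; it peeks at the next transaction, stops once that transaction would overflow the remaining space, and relies on the invariant that the mempool's maximum transaction size is below targetBlockSize. A toy sketch of that loop shape, with a slice-backed stand-in for the real mempool:

package main

import "fmt"

type tx struct{ bytes []byte }

// sliceMempool is a toy stand-in; only the Peek/Remove shape from the diff matters here.
type sliceMempool struct{ txs []*tx }

func (m *sliceMempool) Peek() (*tx, bool) {
	if len(m.txs) == 0 {
		return nil, false
	}
	return m.txs[0], true
}

func (m *sliceMempool) Remove(remove ...*tx) {
	m.txs = m.txs[len(remove):] // toy removal: drop from the front
}

const targetBlockSize = 64 // far smaller than the real target, for the demo

// gatherTxs mirrors the loop: peek, stop if the next tx no longer fits,
// otherwise remove it from the mempool and add it to the block.
func gatherTxs(m *sliceMempool) [][]byte {
	var (
		blockTxs      [][]byte
		remainingSize = targetBlockSize
	)
	for {
		next, exists := m.Peek()
		if !exists || len(next.bytes) > remainingSize {
			break
		}
		m.Remove(next)
		remainingSize -= len(next.bytes)
		blockTxs = append(blockTxs, next.bytes)
	}
	return blockTxs
}

func main() {
	m := &sliceMempool{txs: []*tx{{bytes: make([]byte, 40)}, {bytes: make([]byte, 40)}}}
	fmt.Println(len(gatherTxs(m))) // 1: the second 40-byte tx no longer fits in the 24 bytes left
}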
- txDiff, err := wrapState(stateDiff) + txDiff, err := state.NewDiffOn(stateDiff) if err != nil { return nil, err } @@ -166,17 +170,3 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { return b.manager.NewBlock(statelessBlk), nil } - -type stateGetter struct { - state states.Chain -} - -func (s stateGetter) GetState(ids.ID) (states.Chain, bool) { - return s.state, true -} - -func wrapState(parentState states.Chain) (states.Diff, error) { - return states.NewDiff(ids.Empty, stateGetter{ - state: parentState, - }) -} diff --git a/vms/avm/block/builder/builder_test.go b/vms/avm/block/builder/builder_test.go index fdab9d6cf064..185c93260eca 100644 --- a/vms/avm/block/builder/builder_test.go +++ b/vms/avm/block/builder/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -29,7 +29,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -108,7 +108,7 @@ func TestBuilderBuildBlock(t *testing.T) { mempool, ) }, - expectedErr: states.ErrMissingParentState, + expectedErr: state.ErrMissingParentState, }, { name: "tx fails semantic verification", @@ -120,7 +120,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -134,11 +134,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -164,7 +164,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -179,11 +179,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -209,7 +209,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredBlock.EXPECT().Height().Return(preferredHeight) 
preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -225,11 +225,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -255,7 +255,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -309,14 +309,14 @@ func TestBuilderBuildBlock(t *testing.T) { ) mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(targetBlockSize).Return(tx1) + mempool.EXPECT().Peek().Return(tx1, true) mempool.EXPECT().Remove([]*txs.Tx{tx1}) // Second loop iteration - mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(tx2) + mempool.EXPECT().Peek().Return(tx2, true) mempool.EXPECT().Remove([]*txs.Tx{tx2}) mempool.EXPECT().MarkDropped(tx2.ID(), blkexecutor.ErrConflictingBlockTxs) // Third loop iteration - mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -353,7 +353,7 @@ func TestBuilderBuildBlock(t *testing.T) { clock := &mockable.Clock{} clock.Set(preferredTimestamp.Add(-2 * time.Second)) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -385,10 +385,10 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -427,7 +427,7 @@ func TestBuilderBuildBlock(t *testing.T) { clock := &mockable.Clock{} clock.Set(now) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -459,10 +459,10 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -510,11 +510,16 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { tx := 
transactions[0] txID := tx.ID() require.NoError(mempool.Add(tx)) - require.True(mempool.Has(txID)) - parser, err := block.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + _, ok := mempool.Get(txID) + require.True(ok) + + parser, err := block.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) backend := &txexecutor.Backend{ @@ -526,7 +531,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { baseDB := versiondb.New(memdb.New()) - state, err := states.New(baseDB, parser, registerer, trackChecksums) + state, err := state.New(baseDB, parser, registerer, trackChecksums) require.NoError(err) clk := &mockable.Clock{} diff --git a/vms/avm/block/executor/block.go b/vms/avm/block/executor/block.go index 418ca0b539ca..8663f27ba123 100644 --- a/vms/avm/block/executor/block.go +++ b/vms/avm/block/executor/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/avm/block" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) @@ -106,7 +106,7 @@ func (b *Block) Verify(context.Context) error { ) } - stateDiff, err := states.NewDiff(parentID, b.manager) + stateDiff, err := state.NewDiff(parentID, b.manager) if err != nil { return err } @@ -200,7 +200,7 @@ func (b *Block) Verify(context.Context) error { stateDiff.AddBlock(b.Block) b.manager.blkIDToState[blkID] = blockState - b.manager.mempool.Remove(txs) + b.manager.mempool.Remove(txs...) return nil } @@ -220,7 +220,7 @@ func (b *Block) Accept(context.Context) error { } b.manager.lastAccepted = blkID - b.manager.mempool.Remove(txs) + b.manager.mempool.Remove(txs...) blkState, ok := b.manager.blkIDToState[blkID] if !ok { @@ -290,6 +290,10 @@ func (b *Block) Reject(context.Context) error { } } + // If we added transactions to the mempool, we should be willing to build a + // block. + b.manager.mempool.RequestBuildBlock() + b.rejected = true return nil } diff --git a/vms/avm/block/executor/block_test.go b/vms/avm/block/executor/block_test.go index 9d7f291a8f60..0b6738822c6e 100644 --- a/vms/avm/block/executor/block_test.go +++ b/vms/avm/block/executor/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
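Block.Reject above now ends by calling mempool.RequestBuildBlock after the block's transactions have been returned to the mempool. The re-add loop itself is outside this hunk; going by the updated tests ("Only add the one that passes verification"), it re-verifies each transaction before adding it. A hedged sketch of that flow with stand-in helpers:

package main

import "fmt"

type tx struct{ id string }

var mempool []*tx

// verifyTx and addToMempool stand in for manager.VerifyTx and mempool.Add.
func verifyTx(t *tx) error {
	if t.id == "conflicting" {
		return fmt.Errorf("tx %s no longer verifies", t.id)
	}
	return nil
}

func addToMempool(t *tx) { mempool = append(mempool, t) }

// requestBuildBlock models the new mempool.RequestBuildBlock call: once
// transactions are back in the mempool, signal willingness to build a block.
func requestBuildBlock() { fmt.Println("block build requested") }

// rejectBlock sketches the Reject flow implied by the diff and its tests:
// re-add only the transactions that still verify, then request a new block.
func rejectBlock(blockTxs []*tx) {
	for _, t := range blockTxs {
		if err := verifyTx(t); err != nil {
			continue // dropped instead of re-added
		}
		addToMempool(t)
	}
	requestBuildBlock()
}

func main() {
	rejectBlock([]*tx{{id: "valid"}, {id: "conflicting"}})
	fmt.Println(len(mempool)) // 1
}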
package executor @@ -24,7 +24,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -153,7 +153,7 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(parentID).Return(nil, errTest) return &Block{ Block: mockBlock, @@ -186,7 +186,7 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight) // Should be blockHeight - 1 mockState.EXPECT().GetBlock(parentID).Return(mockParentBlock, nil) @@ -226,7 +226,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp.Add(1)) @@ -271,7 +271,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -321,7 +321,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -399,7 +399,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -461,7 +461,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -509,7 +509,7 @@ func TestBlockVerify(t *testing.T) { mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -616,11 +616,11 @@ func TestBlockAccept(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) 
mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) mockManagerState.EXPECT().CommitBatch().Return(nil, errTest) mockManagerState.EXPECT().Abort() - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) return &Block{ @@ -654,7 +654,7 @@ func TestBlockAccept(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) @@ -663,7 +663,7 @@ func TestBlockAccept(t *testing.T) { mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(errTest) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) return &Block{ @@ -698,7 +698,7 @@ func TestBlockAccept(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) @@ -707,7 +707,7 @@ func TestBlockAccept(t *testing.T) { mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) metrics := metrics.NewMockMetrics(ctrl) @@ -748,7 +748,7 @@ func TestBlockAccept(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) @@ -758,7 +758,7 @@ func TestBlockAccept(t *testing.T) { mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) metrics := metrics.NewMockMetrics(ctrl) @@ -857,28 +857,27 @@ func TestBlockReject(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Add(validTx).Return(nil) // Only add the one that passes verification + mempool.EXPECT().RequestBuildBlock() - preferredID := ids.GenerateTestID() - mockPreferredState := states.NewMockDiff(ctrl) - mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() - mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + lastAcceptedID := ids.GenerateTestID() + mockState := state.NewMockState(ctrl) + mockState.EXPECT().GetLastAccepted().Return(lastAcceptedID).AnyTimes() + mockState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() return &Block{ Block: mockBlock, manager: &manager{ - preferred: preferredID, - mempool: mempool, - metrics: metrics.NewMockMetrics(ctrl), + lastAccepted: lastAcceptedID, + mempool: mempool, + metrics: 
metrics.NewMockMetrics(ctrl), backend: &executor.Backend{ Bootstrapped: true, Ctx: &snow.Context{ Log: logging.NoLog{}, }, }, + state: mockState, blkIDToState: map[ids.ID]*blockState{ - preferredID: { - onAcceptState: mockPreferredState, - }, blockID: {}, }, }, @@ -916,28 +915,27 @@ func TestBlockReject(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Add(tx1).Return(nil) mempool.EXPECT().Add(tx2).Return(nil) + mempool.EXPECT().RequestBuildBlock() - preferredID := ids.GenerateTestID() - mockPreferredState := states.NewMockDiff(ctrl) - mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() - mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + lastAcceptedID := ids.GenerateTestID() + mockState := state.NewMockState(ctrl) + mockState.EXPECT().GetLastAccepted().Return(lastAcceptedID).AnyTimes() + mockState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() return &Block{ Block: mockBlock, manager: &manager{ - preferred: preferredID, - mempool: mempool, - metrics: metrics.NewMockMetrics(ctrl), + lastAccepted: lastAcceptedID, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), backend: &executor.Backend{ Bootstrapped: true, Ctx: &snow.Context{ Log: logging.NoLog{}, }, }, + state: mockState, blkIDToState: map[ids.ID]*blockState{ - preferredID: { - onAcceptState: mockPreferredState, - }, blockID: {}, }, }, @@ -1014,7 +1012,7 @@ func TestBlockStatus(t *testing.T) { mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(blockID).Return(nil, nil) return &Block{ @@ -1034,7 +1032,7 @@ func TestBlockStatus(t *testing.T) { mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(blockID).Return(nil, database.ErrNotFound) return &Block{ diff --git a/vms/avm/block/executor/manager.go b/vms/avm/block/executor/manager.go index aa99ede6392f..9822743b7fd3 100644 --- a/vms/avm/block/executor/manager.go +++ b/vms/avm/block/executor/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -27,7 +27,7 @@ var ( ) type Manager interface { - states.Versions + state.Versions // Returns the ID of the most recently accepted block. LastAccepted() ids.ID @@ -39,19 +39,19 @@ type Manager interface { GetStatelessBlock(blkID ids.ID) (block.Block, error) NewBlock(block.Block) snowman.Block - // VerifyTx verifies that the transaction can be issued based on the - // currently preferred state. + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. 
VerifyTx(tx *txs.Tx) error - // VerifyUniqueInputs verifies that the inputs are not duplicated in the - // provided blk or any of its ancestors pinned in memory. + // VerifyUniqueInputs returns nil iff no blocks in the inclusive + // ancestry of [blkID] consume an input in [inputs]. VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error } func NewManager( mempool mempool.Mempool, metrics metrics.Metrics, - state states.State, + state state.State, backend *executor.Backend, clk *mockable.Clock, onAccept func(*txs.Tx) error, @@ -72,7 +72,7 @@ func NewManager( type manager struct { backend *executor.Backend - state states.State + state state.State metrics metrics.Metrics mempool mempool.Mempool clk *mockable.Clock @@ -93,12 +93,12 @@ type manager struct { type blockState struct { statelessBlock block.Block - onAcceptState states.Diff + onAcceptState state.Diff importedInputs set.Set[ids.ID] atomicRequests map[ids.ID]*atomic.Requests } -func (m *manager) GetState(blkID ids.ID) (states.Chain, bool) { +func (m *manager) GetState(blkID ids.ID) (state.Chain, bool) { // If the block is in the map, it is processing. if state, ok := m.blkIDToState[blkID]; ok { return state.onAcceptState, true @@ -155,7 +155,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return err } - stateDiff, err := states.NewDiff(m.preferred, m) + stateDiff, err := state.NewDiff(m.lastAccepted, m) if err != nil { return err } @@ -174,12 +174,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { State: stateDiff, Tx: tx, } - err = tx.Unsigned.Visit(executor) - if err != nil { - return err - } - - return m.VerifyUniqueInputs(m.preferred, executor.Inputs) + return tx.Unsigned.Visit(executor) } func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { diff --git a/vms/avm/block/executor/manager_test.go b/vms/avm/block/executor/manager_test.go index c21201417add..012428d582e4 100644 --- a/vms/avm/block/executor/manager_test.go +++ b/vms/avm/block/executor/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
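The reworded VerifyUniqueInputs comment above states the contract precisely: nil iff no block in the inclusive ancestry of blkID consumes one of the given inputs (VerifyTx no longer performs this check itself, since it now verifies against the last accepted state). A simplified sketch of such an ancestry walk, with string IDs standing in for ids.ID and plain maps for set.Set:

package main

import "fmt"

type blockState struct {
	parent         string
	importedInputs map[string]bool
}

// blkIDToState holds only processing blocks; accepted blocks are not in it.
var blkIDToState = map[string]*blockState{}

// verifyUniqueInputs walks the processing ancestry of blkID and fails if any
// ancestor already consumes one of the inputs. Reaching a block that is not in
// the processing set means the remaining ancestry is accepted, so the walk stops.
func verifyUniqueInputs(blkID string, inputs map[string]bool) error {
	for {
		state, ok := blkIDToState[blkID]
		if !ok {
			return nil
		}
		for input := range inputs {
			if state.importedInputs[input] {
				return fmt.Errorf("input %s already consumed by processing ancestor %s", input, blkID)
			}
		}
		blkID = state.parent
	}
}

func main() {
	blkIDToState["B"] = &blockState{parent: "A", importedInputs: map[string]bool{"utxo1": true}}
	fmt.Println(verifyUniqueInputs("B", map[string]bool{"utxo1": true})) // conflict reported
	fmt.Println(verifyUniqueInputs("B", map[string]bool{"utxo2": true})) // <nil>
}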
package executor @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/block" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) @@ -31,7 +31,7 @@ func TestManagerGetStatelessBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) m := &manager{ state: state, blkIDToState: map[ids.ID]*blockState{}, @@ -73,16 +73,16 @@ func TestManagerGetState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := states.NewMockState(ctrl) + s := state.NewMockState(ctrl) m := &manager{ - state: state, + state: s, blkIDToState: map[ids.ID]*blockState{}, lastAccepted: ids.GenerateTestID(), } // Case: Block is in memory { - diff := states.NewMockDiff(ctrl) + diff := state.NewMockDiff(ctrl) blkID := ids.GenerateTestID() m.blkIDToState[blkID] = &blockState{ onAcceptState: diff, @@ -97,14 +97,14 @@ func TestManagerGetState(t *testing.T) { blkID := ids.GenerateTestID() gotState, ok := m.GetState(blkID) require.False(ok) - require.Equal(state, gotState) + require.Equal(s, gotState) } // Case: Block isn't in memory; block is last accepted { gotState, ok := m.GetState(m.lastAccepted) require.True(ok) - require.Equal(state, gotState) + require.Equal(s, gotState) } } @@ -116,7 +116,6 @@ func TestManagerVerifyTx(t *testing.T) { expectedErr error } - inputID := ids.GenerateTestID() tests := []test{ { name: "not bootstrapped", @@ -161,11 +160,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -173,8 +172,7 @@ func TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, + lastAccepted: lastAcceptedID, } }, expectedErr: errTestSemanticVerifyFail, @@ -194,11 +192,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -206,57 +204,10 @@ func TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, - } - }, - expectedErr: errTestExecutionFail, - }, - { - name: "non-unique inputs", - txF: func(ctrl *gomock.Controller) *txs.Tx { - unsigned := txs.NewMockUnsignedTx(ctrl) - // Syntactic verification passes - unsigned.EXPECT().Visit(gomock.Any()).Return(nil) - // Semantic verification passes - unsigned.EXPECT().Visit(gomock.Any()).Return(nil) - // Execution passes - unsigned.EXPECT().Visit(gomock.Any()).DoAndReturn(func(e *executor.Executor) error { - e.Inputs.Add(inputID) - return nil - }) - return 
&txs.Tx{ - Unsigned: unsigned, - } - }, - managerF: func(ctrl *gomock.Controller) *manager { - lastAcceptedID := ids.GenerateTestID() - - preferredID := ids.GenerateTestID() - preferred := block.NewMockBlock(ctrl) - preferred.EXPECT().Parent().Return(lastAcceptedID).AnyTimes() - - // These values don't matter for this test - diffState := states.NewMockDiff(ctrl) - diffState.EXPECT().GetLastAccepted().Return(preferredID) - diffState.EXPECT().GetTimestamp().Return(time.Time{}) - - return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, - blkIDToState: map[ids.ID]*blockState{ - preferredID: { - statelessBlock: preferred, - onAcceptState: diffState, - importedInputs: set.Of(inputID), - }, - }, lastAccepted: lastAcceptedID, - preferred: preferredID, } }, - expectedErr: ErrConflictingParentTxs, + expectedErr: errTestExecutionFail, }, { name: "happy path", @@ -273,11 +224,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -285,8 +236,7 @@ func TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, + lastAccepted: lastAcceptedID, } }, expectedErr: nil, diff --git a/vms/avm/block/executor/mock_manager.go b/vms/avm/block/executor/mock_manager.go index b3560f2e8afa..a882ec519fba 100644 --- a/vms/avm/block/executor/mock_manager.go +++ b/vms/avm/block/executor/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/block/executor (interfaces: Manager) +// Source: vms/avm/block/executor/manager.go +// +// Generated by this command: +// +// mockgen -source=vms/avm/block/executor/manager.go -destination=vms/avm/block/executor/mock_manager.go -package=executor -exclude_interfaces= +// // Package executor is a generated GoMock package. package executor @@ -14,7 +16,7 @@ import ( snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" set "github.com/ava-labs/avalanchego/utils/set" block "github.com/ava-labs/avalanchego/vms/avm/block" - states "github.com/ava-labs/avalanchego/vms/avm/states" + state "github.com/ava-labs/avalanchego/vms/avm/state" txs "github.com/ava-labs/avalanchego/vms/avm/txs" gomock "go.uber.org/mock/gomock" ) @@ -43,48 +45,48 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // GetBlock mocks base method. -func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { +func (m *MockManager) GetBlock(blkID ids.ID) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) + ret := m.ctrl.Call(m, "GetBlock", blkID) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. 
-func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), blkID) } // GetState mocks base method. -func (m *MockManager) GetState(arg0 ids.ID) (states.Chain, bool) { +func (m *MockManager) GetState(blkID ids.ID) (state.Chain, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) - ret0, _ := ret[0].(states.Chain) + ret := m.ctrl.Call(m, "GetState", blkID) + ret0, _ := ret[0].(state.Chain) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetState indicates an expected call of GetState. -func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetState(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), blkID) } // GetStatelessBlock mocks base method. -func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (block.Block, error) { +func (m *MockManager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) + ret := m.ctrl.Call(m, "GetStatelessBlock", blkID) ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStatelessBlock indicates an expected call of GetStatelessBlock. -func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetStatelessBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), blkID) } // LastAccepted mocks base method. @@ -110,7 +112,7 @@ func (m *MockManager) NewBlock(arg0 block.Block) snowman.Block { } // NewBlock indicates an expected call of NewBlock. -func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) NewBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) } @@ -130,41 +132,41 @@ func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { } // SetPreference mocks base method. -func (m *MockManager) SetPreference(arg0 ids.ID) { +func (m *MockManager) SetPreference(blkID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPreference", arg0) + m.ctrl.Call(m, "SetPreference", blkID) } // SetPreference indicates an expected call of SetPreference. -func (mr *MockManagerMockRecorder) SetPreference(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) SetPreference(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), blkID) } // VerifyTx mocks base method. 
-func (m *MockManager) VerifyTx(arg0 *txs.Tx) error { +func (m *MockManager) VerifyTx(tx *txs.Tx) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyTx", arg0) + ret := m.ctrl.Call(m, "VerifyTx", tx) ret0, _ := ret[0].(error) return ret0 } // VerifyTx indicates an expected call of VerifyTx. -func (mr *MockManagerMockRecorder) VerifyTx(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) VerifyTx(tx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), tx) } // VerifyUniqueInputs mocks base method. -func (m *MockManager) VerifyUniqueInputs(arg0 ids.ID, arg1 set.Set[ids.ID]) error { +func (m *MockManager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyUniqueInputs", arg0, arg1) + ret := m.ctrl.Call(m, "VerifyUniqueInputs", blkID, inputs) ret0, _ := ret[0].(error) return ret0 } // VerifyUniqueInputs indicates an expected call of VerifyUniqueInputs. -func (mr *MockManagerMockRecorder) VerifyUniqueInputs(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) VerifyUniqueInputs(blkID, inputs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), blkID, inputs) } diff --git a/vms/avm/block/mock_block.go b/vms/avm/block/mock_block.go index 770dbae29b7d..bc332e88590c 100644 --- a/vms/avm/block/mock_block.go +++ b/vms/avm/block/mock_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/avm/block (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=block -destination=vms/avm/block/mock_block.go github.com/ava-labs/avalanchego/vms/avm/block Block +// // Package block is a generated GoMock package. package block @@ -90,7 +92,7 @@ func (m *MockBlock) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. -func (mr *MockBlockMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockBlock)(nil).InitCtx), arg0) } @@ -160,7 +162,7 @@ func (m *MockBlock) initialize(arg0 []byte, arg1 codec.Manager) error { } // initialize indicates an expected call of initialize. -func (mr *MockBlockMockRecorder) initialize(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) initialize(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "initialize", reflect.TypeOf((*MockBlock)(nil).initialize), arg0, arg1) } diff --git a/vms/avm/block/parser.go b/vms/avm/block/parser.go index 230568149b6d..f0c359a513b0 100644 --- a/vms/avm/block/parser.go +++ b/vms/avm/block/parser.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block import ( - "fmt" "reflect" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/utils" @@ -25,17 +25,14 @@ type Parser interface { ParseBlock(bytes []byte) (Block, error) ParseGenesisBlock(bytes []byte) (Block, error) - - InitializeBlock(block Block) error - InitializeGenesisBlock(block Block) error } type parser struct { txs.Parser } -func NewParser(fxs []fxs.Fx) (Parser, error) { - p, err := txs.NewParser(fxs) +func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { + p, err := txs.NewParser(durangoTime, fxs) if err != nil { return nil, err } @@ -52,12 +49,13 @@ func NewParser(fxs []fxs.Fx) (Parser, error) { } func NewCustomParser( + durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock *mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - p, err := txs.NewCustomParser(typeToFxIndex, clock, log, fxs) + p, err := txs.NewCustomParser(durangoTime, typeToFxIndex, clock, log, fxs) if err != nil { return nil, err } @@ -88,21 +86,3 @@ func parse(cm codec.Manager, bytes []byte) (Block, error) { } return blk, blk.initialize(bytes, cm) } - -func (p *parser) InitializeBlock(block Block) error { - return initialize(block, p.Codec()) -} - -func (p *parser) InitializeGenesisBlock(block Block) error { - return initialize(block, p.GenesisCodec()) -} - -func initialize(blk Block, cm codec.Manager) error { - // We serialize this block as a pointer so that it can be deserialized into - // a Block - bytes, err := cm.Marshal(CodecVersion, &blk) - if err != nil { - return fmt.Errorf("couldn't marshal block: %w", err) - } - return blk.initialize(bytes, cm) -} diff --git a/vms/avm/block/standard_block.go b/vms/avm/block/standard_block.go index be6f1c7456cd..614c7bdc332c 100644 --- a/vms/avm/block/standard_block.go +++ b/vms/avm/block/standard_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -88,5 +88,16 @@ func NewStandardBlock( Time: uint64(timestamp.Unix()), Transactions: txs, } - return blk, initialize(blk, cm) + + // We serialize this block as a pointer so that it can be deserialized into + // a Block + var blkIntf Block = blk + bytes, err := cm.Marshal(CodecVersion, &blkIntf) + if err != nil { + return nil, fmt.Errorf("couldn't marshal block: %w", err) + } + + blk.BlockID = hashing.ComputeHash256Array(bytes) + blk.bytes = bytes + return blk, nil } diff --git a/vms/avm/camino_service_test.go b/vms/avm/camino_service_test.go deleted file mode 100644 index 9eeb88f79c42..000000000000 --- a/vms/avm/camino_service_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "context" - "net/http" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGetAssetDescriptionC4T(t *testing.T) { - env := setup(t, &envConfig{}) - env.vm.ctx.Lock.Unlock() - defer stopEnv(t, env) - - type args struct { - in0 *http.Request - args *GetAssetDescriptionArgs - reply *GetAssetDescriptionReply - } - tests := []struct { - name string - args args - expectedErr error - want []string - }{ - { - name: "With given assetId", - args: args{ - in0: nil, - reply: &GetAssetDescriptionReply{}, - args: &GetAssetDescriptionArgs{ - AssetID: env.vm.ctx.AVAXAssetID.String(), - }, - }, - want: []string{"AVAX", "SYMB", env.vm.ctx.AVAXAssetID.String()}, - }, - { - name: "Without assetId", - args: args{ - in0: nil, - reply: &GetAssetDescriptionReply{}, - args: &GetAssetDescriptionArgs{ - AssetID: env.vm.ctx.AVAXAssetID.String(), - }, - }, - want: []string{"AVAX", "SYMB", env.vm.feeAssetID.String()}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := env.service.GetAssetDescription(tt.args.in0, tt.args.args, tt.args.reply) - require.ErrorIs(t, err, tt.expectedErr) - require.Equal(t, tt.want[0], tt.args.reply.Name, "Wrong name returned from GetAssetDescription %s", tt.args.reply.Name) - require.Equal(t, tt.want[1], tt.args.reply.Symbol, "Wrong symbol returned from GetAssetDescription %s", tt.args.reply.Symbol) - require.Equal(t, tt.want[2], tt.args.reply.AssetID.String()) - }) - } -} - -func stopEnv(t *testing.T, env *environment) { - env.vm.ctx.Lock.Lock() - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() -} diff --git a/vms/avm/client.go b/vms/avm/client.go index 8f9ea084c237..63df6543446e 100644 --- a/vms/avm/client.go +++ b/vms/avm/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/client_test.go b/vms/avm/client_test.go index e8013b15d115..28a2d874128a 100644 --- a/vms/avm/client_test.go +++ b/vms/avm/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/config.go b/vms/avm/config.go new file mode 100644 index 000000000000..f7661bbefd18 --- /dev/null +++ b/vms/avm/config.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "encoding/json" + + "github.com/ava-labs/avalanchego/vms/avm/network" +) + +var DefaultConfig = Config{ + Network: network.DefaultConfig, + IndexTransactions: false, + IndexAllowIncomplete: false, + ChecksumsEnabled: false, +} + +type Config struct { + Network network.Config `json:"network"` + IndexTransactions bool `json:"index-transactions"` + IndexAllowIncomplete bool `json:"index-allow-incomplete"` + ChecksumsEnabled bool `json:"checksums-enabled"` +} + +func ParseConfig(configBytes []byte) (Config, error) { + if len(configBytes) == 0 { + return DefaultConfig, nil + } + + config := DefaultConfig + err := json.Unmarshal(configBytes, &config) + return config, err +} diff --git a/vms/avm/config/config.go b/vms/avm/config/config.go index 045b4474ca67..df6e4f7de2ae 100644 --- a/vms/avm/config/config.go +++ b/vms/avm/config/config.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config +import "time" + // Struct collecting all the foundational parameters of the AVM type Config struct { // Fee that is burned by every non-asset creating transaction @@ -10,4 +12,7 @@ type Config struct { // Fee that must be burned by every asset creating transaction CreateAssetTxFee uint64 + + // Time of the Durango network upgrade + DurangoTime time.Time } diff --git a/vms/avm/config_test.go b/vms/avm/config_test.go new file mode 100644 index 000000000000..27481d78b901 --- /dev/null +++ b/vms/avm/config_test.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/avm/network" +) + +func TestParseConfig(t *testing.T) { + tests := []struct { + name string + configBytes []byte + expectedConfig Config + }{ + { + name: "unspecified config", + configBytes: nil, + expectedConfig: DefaultConfig, + }, + { + name: "manually specified checksums enabled", + configBytes: []byte(`{"checksums-enabled":true}`), + expectedConfig: Config{ + Network: network.DefaultConfig, + IndexTransactions: DefaultConfig.IndexTransactions, + IndexAllowIncomplete: DefaultConfig.IndexAllowIncomplete, + ChecksumsEnabled: true, + }, + }, + { + name: "manually specified checksums enabled", + configBytes: []byte(`{"network":{"max-validator-set-staleness":1}}`), + expectedConfig: Config{ + Network: network.Config{ + MaxValidatorSetStaleness: time.Nanosecond, + TargetGossipSize: network.DefaultConfig.TargetGossipSize, + PullGossipPollSize: network.DefaultConfig.PullGossipPollSize, + PullGossipFrequency: network.DefaultConfig.PullGossipFrequency, + PullGossipThrottlingPeriod: network.DefaultConfig.PullGossipThrottlingPeriod, + PullGossipThrottlingLimit: network.DefaultConfig.PullGossipThrottlingLimit, + ExpectedBloomFilterElements: network.DefaultConfig.ExpectedBloomFilterElements, + ExpectedBloomFilterFalsePositiveProbability: network.DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + MaxBloomFilterFalsePositiveProbability: network.DefaultConfig.MaxBloomFilterFalsePositiveProbability, + LegacyPushGossipCacheSize: network.DefaultConfig.LegacyPushGossipCacheSize, + }, + IndexTransactions: DefaultConfig.IndexTransactions, + IndexAllowIncomplete: DefaultConfig.IndexAllowIncomplete, + ChecksumsEnabled: DefaultConfig.ChecksumsEnabled, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + config, err := ParseConfig(test.configBytes) + require.NoError(err) + require.Equal(test.expectedConfig, config) + }) + } +} diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index e1e9e29f630e..236c20875796 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -1,13 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( "context" - "errors" "math/rand" "testing" + "time" stdjson "encoding/json" @@ -20,8 +20,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/cb58" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" @@ -67,25 +66,16 @@ var ( }, } - chainID = ids.ID{5, 4, 3, 2, 1} assetID = ids.ID{1, 2, 3} - keys []*secp256k1.PrivateKey - addrs []ids.ShortID // addrs[i] corresponds to keys[i] - - errMissing = errors.New("missing") + keys = secp256k1.TestKeys()[:3] // TODO: Remove [:3] + addrs []ids.ShortID // addrs[i] corresponds to keys[i] ) func init() { - for _, key := range []string{ - "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", - "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", - "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - } { - keyBytes, _ := cb58.Decode(key) - pk, _ := secp256k1.ToPrivateKey(keyBytes) - keys = append(keys, pk) - addrs = append(addrs, pk.PublicKey().Address()) + addrs = make([]ids.ShortID, len(keys)) + for i, key := range keys { + addrs[i] = key.Address() } } @@ -131,7 +121,8 @@ func setup(tb testing.TB, c *envConfig) *environment { } genesisBytes := buildGenesisTestWithArgs(tb, genesisArgs) - ctx := newContext(tb) + + ctx := snowtest.Context(tb, snowtest.XChainID) baseDB := memdb.New() m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) @@ -167,9 +158,8 @@ func setup(tb testing.TB, c *envConfig) *environment { Config: vmStaticConfig, } - vmDynamicConfig := Config{ - IndexTransactions: true, - } + vmDynamicConfig := DefaultConfig + vmDynamicConfig.IndexTransactions = true if c.vmDynamicConfig != nil { vmDynamicConfig = *c.vmDynamicConfig } @@ -232,40 +222,6 @@ func setup(tb testing.TB, c *envConfig) *environment { return env } -func newContext(tb testing.TB) *snow.Context { - require := require.New(tb) - - genesisBytes := buildGenesisTest(tb) - tx := getCreateTxFromGenesisTest(tb, genesisBytes, "AVAX") - - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.ChainID = chainID - ctx.AVAXAssetID = tx.ID() - ctx.XChainID = ids.Empty.Prefix(0) - ctx.CChainID = ids.Empty.Prefix(1) - aliaser := ctx.BCLookup.(ids.Aliaser) - - require.NoError(aliaser.Alias(chainID, "X")) - require.NoError(aliaser.Alias(chainID, chainID.String())) - require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) - require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: ctx.SubnetID, - chainID: ctx.SubnetID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - return ctx -} - // Returns: // // 1. 
tx in genesis that creates asset @@ -273,9 +229,12 @@ func newContext(tb testing.TB) *snow.Context { func getCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { require := require.New(tb) - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) cm := parser.GenesisCodec() @@ -296,7 +255,7 @@ func getCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName st tx := &txs.Tx{ Unsigned: &assetTx.CreateAssetTx, } - require.NoError(parser.InitializeGenesisTx(tx)) + require.NoError(tx.Initialize(parser.GenesisCodec())) return tx } @@ -320,7 +279,7 @@ func buildGenesisTestWithArgs(tb testing.TB, args *BuildGenesisArgs) []byte { return b } -func newTx(tb testing.TB, genesisBytes []byte, vm *VM, assetName string) *txs.Tx { +func newTx(tb testing.TB, genesisBytes []byte, chainID ids.ID, parser txs.Parser, assetName string) *txs.Tx { require := require.New(tb) createTx := getCreateTxFromGenesisTest(tb, genesisBytes, assetName) @@ -345,14 +304,14 @@ func newTx(tb testing.TB, genesisBytes []byte, vm *VM, assetName string) *txs.Tx }}, }, }} - require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + require.NoError(tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) return tx } // Sample from a set of addresses and return them raw and formatted as strings. // The size of the sample is between 1 and len(addrs) // If len(addrs) == 0, returns nil -func sampleAddrs(tb testing.TB, vm *VM, addrs []ids.ShortID) ([]ids.ShortID, []string) { +func sampleAddrs(tb testing.TB, addressFormatter avax.AddressManager, addrs []ids.ShortID) ([]ids.ShortID, []string) { require := require.New(tb) sampledAddrs := []ids.ShortID{} @@ -366,7 +325,7 @@ func sampleAddrs(tb testing.TB, vm *VM, addrs []ids.ShortID) ([]ids.ShortID, []s require.NoError(err) for _, index := range indices { addr := addrs[index] - addrStr, err := vm.FormatLocalAddress(addr) + addrStr, err := addressFormatter.FormatLocalAddress(addr) require.NoError(err) sampledAddrs = append(sampledAddrs, addr) @@ -524,30 +483,31 @@ func makeCustomAssetGenesis(tb testing.TB) *BuildGenesisArgs { } } -// issueAndAccept expects the context lock to be held +// issueAndAccept expects the context lock not to be held func issueAndAccept( require *require.Assertions, vm *VM, issuer <-chan common.Message, tx *txs.Tx, ) { - txID, err := vm.IssueTx(tx.Bytes()) + txID, err := vm.issueTx(tx) require.NoError(err) require.Equal(tx.ID(), txID) buildAndAccept(require, vm, issuer, txID) } -// buildAndAccept expects the context lock to be held +// buildAndAccept expects the context lock not to be held func buildAndAccept( require *require.Assertions, vm *VM, issuer <-chan common.Message, txID ids.ID, ) { - vm.ctx.Lock.Unlock() require.Equal(common.PendingTxs, <-issuer) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() blkIntf, err := vm.BuildBlock(context.Background()) require.NoError(err) diff --git a/vms/avm/factory.go b/vms/avm/factory.go index 1e2c6f68a10f..ee71cac0346f 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm diff --git a/vms/avm/fx_test.go b/vms/avm/fx_test.go index ee0cdbfd8157..7cea92cf3194 100644 --- a/vms/avm/fx_test.go +++ b/vms/avm/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/fxs/fx.go b/vms/avm/fxs/fx.go index 512a3bf0da31..2749ee4500a3 100644 --- a/vms/avm/fxs/fx.go +++ b/vms/avm/fxs/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fxs diff --git a/vms/avm/genesis.go b/vms/avm/genesis.go index 506d2465d691..b2d6e7409152 100644 --- a/vms/avm/genesis.go +++ b/vms/avm/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -19,6 +19,6 @@ type GenesisAsset struct { txs.CreateAssetTx `serialize:"true"` } -func (g *GenesisAsset) Less(other *GenesisAsset) bool { - return g.Alias < other.Alias +func (g *GenesisAsset) Compare(other *GenesisAsset) int { + return utils.Compare(g.Alias, other.Alias) } diff --git a/vms/avm/genesis_test.go b/vms/avm/genesis_test.go index 10c7aac40295..2e5da96fc63e 100644 --- a/vms/avm/genesis_test.go +++ b/vms/avm/genesis_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestGenesisAssetLess(t *testing.T) { - require := require.New(t) - - var g1, g2 GenesisAsset - require.False(g1.Less(&g2)) - require.False(g2.Less(&g1)) - - g1 = GenesisAsset{ - Alias: "a", +func TestGenesisAssetCompare(t *testing.T) { + tests := []struct { + a *GenesisAsset + b *GenesisAsset + expected int + }{ + { + a: &GenesisAsset{}, + b: &GenesisAsset{}, + expected: 0, + }, + { + a: &GenesisAsset{ + Alias: "a", + }, + b: &GenesisAsset{ + Alias: "aa", + }, + expected: -1, + }, } - g2 = GenesisAsset{ - Alias: "aa", + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.Alias, test.b.Alias, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.True(g1.Less(&g2)) - require.False(g2.Less(&g1)) } diff --git a/vms/avm/health.go b/vms/avm/health.go index 725418b1ec8f..6cb2e14b0776 100644 --- a/vms/avm/health.go +++ b/vms/avm/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go index 74b306c16c0f..03a2fd863c6a 100644 --- a/vms/avm/index_test.go +++ b/vms/avm/index_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -52,12 +52,15 @@ func TestIndexTransaction_Ordered(t *testing.T) { env.vm.state.AddUTXO(utxo) // make transaction - tx := buildTX(utxoID, txAssetID, addr) + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addr) require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - // issue transaction + env.vm.ctx.Lock.Unlock() + issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + txs = append(txs, tx) } @@ -93,12 +96,16 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { env.vm.state.AddUTXO(utxo) // make transaction - tx := buildTX(utxoID, txAssetID, addr) + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addr) require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + env.vm.ctx.Lock.Unlock() + // issue transaction issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + addressTxMap[addr] = tx } @@ -142,12 +149,15 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { env.vm.state.AddUTXO(utxo) // make transaction - tx := buildTX(utxoID, txAssetID, addrs...) + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addrs...) require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - // issue transaction + env.vm.ctx.Lock.Unlock() + issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + assertIndexedTX(t, env.vm.db, 0, addr, txAssetID.ID, tx.ID()) assertLatestIdx(t, env.vm.db, addr, txAssetID.ID, 1) } @@ -258,7 +268,7 @@ func buildUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax } } -func buildTX(utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) *txs.Tx { +func buildTX(chainID ids.ID, utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, diff --git a/vms/avm/metrics/metrics.go b/vms/avm/metrics/metrics.go index 59c4e159a901..9e4053e1fcc6 100644 --- a/vms/avm/metrics/metrics.go +++ b/vms/avm/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/vms/avm/metrics/mock_metrics.go b/vms/avm/metrics/mock_metrics.go index b83a065e3fa0..2ae4a0786bf0 100644 --- a/vms/avm/metrics/mock_metrics.go +++ b/vms/avm/metrics/mock_metrics.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/avm/metrics (interfaces: Metrics) +// +// Generated by this command: +// +// mockgen -package=metrics -destination=vms/avm/metrics/mock_metrics.go github.com/ava-labs/avalanchego/vms/avm/metrics Metrics +// // Package metrics is a generated GoMock package. package metrics @@ -47,7 +49,7 @@ func (m *MockMetrics) AfterRequest(arg0 *rpc.RequestInfo) { } // AfterRequest indicates an expected call of AfterRequest. 
-func (mr *MockMetricsMockRecorder) AfterRequest(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) AfterRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterRequest", reflect.TypeOf((*MockMetrics)(nil).AfterRequest), arg0) } @@ -97,7 +99,7 @@ func (m *MockMetrics) InterceptRequest(arg0 *rpc.RequestInfo) *http.Request { } // InterceptRequest indicates an expected call of InterceptRequest. -func (mr *MockMetricsMockRecorder) InterceptRequest(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) InterceptRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptRequest", reflect.TypeOf((*MockMetrics)(nil).InterceptRequest), arg0) } @@ -111,7 +113,7 @@ func (m *MockMetrics) MarkBlockAccepted(arg0 block.Block) error { } // MarkBlockAccepted indicates an expected call of MarkBlockAccepted. -func (mr *MockMetricsMockRecorder) MarkBlockAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) MarkBlockAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkBlockAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkBlockAccepted), arg0) } @@ -125,7 +127,7 @@ func (m *MockMetrics) MarkTxAccepted(arg0 *txs.Tx) error { } // MarkTxAccepted indicates an expected call of MarkTxAccepted. -func (mr *MockMetricsMockRecorder) MarkTxAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) MarkTxAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkTxAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkTxAccepted), arg0) } diff --git a/vms/avm/metrics/tx_metrics.go b/vms/avm/metrics/tx_metrics.go index 217eeb18a346..3ae3e8cdea85 100644 --- a/vms/avm/metrics/tx_metrics.go +++ b/vms/avm/metrics/tx_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/vms/avm/network/atomic.go b/vms/avm/network/atomic.go index c6b011dab1cf..0774ed36603e 100644 --- a/vms/avm/network/atomic.go +++ b/vms/avm/network/atomic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -51,12 +51,14 @@ func (a *atomic) CrossChainAppRequestFailed( ctx context.Context, chainID ids.ID, requestID uint32, + appErr *common.AppError, ) error { h := a.handler.Get() return h.CrossChainAppRequestFailed( ctx, chainID, requestID, + appErr, ) } @@ -96,12 +98,14 @@ func (a *atomic) AppRequestFailed( ctx context.Context, nodeID ids.NodeID, requestID uint32, + appErr *common.AppError, ) error { h := a.handler.Get() return h.AppRequestFailed( ctx, nodeID, requestID, + appErr, ) } diff --git a/vms/avm/network/config.go b/vms/avm/network/config.go new file mode 100644 index 000000000000..8536504d8383 --- /dev/null +++ b/vms/avm/network/config.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
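+
+// These parameters are surfaced under the "network" key of the AVM chain
+// config JSON (see ParseConfig in vms/avm/config.go), so each of the defaults
+// below can be overridden per chain.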
+
+package network
+
+import (
+	"time"
+
+	"github.com/ava-labs/avalanchego/utils/units"
+)
+
+var DefaultConfig = Config{
+	MaxValidatorSetStaleness:                    time.Minute,
+	TargetGossipSize:                            20 * units.KiB,
+	PullGossipPollSize:                          1,
+	PullGossipFrequency:                         1500 * time.Millisecond,
+	PullGossipThrottlingPeriod:                  10 * time.Second,
+	PullGossipThrottlingLimit:                   2,
+	ExpectedBloomFilterElements:                 8 * 1024,
+	ExpectedBloomFilterFalsePositiveProbability: .01,
+	MaxBloomFilterFalsePositiveProbability:      .05,
+	LegacyPushGossipCacheSize:                   512,
+}
+
+type Config struct {
+	// MaxValidatorSetStaleness limits how old of a validator set the network
+	// will use for peer sampling and rate limiting.
+	MaxValidatorSetStaleness time.Duration `json:"max-validator-set-staleness"`
+	// TargetGossipSize is the number of bytes that will be attempted to be
+	// sent when pushing transactions and when responding to transaction pull
+	// requests.
+	TargetGossipSize int `json:"target-gossip-size"`
+	// PullGossipPollSize is the number of validators to sample when performing
+	// a round of pull gossip.
+	PullGossipPollSize int `json:"pull-gossip-poll-size"`
+	// PullGossipFrequency is how frequently rounds of pull gossip are
+	// performed.
+	PullGossipFrequency time.Duration `json:"pull-gossip-frequency"`
+	// PullGossipThrottlingPeriod is how large of a window the throttler should
+	// use.
+	PullGossipThrottlingPeriod time.Duration `json:"pull-gossip-throttling-period"`
+	// PullGossipThrottlingLimit is the number of pull queries that are allowed
+	// by a validator in every throttling window.
+	PullGossipThrottlingLimit int `json:"pull-gossip-throttling-limit"`
+	// ExpectedBloomFilterElements is the number of elements to expect when
+	// creating a new bloom filter. The larger this number is, the larger the
+	// bloom filter will be.
+	ExpectedBloomFilterElements int `json:"expected-bloom-filter-elements"`
+	// ExpectedBloomFilterFalsePositiveProbability is the expected probability
+	// of a false positive after having inserted ExpectedBloomFilterElements
+	// into a bloom filter. The smaller this number is, the larger the bloom
+	// filter will be.
+	ExpectedBloomFilterFalsePositiveProbability float64 `json:"expected-bloom-filter-false-positive-probability"`
+	// MaxBloomFilterFalsePositiveProbability is used to determine when the
+	// bloom filter should be refreshed. Once the expected probability of a
+	// false positive exceeds this value, the bloom filter will be regenerated.
+	// The smaller this number is, the more frequently that the bloom filter
+	// will be regenerated.
+	MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"`
+	// LegacyPushGossipCacheSize tracks the most recently received transactions
+	// and ensures they are only gossiped once.
+	//
+	// Deprecated: The legacy push gossip mechanism is deprecated in favor of
+	// the p2p SDK's push gossip mechanism.
+	LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"`
+}
diff --git a/vms/avm/network/gossip.go b/vms/avm/network/gossip.go
new file mode 100644
index 000000000000..0876f122c660
--- /dev/null
+++ b/vms/avm/network/gossip.go
@@ -0,0 +1,161 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package network + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + _ gossip.Set[*txs.Tx] = (*gossipMempool)(nil) + _ gossip.Marshaller[*txs.Tx] = (*txParser)(nil) +) + +// bloomChurnMultiplier is the number used to multiply the size of the mempool +// to determine how large of a bloom filter to create. +const bloomChurnMultiplier = 3 + +// txGossipHandler is the handler called when serving gossip messages +type txGossipHandler struct { + p2p.NoOpHandler + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip( + ctx context.Context, + nodeID ids.NodeID, + gossipBytes []byte, +) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t txGossipHandler) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, +) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +type txParser struct { + parser txs.Parser +} + +func (*txParser) MarshalGossip(tx *txs.Tx) ([]byte, error) { + return tx.Bytes(), nil +} + +func (g *txParser) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { + return g.parser.ParseTx(bytes) +} + +func newGossipMempool( + mempool mempool.Mempool, + registerer prometheus.Registerer, + log logging.Logger, + txVerifier TxVerifier, + parser txs.Parser, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) (*gossipMempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + return &gossipMempool{ + Mempool: mempool, + log: log, + txVerifier: txVerifier, + parser: parser, + bloom: bloom, + }, err +} + +type gossipMempool struct { + mempool.Mempool + log logging.Logger + txVerifier TxVerifier + parser txs.Parser + + lock sync.RWMutex + bloom *gossip.BloomFilter +} + +// Add is called by the p2p SDK when handling transactions that were pushed to +// us and when handling transactions that were pulled from a peer. If this +// returns a nil error while handling push gossip, the p2p SDK will queue the +// transaction to push gossip as well. +func (g *gossipMempool) Add(tx *txs.Tx) error { + txID := tx.ID() + if _, ok := g.Mempool.Get(txID); ok { + return fmt.Errorf("attempted to issue %w: %s ", mempool.ErrDuplicateTx, txID) + } + + if reason := g.Mempool.GetDropReason(txID); reason != nil { + // If the tx is being dropped - just ignore it + // + // TODO: Should we allow re-verification of the transaction even if it + // failed previously? 
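+		//
+		// For now, the stored drop reason is returned as-is and the tx is not
+		// re-verified.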
+ return reason + } + + // Verify the tx at the currently preferred state + if err := g.txVerifier.VerifyTx(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + return g.AddVerified(tx) +} + +func (g *gossipMempool) AddVerified(tx *txs.Tx) error { + if err := g.Mempool.Add(tx); err != nil { + g.Mempool.MarkDropped(tx.ID(), err) + return err + } + + g.lock.Lock() + defer g.lock.Unlock() + + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, g.Mempool.Len()*bloomChurnMultiplier) + if err != nil { + return err + } + + if reset { + g.log.Debug("resetting bloom filter") + g.Mempool.Iterate(func(tx *txs.Tx) bool { + g.bloom.Add(tx) + return true + }) + } + + g.Mempool.RequestBuildBlock() + return nil +} + +func (g *gossipMempool) Iterate(f func(*txs.Tx) bool) { + g.Mempool.Iterate(f) +} + +func (g *gossipMempool) GetFilter() (bloom []byte, salt []byte) { + g.lock.RLock() + defer g.lock.RUnlock() + + return g.bloom.Marshal() +} diff --git a/vms/avm/network/gossip_test.go b/vms/avm/network/gossip_test.go new file mode 100644 index 000000000000..2eb00dad15dc --- /dev/null +++ b/vms/avm/network/gossip_test.go @@ -0,0 +1,134 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var _ TxVerifier = (*testVerifier)(nil) + +type testVerifier struct { + err error +} + +func (v testVerifier) VerifyTx(*txs.Tx) error { + return v.err +} + +func TestMarshaller(t *testing.T) { + require := require.New(t) + + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) + require.NoError(err) + + marhsaller := txParser{ + parser: parser, + } + + want := &txs.Tx{Unsigned: &txs.BaseTx{}} + require.NoError(want.Initialize(parser.Codec())) + + bytes, err := marhsaller.MarshalGossip(want) + require.NoError(err) + + got, err := marhsaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestGossipMempoolAdd(t *testing.T) { + require := require.New(t) + + metrics := prometheus.NewRegistry() + toEngine := make(chan common.Message, 1) + + baseMempool, err := mempool.New("", metrics, toEngine) + require.NoError(err) + + parser, err := txs.NewParser(time.Time{}, nil) + require.NoError(err) + + mempool, err := newGossipMempool( + baseMempool, + metrics, + logging.NoLog{}, + testVerifier{}, + parser, + DefaultConfig.ExpectedBloomFilterElements, + DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + DefaultConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{}, + }, + }, + TxID: ids.GenerateTestID(), + } + + require.NoError(mempool.Add(tx)) + require.True(mempool.bloom.Has(tx)) +} + +func TestGossipMempoolAddVerified(t *testing.T) { + require := require.New(t) + + metrics := prometheus.NewRegistry() + toEngine := make(chan common.Message, 1) + + baseMempool, err := 
mempool.New("", metrics, toEngine) + require.NoError(err) + + parser, err := txs.NewParser(time.Time{}, nil) + require.NoError(err) + + mempool, err := newGossipMempool( + baseMempool, + metrics, + logging.NoLog{}, + testVerifier{ + err: errTest, // We shouldn't be attempting to verify the tx in this flow + }, + parser, + DefaultConfig.ExpectedBloomFilterElements, + DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + DefaultConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{}, + }, + }, + TxID: ids.GenerateTestID(), + } + + require.NoError(mempool.AddVerified(tx)) + require.True(mempool.bloom.Has(tx)) +} diff --git a/vms/avm/network/network.go b/vms/avm/network/network.go index 1c3e7e5558ae..d88599d928a4 100644 --- a/vms/avm/network/network.go +++ b/vms/avm/network/network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -6,43 +6,41 @@ package network import ( "context" "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/avm/block/executor" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/message" ) -// We allow [recentTxsCacheSize] to be fairly large because we only store hashes -// in the cache, not entire transactions. -const recentTxsCacheSize = 512 +const txGossipHandlerID = 0 -var _ Network = (*network)(nil) +var ( + _ common.AppHandler = (*Network)(nil) + _ validators.Connector = (*Network)(nil) +) -type Network interface { - common.AppHandler +type Network struct { + *p2p.Network - // IssueTx verifies the transaction at the currently preferred state, adds - // it to the mempool, and gossips it to the network. - // - // Invariant: Assumes the context lock is held. 
- IssueTx(context.Context, *txs.Tx) error -} - -type network struct { - // We embed a noop handler for all unhandled messages - common.AppHandler + txPushGossiper gossip.Accumulator[*txs.Tx] + txPullGossiper gossip.Gossiper + txPullGossipFrequency time.Duration ctx *snow.Context parser txs.Parser - manager executor.Manager - mempool mempool.Mempool + mempool *gossipMempool appSender common.AppSender // gossip related attributes @@ -53,26 +51,128 @@ type network struct { func New( ctx *snow.Context, parser txs.Parser, - manager executor.Manager, + txVerifier TxVerifier, mempool mempool.Mempool, appSender common.AppSender, -) Network { - return &network{ - AppHandler: common.NewNoOpAppHandler(ctx.Log), + registerer prometheus.Registerer, + config Config, +) (*Network, error) { + p2pNetwork, err := p2p.NewNetwork(ctx.Log, appSender, registerer, "p2p") + if err != nil { + return nil, err + } + + marshaller := &txParser{ + parser: parser, + } + validators := p2p.NewValidators( + p2pNetwork.Peers, + ctx.Log, + ctx.SubnetID, + ctx.ValidatorState, + config.MaxValidatorSetStaleness, + ) + txGossipClient := p2pNetwork.NewClient( + txGossipHandlerID, + p2p.WithValidatorSampling(validators), + ) + txGossipMetrics, err := gossip.NewMetrics(registerer, "tx") + if err != nil { + return nil, err + } - ctx: ctx, - parser: parser, - manager: manager, - mempool: mempool, - appSender: appSender, + txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + txGossipClient, + txGossipMetrics, + config.TargetGossipSize, + ) + + gossipMempool, err := newGossipMempool( + mempool, + registerer, + ctx.Log, + txVerifier, + parser, + config.ExpectedBloomFilterElements, + config.ExpectedBloomFilterFalsePositiveProbability, + config.MaxBloomFilterFalsePositiveProbability, + ) + if err != nil { + return nil, err + } + + var txPullGossiper gossip.Gossiper + txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( + ctx.Log, + marshaller, + gossipMempool, + txGossipClient, + txGossipMetrics, + config.PullGossipPollSize, + ) + + // Gossip requests are only served if a node is a validator + txPullGossiper = gossip.ValidatorGossiper{ + Gossiper: txPullGossiper, + NodeID: ctx.NodeID, + Validators: validators, + } + + handler := gossip.NewHandler[*txs.Tx]( + ctx.Log, + marshaller, + txPushGossiper, + gossipMempool, + txGossipMetrics, + config.TargetGossipSize, + ) + + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler( + config.PullGossipThrottlingPeriod, + config.PullGossipThrottlingLimit, + ), + ctx.Log, + ), + validators, + ctx.Log, + ) + + // We allow pushing txs between all peers, but only serve gossip requests + // from validators + txGossipHandler := txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } + + if err := p2pNetwork.AddHandler(txGossipHandlerID, txGossipHandler); err != nil { + return nil, err + } + + return &Network{ + Network: p2pNetwork, + txPushGossiper: txPushGossiper, + txPullGossiper: txPullGossiper, + txPullGossipFrequency: config.PullGossipFrequency, + ctx: ctx, + parser: parser, + mempool: gossipMempool, + appSender: appSender, recentTxs: &cache.LRU[ids.ID, struct{}]{ - Size: recentTxsCacheSize, + Size: config.LegacyPushGossipCacheSize, }, - } + }, nil } -func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { +func (n *Network) Gossip(ctx context.Context) { + gossip.Every(ctx, n.ctx.Log, n.txPullGossiper, n.txPullGossipFrequency) +} + +func (n *Network) 
AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { n.ctx.Log.Debug("called AppGossip message handler", zap.Stringer("nodeID", nodeID), zap.Int("messageLen", len(msgBytes)), @@ -80,10 +180,11 @@ func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []b msgIntf, err := message.Parse(msgBytes) if err != nil { - n.ctx.Log.Debug("dropping AppGossip message", + n.ctx.Log.Debug("forwarding AppGossip message to SDK network", zap.String("reason", "failed to parse message"), ) - return nil + + return n.Network.AppGossip(ctx, nodeID, msgBytes) } msg, ok := msgIntf.(*message.Tx) @@ -103,85 +204,76 @@ func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []b ) return nil } - txID := tx.ID() - // We need to grab the context lock here to avoid racy behavior with - // transaction verification + mempool modifications. - // - // Invariant: tx should not be referenced again without the context lock - // held to avoid any data races. - n.ctx.Lock.Lock() - err = n.issueTx(tx) - n.ctx.Lock.Unlock() - if err == nil { - n.gossipTx(ctx, txID, msgBytes) + if err := n.mempool.Add(tx); err == nil { + txID := tx.ID() + n.txPushGossiper.Add(tx) + if err := n.txPushGossiper.Gossip(ctx); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + } + n.gossipTxMessage(ctx, txID, msgBytes) } return nil } -func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { - if err := n.issueTx(tx); err != nil { +// IssueTx attempts to add a tx to the mempool, after verifying it. If the tx is +// added to the mempool, it will attempt to push gossip the tx to random peers +// in the network using both the legacy and p2p SDK. +// +// If the tx is already in the mempool, mempool.ErrDuplicateTx will be +// returned. +// If the tx is not added to the mempool, an error will be returned. +func (n *Network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.mempool.Add(tx); err != nil { return err } + return n.gossipTx(ctx, tx) +} - txBytes := tx.Bytes() - msg := &message.Tx{ - Tx: txBytes, - } - msgBytes, err := message.Build(msg) - if err != nil { +// IssueVerifiedTx attempts to add a tx to the mempool, without first verifying +// it. If the tx is added to the mempool, it will attempt to push gossip the tx +// to random peers in the network using both the legacy and p2p SDK. +// +// If the tx is already in the mempool, mempool.ErrDuplicateTx will be +// returned. +// If the tx is not added to the mempool, an error will be returned. +func (n *Network) IssueVerifiedTx(ctx context.Context, tx *txs.Tx) error { + if err := n.mempool.AddVerified(tx); err != nil { return err } - - txID := tx.ID() - n.gossipTx(ctx, txID, msgBytes) - return nil + return n.gossipTx(ctx, tx) } -// returns nil if the tx is in the mempool -func (n *network) issueTx(tx *txs.Tx) error { - txID := tx.ID() - if n.mempool.Has(txID) { - // The tx is already in the mempool - return nil - } - - if reason := n.mempool.GetDropReason(txID); reason != nil { - // If the tx is being dropped - just ignore it - // - // TODO: Should we allow re-verification of the transaction even if it - // failed previously? - return reason - } - - // Verify the tx at the currently preferred state - if err := n.manager.VerifyTx(tx); err != nil { - n.ctx.Log.Debug("tx failed verification", - zap.Stringer("txID", txID), +// gossipTx pushes the tx to peers using both the legacy and p2p SDK. 
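+// A failure from the SDK push gossiper is only logged and does not stop the
+// legacy message from being sent; the only error returned to the caller is a
+// failure to build the legacy message.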
+func (n *Network) gossipTx(ctx context.Context, tx *txs.Tx) error { + n.txPushGossiper.Add(tx) + if err := n.txPushGossiper.Gossip(ctx); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", tx.ID()), zap.Error(err), ) - - n.mempool.MarkDropped(txID, err) - return err } - if err := n.mempool.Add(tx); err != nil { - n.ctx.Log.Debug("tx failed to be added to the mempool", - zap.Stringer("txID", txID), - zap.Error(err), - ) - - n.mempool.MarkDropped(txID, err) + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { return err } - n.mempool.RequestBuildBlock() + txID := tx.ID() + n.gossipTxMessage(ctx, txID, msgBytes) return nil } -func (n *network) gossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { - // This lock is just to ensure there isn't racy behavior between checking if - // the tx was gossiped and marking the tx as gossiped. +// gossipTxMessage pushes the tx message to peers using the legacy format. +// If the tx was recently gossiped, this function does nothing. +func (n *Network) gossipTxMessage(ctx context.Context, txID ids.ID, msgBytes []byte) { n.recentTxsLock.Lock() _, has := n.recentTxs.Get(txID) n.recentTxs.Put(txID, struct{}{}) diff --git a/vms/avm/network/network_test.go b/vms/avm/network/network_test.go index 6d779eadcc28..da66dff9b2ac 100644 --- a/vms/avm/network/network_test.go +++ b/vms/avm/network/network_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -7,6 +7,9 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -27,7 +30,22 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var errTest = errors.New("test error") +var ( + testConfig = Config{ + MaxValidatorSetStaleness: time.Second, + TargetGossipSize: 1, + PullGossipPollSize: 1, + PullGossipFrequency: time.Second, + PullGossipThrottlingPeriod: time.Second, + PullGossipThrottlingLimit: 1, + ExpectedBloomFilterElements: 10, + ExpectedBloomFilterFalsePositiveProbability: .1, + MaxBloomFilterFalsePositiveProbability: .5, + LegacyPushGossipCacheSize: 512, + } + + errTest = errors.New("test error") +) func TestNetworkAppGossip(t *testing.T) { testTx := &txs.Tx{ @@ -41,37 +59,31 @@ func TestNetworkAppGossip(t *testing.T) { }, } - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(t, err) - require.NoError(t, parser.InitializeTx(testTx)) + require.NoError(t, testTx.Initialize(parser.Codec())) type test struct { - name string - msgBytesFunc func() []byte - mempoolFunc func(*gomock.Controller) mempool.Mempool - appSenderFunc func(*gomock.Controller) common.AppSender + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifierFunc func(*gomock.Controller) TxVerifier + appSenderFunc func(*gomock.Controller) common.AppSender } tests := []test{ { - // Shouldn't attempt to issue or gossip the tx name: "invalid message bytes", msgBytesFunc: func() []byte { return []byte{0x00} }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - // Unused in this test - return nil - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return nil - }, }, { - // 
Shouldn't attempt to issue or gossip the tx name: "invalid tx bytes", msgBytesFunc: func() []byte { msg := message.Tx{ @@ -81,18 +93,42 @@ func TestNetworkAppGossip(t *testing.T) { require.NoError(t, err) return msgBytes }, + }, + { + name: "tx already in mempool", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - // Unused in this test - return mempool.NewMockMempool(ctrl) + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(testTx, true) + return mempool }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return common.NewMockSender(ctrl) + }, + { + name: "tx previously dropped", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool }, }, { - // Issue returns nil because mempool has tx. We should gossip the tx. - name: "issuance succeeds", + name: "transaction invalid", msgBytesFunc: func() []byte { msg := message.Tx{ Tx: testTx.Bytes(), @@ -103,18 +139,19 @@ func TestNetworkAppGossip(t *testing.T) { }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(true) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) - return appSender + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return txVerifier }, }, { - // Issue returns error because tx was dropped. We shouldn't gossip the tx. 
- name: "issuance fails", + name: "happy path", msgBytesFunc: func() []byte { msg := message.Tx{ Tx: testTx.Bytes(), @@ -125,13 +162,22 @@ func TestNetworkAppGossip(t *testing.T) { }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock() return mempool }, + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier + }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return common.NewMockSender(ctrl) + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender }, }, } @@ -141,22 +187,49 @@ func TestNetworkAppGossip(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) require.NoError(err) - n := New( + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } + if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + txVerifierFunc := func(ctrl *gomock.Controller) TxVerifier { + return executor.NewMockManager(ctrl) + } + if tt.txVerifierFunc != nil { + txVerifierFunc = tt.txVerifierFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = tt.appSenderFunc + } + + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, parser, - executor.NewMockManager(ctrl), // Manager is unused in this test - tt.mempoolFunc(ctrl), - tt.appSenderFunc(ctrl), + txVerifierFunc(ctrl), + mempoolFunc(ctrl), + appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, ) + require.NoError(err) require.NoError(n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc())) }) } @@ -164,11 +237,11 @@ func TestNetworkAppGossip(t *testing.T) { func TestNetworkIssueTx(t *testing.T) { type test struct { - name string - mempoolFunc func(*gomock.Controller) mempool.Mempool - managerFunc func(*gomock.Controller) executor.Manager - appSenderFunc func(*gomock.Controller) common.AppSender - expectedErr error + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifierFunc func(*gomock.Controller) TxVerifier + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error } tests := []test{ @@ -176,56 +249,34 @@ func TestNetworkIssueTx(t *testing.T) { name: "mempool has transaction", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(true) + mempool.EXPECT().Get(gomock.Any()).Return(nil, true) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - // Unused in this test - return executor.NewMockManager(ctrl) - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Should gossip the tx - 
appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) - return appSender - }, - expectedErr: nil, + expectedErr: mempool.ErrDuplicateTx, }, { name: "transaction marked as dropped in mempool", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - // Unused in this test - return executor.NewMockManager(ctrl) - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) - }, expectedErr: errTest, }, { name: "transaction invalid", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(errTest) - return manager - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return txVerifier }, expectedErr: errTest, }, @@ -233,20 +284,16 @@ func TestNetworkIssueTx(t *testing.T) { name: "can't add transaction to mempool", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(errTest) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) - return manager - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier }, expectedErr: errTest, }, @@ -254,21 +301,21 @@ func TestNetworkIssueTx(t *testing.T) { name: "happy path", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) mempool.EXPECT().RequestBuildBlock() return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) - return manager + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + 
txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Should gossip the tx appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) return appSender }, expectedErr: nil, @@ -280,40 +327,154 @@ func TestNetworkIssueTx(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) require.NoError(err) - n := New( + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } + if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + txVerifierFunc := func(ctrl *gomock.Controller) TxVerifier { + return executor.NewMockManager(ctrl) + } + if tt.txVerifierFunc != nil { + txVerifierFunc = tt.txVerifierFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = tt.appSenderFunc + } + + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, parser, - tt.managerFunc(ctrl), - tt.mempoolFunc(ctrl), - tt.appSenderFunc(ctrl), + txVerifierFunc(ctrl), + mempoolFunc(ctrl), + appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, ) + require.NoError(err) err = n.IssueTx(context.Background(), &txs.Tx{}) require.ErrorIs(err, tt.expectedErr) }) } } +func TestNetworkIssueVerifiedTx(t *testing.T) { + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + expectedErr: errTest, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock() + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) + require.NoError(err) + + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } + if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = tt.appSenderFunc + } + + n, err := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + parser, + executor.NewMockManager(ctrl), // Should never verify a tx + mempoolFunc(ctrl), 
+ appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + err = n.IssueVerifiedTx(context.Background(), &txs.Tx{}) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + func TestNetworkGossipTx(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) appSender := common.NewMockSender(ctrl) - nIntf := New( + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, @@ -321,19 +482,20 @@ func TestNetworkGossipTx(t *testing.T) { executor.NewMockManager(ctrl), mempool.NewMockMempool(ctrl), appSender, + prometheus.NewRegistry(), + testConfig, ) - require.IsType(&network{}, nIntf) - n := nIntf.(*network) + require.NoError(err) // Case: Tx was recently gossiped txID := ids.GenerateTestID() n.recentTxs.Put(txID, struct{}{}) - n.gossipTx(context.Background(), txID, []byte{}) + n.gossipTxMessage(context.Background(), txID, []byte{}) // Didn't make a call to SendAppGossip // Case: Tx was not recently gossiped msgBytes := []byte{1, 2, 3} appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) - n.gossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + n.gossipTxMessage(context.Background(), ids.GenerateTestID(), msgBytes) // Did make a call to SendAppGossip } diff --git a/vms/avm/network/tx_verifier.go b/vms/avm/network/tx_verifier.go new file mode 100644 index 000000000000..09f869283448 --- /dev/null +++ b/vms/avm/network/tx_verifier.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "sync" + + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ TxVerifier = (*LockedTxVerifier)(nil) + +type TxVerifier interface { + // VerifyTx verifies that the transaction should be issued into the mempool. + VerifyTx(tx *txs.Tx) error +} + +type LockedTxVerifier struct { + lock sync.Locker + txVerifier TxVerifier +} + +func (l *LockedTxVerifier) VerifyTx(tx *txs.Tx) error { + l.lock.Lock() + defer l.lock.Unlock() + + return l.txVerifier.VerifyTx(tx) +} + +func NewLockedTxVerifier(lock sync.Locker, txVerifier TxVerifier) *LockedTxVerifier { + return &LockedTxVerifier{ + lock: lock, + txVerifier: txVerifier, + } +} diff --git a/vms/avm/pubsub_filterer.go b/vms/avm/pubsub_filterer.go index 242970347901..caf0ba348393 100644 --- a/vms/avm/pubsub_filterer.go +++ b/vms/avm/pubsub_filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/pubsub_filterer_test.go b/vms/avm/pubsub_filterer_test.go index 95f4fc3cd229..0059b2218e39 100644 --- a/vms/avm/pubsub_filterer_test.go +++ b/vms/avm/pubsub_filterer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/service.go b/vms/avm/service.go index 0daa5942f984..2b0d65c0fd20 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -216,16 +216,16 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, reply *api.JSO return fmt.Errorf("problem decoding transaction: %w", err) } - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - txID, err := s.vm.IssueTx(txBytes) + tx, err := s.vm.parser.ParseTx(txBytes) if err != nil { + s.vm.ctx.Log.Debug("failed to parse tx", + zap.Error(err), + ) return err } - reply.TxID = txID - return nil + reply.TxID, err = s.vm.issueTx(tx) + return err } // GetTxStatusReply defines the GetTxStatus replies returned from the API @@ -727,14 +727,30 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass zap.Int("numMinters", len(args.MinterSets)), ) + tx, changeAddr, err := s.buildCreateAssetTx(args) + if err != nil { + return err + } + + assetID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildCreateAssetTx(args *CreateAssetArgs) (*txs.Tx, ids.ShortID, error) { if len(args.InitialHolders) == 0 && len(args.MinterSets) == 0 { - return errNoHoldersOrMinters + return nil, ids.ShortEmpty, errNoHoldersOrMinters } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -743,16 +759,16 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -763,7 +779,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -788,7 +804,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass for _, holder := range args.InitialHolders { addr, err := avax.ParseServiceAddress(s.vm, holder.Address) if err != nil { - return err + return nil, ids.ShortEmpty, err } initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ Amt: uint64(holder.Amount), @@ -807,15 +823,17 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass } minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { - return err + return nil, ids.ShortEmpty, err } minter.Addrs = minterAddrsSet.List() utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.CreateAssetTx{ + codec := s.vm.parser.Codec() + initialState.Sort(codec) + + tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -827,18 +845,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass Denomination: args.Denomination, States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - assetID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.AssetID = assetID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } // CreateFixedCapAsset returns ID of the newly created asset @@ -885,14 +892,30 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl zap.Int("numMinters", len(args.MinterSets)), ) + tx, changeAddr, err := s.buildCreateNFTAsset(args) + if err != nil { + return err + } + + assetID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildCreateNFTAsset(args *CreateNFTAssetArgs) (*txs.Tx, ids.ShortID, error) { if len(args.MinterSets) == 0 { - return errNoMinters + return nil, ids.ShortEmpty, errNoMinters } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -901,16 +924,16 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -921,7 +944,7 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -952,15 +975,17 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl } minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { - return err + return nil, ids.ShortEmpty, err } minter.Addrs = minterAddrsSet.List() utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.CreateAssetTx{ + codec := s.vm.parser.Codec() + initialState.Sort(codec) + + tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -972,18 +997,7 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl Denomination: 0, // NFTs are non-fungible States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - assetID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.AssetID = assetID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } // CreateAddress creates an address for the user [args.Username] @@ -1198,18 +1212,34 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildSendMultiple(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildSendMultiple(args *SendMultipleArgs) (*txs.Tx, ids.ShortID, error) { // Validate the memo field memoBytes := []byte(args.Memo) if l := len(memoBytes); l > avax.MaxMemoSize { - return fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) + return nil, ids.ShortEmpty, fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) } else if len(args.Outputs) == 0 { - return errNoOutputs + return nil, ids.ShortEmpty, errNoOutputs } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -1218,16 +1248,16 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a // Load user's UTXOs/keys utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Calculate required input amounts and create the desired outputs @@ -1239,27 +1269,27 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a outs := []*avax.TransferableOutput{} for _, output := range args.Outputs { if output.Amount == 0 { - return errZeroAmount + return nil, ids.ShortEmpty, errZeroAmount } assetID, ok := assetIDs[output.AssetID] // Asset ID of next output if !ok { assetID, err = s.vm.lookupAssetID(output.AssetID) if err != nil { - return fmt.Errorf("couldn't find asset %s", output.AssetID) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't find asset %s", output.AssetID) } assetIDs[output.AssetID] = assetID } currentAmount := amounts[assetID] newAmount, err := safemath.Add64(currentAmount, uint64(output.Amount)) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amounts[assetID] = newAmount // Parse the to address to, err := avax.ParseServiceAddress(s.vm, output.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", output.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", output.To, err) } // Create the Output @@ -1283,7 +1313,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a amountWithFee, err := safemath.Add64(amounts[s.vm.feeAssetID], s.vm.TxFee) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amountsWithFee[s.vm.feeAssetID] = amountWithFee @@ -1293,7 +1323,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a amountsWithFee, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Add the required change outputs @@ -1314,27 +1344,18 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a }) } } - avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + codec := s.vm.parser.Codec() + avax.SortTransferableOutputs(outs, codec) + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, Memo: memoBytes, }}} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } // MintArgs are arguments for passing into Mint requests @@ -1353,24 +1374,46 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildMint(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildMint(args *MintArgs) (*txs.Tx, 
ids.ShortID, error) { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "mint"), + logging.UserString("username", args.Username), + ) + if args.Amount == 0 { - return errInvalidMintAmount + return nil, ids.ShortEmpty, errInvalidMintAmount } assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -1379,16 +1422,16 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang // Get the UTXOs/keys for the from addresses feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. if len(feeKc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -1399,7 +1442,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1420,7 +1463,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang // Get all UTXOs/keys for the user utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, ids.ShortEmpty, err } ops, opKeys, err := s.vm.Mint( @@ -1432,11 +1475,11 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } keys = append(keys, opKeys...) 
- tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1445,18 +1488,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys) } // SendNFTArgs are arguments for passing into SendNFT requests @@ -1475,22 +1507,38 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildSendNFT(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildSendNFT(args *SendNFTArgs) (*txs.Tx, ids.ShortID, error) { // Parse the asset ID assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the to address to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -1499,16 +1547,16 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
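// Editorial note, not part of the patch: every rewritten handler in service.go
// now follows the same two-step shape — a build* helper returning the signed tx
// plus the change address, and a thin RPC wrapper that issues it via vm.issueTx.
// A minimal sketch of that shape; Foo, FooArgs, and buildFoo are hypothetical
// names used only for illustration:
func (s *Service) Foo(_ *http.Request, args *FooArgs, reply *api.JSONTxIDChangeAddr) error {
	tx, changeAddr, err := s.buildFoo(args)
	if err != nil {
		return err
	}

	txID, err := s.vm.issueTx(tx)
	if err != nil {
		return fmt.Errorf("problem issuing transaction: %w", err)
	}

	reply.TxID = txID
	reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr)
	return err
}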
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, secpKeys, err := s.vm.Spend( @@ -1519,7 +1567,7 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1545,10 +1593,10 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1557,21 +1605,12 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { - return err - } - if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { - return err - } - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) + codec := s.vm.parser.Codec() + if err := tx.SignSECP256K1Fx(codec, secpKeys); err != nil { + return nil, ids.ShortEmpty, err } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignNFTFx(codec, nftKeys) } // MintNFTArgs are arguments for passing into MintNFT requests @@ -1591,25 +1630,41 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI logging.UserString("username", args.Username), ) - assetID, err := s.vm.lookupAssetID(args.AssetID) + tx, changeAddr, err := s.buildMintNFT(args) if err != nil { return err } + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildMintNFT(args *MintNFTArgs) (*txs.Tx, ids.ShortID, error) { + assetID, err := s.vm.lookupAssetID(args.AssetID) + if err != nil { + return nil, ids.ShortEmpty, err + } + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } payloadBytes, err := formatting.Decode(args.Encoding, args.Payload) if err != nil { - return fmt.Errorf("problem decoding payload bytes: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem decoding payload bytes: %w", err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -1618,16 +1673,16 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI // Get the UTXOs/keys for the from addresses feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(feeKc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, secpKeys, err := s.vm.Spend( @@ -1638,7 +1693,7 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1659,7 +1714,7 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI // Get all UTXOs/keys utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, ids.ShortEmpty, err } ops, nftKeys, err := s.vm.MintNFT( @@ -1670,10 +1725,10 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1682,21 +1737,12 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { - return err - } - if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { - return err - } - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) + codec := s.vm.parser.Codec() + if err := tx.SignSECP256K1Fx(codec, secpKeys); err != nil { + return nil, ids.ShortEmpty, err } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignNFTFx(codec, nftKeys) } // ImportArgs are arguments for passing into Import requests @@ -1721,14 +1767,29 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) logging.UserString("username", args.Username), ) + tx, err := s.buildImport(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} + +func (s *Service) buildImport(args *ImportArgs) (*txs.Tx, error) { chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) + return nil, fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } s.vm.ctx.Lock.Lock() @@ -1736,17 +1797,17 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, err } atomicUTXOs, _, _, err := s.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, int(maxPageSize)) if err != nil { - return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + return nil, fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } amountsSpent, importInputs, importKeys, err := s.vm.SpendAll(atomicUTXOs, kc) if err != nil { - return err + return nil, err } ins := []*avax.TransferableInput{} @@ -1762,12 +1823,12 @@ func (s *Service) Import(_ 
*http.Request, args *ImportArgs, reply *api.JSONTxID) }, ) if err != nil { - return err + return nil, err } for asset, amount := range localAmountsSpent { newAmount, err := safemath.Add64(amountsSpent[asset], amount) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, fmt.Errorf("problem calculating required spend amount: %w", err) } amountsSpent[asset] = newAmount } @@ -1797,7 +1858,7 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) } avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.ImportTx{ + tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1807,17 +1868,7 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) SourceChain: chainID, ImportedIns: importInputs, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - return nil + return tx, tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys) } // ExportArgs are arguments for passing into ExportAVA requests @@ -1847,10 +1898,26 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildExport(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildExport(args *ExportArgs) (*txs.Tx, ids.ShortID, error) { // Parse the asset ID assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Get the chainID and parse the to address @@ -1858,22 +1925,22 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC if err != nil { chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err = ids.ShortFromString(args.To) if err != nil { - return err + return nil, ids.ShortEmpty, err } } if args.Amount == 0 { - return errZeroAmount + return nil, ids.ShortEmpty, errZeroAmount } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() @@ -1882,23 +1949,23 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amounts := map[ids.ID]uint64{} if assetID == s.vm.feeAssetID { amountWithFee, err := safemath.Add64(uint64(args.Amount), s.vm.TxFee) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amounts[s.vm.feeAssetID] = amountWithFee } else { @@ -1908,7 +1975,7 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC amountsSpent, ins, keys, err := s.vm.Spend(utxos, kc, amounts) if err != nil { - return err + return nil, ids.ShortEmpty, err } exportOuts := []*avax.TransferableOutput{{ @@ -1940,9 +2007,11 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC }) } } - avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.ExportTx{ + codec := s.vm.parser.Codec() + avax.SortTransferableOutputs(outs, codec) + + tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1952,16 +2021,5 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC DestinationChain: chainID, ExportedOuts: exportOuts, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 67a92a663879..9f4894943568 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -6,6 +6,7 @@ package avm import ( "context" "fmt" + "strings" "testing" "time" @@ -36,7 +37,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/block/executor" "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" @@ -63,7 +64,7 @@ func TestServiceIssueTx(t *testing.T) { err := env.service.IssueTx(nil, txArgs, txReply) require.ErrorIs(err, codec.ErrCantUnpackVersion) - tx := newTx(t, env.genesisBytes, env.vm, "AVAX") + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") txArgs.Tx, err = formatting.Encode(formatting.Hex, tx.Bytes()) require.NoError(err) txArgs.Encoding = formatting.Hex @@ -89,7 +90,7 @@ func TestServiceGetTxStatus(t *testing.T) { err := env.service.GetTxStatus(nil, statusArgs, statusReply) require.ErrorIs(err, errNilTxID) - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm) + newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) txID := newTx.ID() statusArgs = &api.JSONTxID{ @@ -99,12 +100,8 @@ func TestServiceGetTxStatus(t *testing.T) { require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Unknown, statusReply.Status) - env.vm.ctx.Lock.Lock() - issueAndAccept(require, env.vm, env.issuer, newTx) - env.vm.ctx.Lock.Unlock() - statusReply = &GetTxStatusReply{} require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Accepted, statusReply.Status) @@ -539,17 +536,16 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm) + newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, newTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), @@ -557,27 +553,82 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - require.Contains(jsonString, `"memo":"0x0102030405060708"`) - require.Contains(jsonString, `"inputs":[{"txID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","outputIndex":2,"assetID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","input":{"amount":50000,"signatureIndices":[0]}}]`) - require.Contains(jsonString, `"outputs":[{"assetID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","output":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"amount":49000,"locktime":0,"threshold":1}}]`) + + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + 
"output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], + "memo": "0x0102030405060708" + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", newTx.Unsigned.(*txs.BaseTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_ExportTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() - newTx := newAvaxExportTxWithOutputs(t, env.genesisBytes, env.vm) + newTx := newAvaxExportTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, newTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), @@ -585,9 +636,67 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - require.Contains(jsonString, `"inputs":[{"txID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","outputIndex":2,"assetID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","input":{"amount":50000,"signatureIndices":[0]}}]`) - require.Contains(jsonString, `"exportedOutputs":[{"assetID":"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ","fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","output":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"amount":49000,"locktime":0,"threshold":1}}]}`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], + "memo": "0x", + "destinationChain": "11111111111111111111111111111111LpoYY", + "exportedOutputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + "locktime": 0, + "threshold": 1 + } + } + ] + 
}, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", newTx.Unsigned.(*txs.ExportTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { @@ -600,17 +709,16 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: createAssetTx.ID(), @@ -618,11 +726,93 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // contains the address in the right format - require.Contains(jsonString, `"outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"groupID":1,"locktime":0,"threshold":1},{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"groupID":2,"locktime":0,"threshold":1}]}`) - require.Contains(jsonString, `"initialStates":[{"fxIndex":0,"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"locktime":0,"threshold":1},{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"locktime":0,"threshold":1}]},{"fxIndex":1,"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT","outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"groupID":1,"locktime":0,"threshold":1},{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"groupID":2,"locktime":0,"threshold":1}]},{"fxIndex":2,"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy","outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"locktime":0,"threshold":1},{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"locktime":0,"threshold":1}]}]},"credentials":[],"id":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS"}`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "name": "Team Rocket", + "symbol": "TR", + "denomination": 0, + "initialStates": [ + { + "fxIndex": 0, + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + 
] + }, + { + "fxIndex": 1, + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "groupID": 1, + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "groupID": 2, + "locktime": 0, + "threshold": 1 + } + ] + }, + { + "fxIndex": 2, + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + ] + }, + "credentials": null, + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", createAssetTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID.String(), 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { @@ -635,6 +825,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -642,15 +833,13 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := buildOperationTxWithOp(buildNFTxMintOp(createAssetTx, key, 2, 1)) + mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildNFTxMintOp(createAssetTx, key, 2, 1)) require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), @@ -658,15 +847,71 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // assert memo and payload are in hex - require.Contains(jsonString, `"memo":"0x"`) - require.Contains(jsonString, `"payload":"0x68656c6c6f"`) - // contains the address in the right format - require.Contains(jsonString, `"outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) - // contains the fxID - require.Contains(jsonString, `"operations":[{"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":2}],"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT"`) - require.Contains(jsonString, `"credentials":[{"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT","credential":{"signatures":["0x571f18cfdb254263ab6b987f742409bd5403eafe08b4dbc297c5cd8d1c85eb8812e4541e11d3dc692cd14b5f4bccc1835ec001df6d8935ce881caf97017c2a4801"]}}]`) + + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + 
"assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 2 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 1, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + } + ] + }, + "credentials": [ + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { @@ -679,6 +924,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -686,18 +932,16 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) - mintNFTTx := buildOperationTxWithOp(mintOp1, mintOp2) + mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, mintOp1, mintOp2) require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), @@ -705,14 +949,107 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // contains the address in the right format - require.Contains(jsonString, `"outputs":[{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 2 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 1, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + 
"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 3 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 2, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + } + ] + }, + "credentials": [ + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, `"operations":[{"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":2}],"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT"`) - require.Contains(jsonString, `"credentials":[{"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT","credential":{"signatures":["0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801"]}},{"fxID":"qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT","credential":{"signatures":["0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801"]}}]`) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { @@ -725,6 +1062,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -732,15 +1070,13 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintSecpOpTx := buildOperationTxWithOp(buildSecpMintOp(createAssetTx, key, 0)) + mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildSecpMintOp(createAssetTx, key, 0)) require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), @@ -748,17 +1084,75 @@ func 
TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // ensure memo is in hex - require.Contains(jsonString, `"memo":"0x"`) - // contains the address in the right format - require.Contains(jsonString, `"mintOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) - require.Contains(jsonString, `"transferOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"amount":1,"locktime":0,"threshold":1}}}]}`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 0 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - // contains the fxID - require.Contains(jsonString, `"operations":[{"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":0}],"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ"`) - require.Contains(jsonString, `"credentials":[{"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","credential":{"signatures":["0x6d7406d5e1bdb1d80de542e276e2d162b0497d0df1170bec72b14d40e84ecf7929cb571211d60149404413a9342fdfa0a2b5d07b48e6f3eaea1e2f9f183b480500"]}}]`) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { @@ -771,6 +1165,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -778,18 +1173,16 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) op1 := 
buildSecpMintOp(createAssetTx, key, 0) op2 := buildSecpMintOp(createAssetTx, key, 1) - mintSecpOpTx := buildOperationTxWithOp(op1, op2) + mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), @@ -797,15 +1190,115 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // contains the address in the right format - require.Contains(jsonString, `"mintOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) - require.Contains(jsonString, `"transferOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"],"amount":1,"locktime":0,"threshold":1}}}`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 0 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 1 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, 
`"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":1}],"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ"`) - require.Contains(jsonString, `"credentials":[{"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","credential":{"signatures":["0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401"]}},{"fxID":"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ","credential":{"signatures":["0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401"]}}]`) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { @@ -818,6 +1311,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -825,15 +1319,13 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintPropertyFxOpTx := buildOperationTxWithOp(buildPropertyFxMintOp(createAssetTx, key, 4)) + mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildPropertyFxMintOp(createAssetTx, key, 4)) require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), @@ -841,16 +1333,72 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // ensure memo is in hex - require.Contains(jsonString, `"memo":"0x"`) - // contains the address in the right format - require.Contains(jsonString, `"mintOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 4 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, 
"PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) - // contains the fxID - require.Contains(jsonString, `"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":4}],"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy"`) - require.Contains(jsonString, `"credentials":[{"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy","credential":{"signatures":["0xa3a00a03d3f1551ff696d6c0abdde73ae7002cd6dcce1c37d720de3b7ed80757411c9698cd9681a0fa55ca685904ca87056a3b8abc858a8ac08f45483b32a80201"]}}]`) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) { @@ -863,6 +1411,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) @@ -870,18 +1419,16 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) issueAndAccept(require, env.vm, env.issuer, createAssetTx) op1 := buildPropertyFxMintOp(createAssetTx, key, 4) op2 := buildPropertyFxMintOp(createAssetTx, key, 5) - mintPropertyFxOpTx := buildOperationTxWithOp(op1, op2) + mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) - env.vm.ctx.Lock.Unlock() - reply := api.GetTxReply{} require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), @@ -889,40 +1436,135 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) }, &reply)) require.Equal(reply.Encoding, formatting.JSON) - jsonString := string(reply.Tx) - // contains the address in the right format - require.Contains(jsonString, `"mintOutput":{"addresses":["X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e"]`) + replyTxBytes, err := stdjson.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 4 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + 
"outputIndex": 5 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, `"operations":[{"assetID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","inputIDs":[{"txID":"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS","outputIndex":4}],"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy"`) - require.Contains(jsonString, `"credentials":[{"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy","credential":{"signatures":["0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601"]}},{"fxID":"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy","credential":{"signatures":["0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601"]}}]`) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } -func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { +func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") key := keys[0] - tx := buildBaseTx(avaxTx, vm, key) - require.NoError(t, tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + tx := buildBaseTx(avaxTx, chainID, fee, key) + require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } -func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { +func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") key := keys[0] - tx := buildExportTx(avaxTx, vm, key) - require.NoError(t, tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + tx := buildExportTx(avaxTx, chainID, fee, key) + require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } -func newAvaxCreateAssetTxWithOutputs(t *testing.T, vm *VM) *txs.Tx { +func newAvaxCreateAssetTxWithOutputs(t *testing.T, chainID ids.ID, parser txs.Parser) 
*txs.Tx { key := keys[0] - tx := buildCreateAssetTx(key) - require.NoError(t, vm.parser.InitializeTx(tx)) + tx := buildCreateAssetTx(chainID, key) + require.NoError(t, tx.Initialize(parser.Codec())) return tx } -func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { +func buildBaseTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -946,7 +1588,7 @@ func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { Outs: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxTx.ID()}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - fee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -957,7 +1599,7 @@ func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { }} } -func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { +func buildExportTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{ BaseTx: avax.BaseTx{ @@ -980,7 +1622,7 @@ func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { ExportedOuts: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxTx.ID()}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - fee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -990,7 +1632,7 @@ func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { }} } -func buildCreateAssetTx(key *secp256k1.PrivateKey) *txs.Tx { +func buildCreateAssetTx(chainID ids.ID, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -1129,7 +1771,7 @@ func buildSecpMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputInd } } -func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { +func buildOperationTxWithOp(chainID ids.ID, op ...*txs.Operation) *txs.Tx { return &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -1445,6 +2087,8 @@ func TestGetAssetDescription(t *testing.T) { AssetID: avaxAssetID.String(), }, &reply)) + require.Equal(reply.AssetID, env.vm.feeAssetID) + require.Equal("AVAX", reply.Name) require.Equal("SYMB", reply.Symbol) } @@ -1501,7 +2145,7 @@ func TestCreateFixedCapAsset(t *testing.T) { changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) require.NoError(err) - _, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) require.NoError(env.service.CreateFixedCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -1549,7 +2193,7 @@ func TestCreateVariableCapAsset(t *testing.T) { reply := AssetIDChangeAddr{} minterAddrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) - _, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) changeAddrStr := fromAddrsStr[0] require.NoError(env.service.CreateVariableCapAsset(nil, &CreateAssetArgs{ @@ -1574,12 +2218,8 @@ func TestCreateVariableCapAsset(t *testing.T) { }, &reply)) require.Equal(changeAddrStr, reply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, 
reply.AssetID) - env.vm.ctx.Lock.Unlock() - createdAssetID := reply.AssetID.String() // Test minting of the created variable cap asset mintArgs := &MintArgs{ @@ -1598,12 +2238,8 @@ func TestCreateVariableCapAsset(t *testing.T) { require.NoError(env.service.Mint(nil, mintArgs, mintReply)) require.Equal(changeAddrStr, mintReply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) - env.vm.ctx.Lock.Unlock() - sendArgs := &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ @@ -1647,7 +2283,7 @@ func TestNFTWorkflow(t *testing.T) { env.vm.ctx.Lock.Unlock() }() - fromAddrs, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + fromAddrs, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) // Test minting of the created variable cap asset addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) @@ -1677,12 +2313,8 @@ func TestNFTWorkflow(t *testing.T) { require.NoError(env.service.CreateNFTAsset(nil, createArgs, createReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, createReply.AssetID) - env.vm.ctx.Lock.Unlock() - // Key: Address // Value: AVAX balance balances := map[ids.ShortID]uint64{} @@ -1732,13 +2364,9 @@ func TestNFTWorkflow(t *testing.T) { require.NoError(env.service.MintNFT(nil, mintArgs, mintReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) - env.vm.ctx.Lock.Lock() - // Accept the transaction so that we can send the newly minted NFT buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) - env.vm.ctx.Lock.Unlock() - sendArgs := &SendNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ @@ -1866,6 +2494,7 @@ func TestSend(t *testing.T) { env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -1877,7 +2506,7 @@ func TestSend(t *testing.T) { require.NoError(err) changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) require.NoError(err) - _, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -1898,8 +2527,6 @@ func TestSend(t *testing.T) { require.NoError(env.service.Send(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, reply.TxID) } @@ -1919,6 +2546,7 @@ func TestSendMultiple(t *testing.T) { env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -1930,7 +2558,7 @@ func TestSendMultiple(t *testing.T) { require.NoError(err) changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) require.NoError(err) - _, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -1958,8 +2586,6 @@ func TestSendMultiple(t *testing.T) { require.NoError(env.service.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, reply.TxID) }) } @@ -2266,7 +2892,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { { name: "block height not found", serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { - state := states.NewMockState(ctrl) + state := 
state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(ids.Empty, database.ErrNotFound) manager := executor.NewMockManager(ctrl) @@ -2286,7 +2912,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { { name: "block not found", serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) manager := executor.NewMockManager(ctrl) @@ -2311,7 +2937,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { block.EXPECT().InitCtx(gomock.Any()) block.EXPECT().Txs().Return(nil) - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) manager := executor.NewMockManager(ctrl) @@ -2336,7 +2962,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.Hex, blockBytes) @@ -2364,7 +2990,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexC, blockBytes) @@ -2392,7 +3018,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexNC, blockBytes) @@ -2470,7 +3096,7 @@ func TestServiceGetHeight(t *testing.T) { { name: "block not found", serviceFunc: func(ctrl *gomock.Controller) *Service { - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetLastAccepted().Return(blockID) manager := executor.NewMockManager(ctrl) @@ -2490,7 +3116,7 @@ func TestServiceGetHeight(t *testing.T) { { name: "happy path", serviceFunc: func(ctrl *gomock.Controller) *Service { - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetLastAccepted().Return(blockID) block := block.NewMockBlock(ctrl) diff --git a/vms/avm/states/diff.go b/vms/avm/state/diff.go similarity index 90% rename from vms/avm/states/diff.go rename to vms/avm/state/diff.go index 2ca6d58cd5ee..83e9c5e0d590 100644 --- a/vms/avm/states/diff.go +++ b/vms/avm/state/diff.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package states +package state import ( "errors" @@ -16,7 +16,8 @@ import ( ) var ( - _ Diff = (*diff)(nil) + _ Diff = (*diff)(nil) + _ Versions = stateGetter{} ErrMissingParentState = errors.New("missing parent state") ) @@ -61,6 +62,20 @@ func NewDiff( }, nil } +type stateGetter struct { + state Chain +} + +func (s stateGetter) GetState(ids.ID) (Chain, bool) { + return s.state, true +} + +func NewDiffOn(parentState Chain) (Diff, error) { + return NewDiff(ids.Empty, stateGetter{ + state: parentState, + }) +} + func (d *diff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { if utxo, modified := d.modifiedUTXOs[utxoID]; modified { if utxo == nil { diff --git a/vms/avm/states/mock_states.go b/vms/avm/state/mock_state.go similarity index 88% rename from vms/avm/states/mock_states.go rename to vms/avm/state/mock_state.go index 007b8622042e..cb5138c90369 100644 --- a/vms/avm/states/mock_states.go +++ b/vms/avm/state/mock_state.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/states (interfaces: Chain,State,Diff) +// Source: github.com/ava-labs/avalanchego/vms/avm/state (interfaces: Chain,State,Diff) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/avm/state/mock_state.go github.com/ava-labs/avalanchego/vms/avm/state Chain,State,Diff +// -// Package states is a generated GoMock package. -package states +// Package state is a generated GoMock package. +package state import ( reflect "reflect" @@ -51,7 +53,7 @@ func (m *MockChain) AddBlock(arg0 block.Block) { } // AddBlock indicates an expected call of AddBlock. -func (mr *MockChainMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockChain)(nil).AddBlock), arg0) } @@ -63,7 +65,7 @@ func (m *MockChain) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. -func (mr *MockChainMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0) } @@ -75,7 +77,7 @@ func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockChainMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) } @@ -87,7 +89,7 @@ func (m *MockChain) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) } @@ -102,7 +104,7 @@ func (m *MockChain) GetBlock(arg0 ids.ID) (block.Block, error) { } // GetBlock indicates an expected call of GetBlock. 
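// vms/avm/state/diff.go — illustrative sketch only (assumed usage, not part of the patch hunks above or below):
// the NewDiffOn helper added above wraps a single parent Chain in the unexported stateGetter so the existing
// NewDiff constructor, which expects a Versions lookup, can layer a Diff over one parent state. Only NewDiffOn,
// AddUTXO, DeleteUTXO and Apply come from the patch; the helper below and its names (stageAndApplySketch,
// parent, utxo, spentUTXOID) are assumptions for illustration.
package statesketch

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/avm/state"
	"github.com/ava-labs/avalanchego/vms/components/avax"
)

func stageAndApplySketch(parent state.Chain, utxo *avax.UTXO, spentUTXOID ids.ID) error {
	changes, err := state.NewDiffOn(parent) // builds a Diff whose parent lookup always resolves to parent
	if err != nil {
		return err
	}
	changes.AddUTXO(utxo)           // staged only in the in-memory diff layer
	changes.DeleteUTXO(spentUTXOID) // the parent is untouched until Apply
	changes.Apply(parent)           // write the staged modifications back into the parent chain
	return nil
}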
-func (mr *MockChainMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChain)(nil).GetBlock), arg0) } @@ -117,7 +119,7 @@ func (m *MockChain) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockChainMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockChain)(nil).GetBlockIDAtHeight), arg0) } @@ -160,7 +162,7 @@ func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockChainMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) } @@ -175,7 +177,7 @@ func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) } @@ -187,7 +189,7 @@ func (m *MockChain) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockChainMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockChain)(nil).SetLastAccepted), arg0) } @@ -199,7 +201,7 @@ func (m *MockChain) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockChainMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) } @@ -246,7 +248,7 @@ func (m *MockState) AddBlock(arg0 block.Block) { } // AddBlock indicates an expected call of AddBlock. -func (mr *MockStateMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockState)(nil).AddBlock), arg0) } @@ -258,7 +260,7 @@ func (m *MockState) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. -func (mr *MockStateMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), arg0) } @@ -270,7 +272,7 @@ func (m *MockState) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. 
-func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) } @@ -340,7 +342,7 @@ func (m *MockState) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockStateMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), arg0) } @@ -355,7 +357,7 @@ func (m *MockState) GetBlock(arg0 ids.ID) (block.Block, error) { } // GetBlock indicates an expected call of GetBlock. -func (mr *MockStateMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockState)(nil).GetBlock), arg0) } @@ -370,7 +372,7 @@ func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) } @@ -413,7 +415,7 @@ func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockStateMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), arg0) } @@ -428,7 +430,7 @@ func (m *MockState) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), arg0) } @@ -442,7 +444,7 @@ func (m *MockState) InitializeChainState(arg0 ids.ID, arg1 time.Time) error { } // InitializeChainState indicates an expected call of InitializeChainState. -func (mr *MockStateMockRecorder) InitializeChainState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) InitializeChainState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeChainState", reflect.TypeOf((*MockState)(nil).InitializeChainState), arg0, arg1) } @@ -471,7 +473,7 @@ func (m *MockState) Prune(arg0 sync.Locker, arg1 logging.Logger) error { } // Prune indicates an expected call of Prune. -func (mr *MockStateMockRecorder) Prune(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) Prune(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockState)(nil).Prune), arg0, arg1) } @@ -497,7 +499,7 @@ func (m *MockState) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. 
-func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) } @@ -509,7 +511,7 @@ func (m *MockState) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), arg0) } @@ -524,7 +526,7 @@ func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error } // UTXOIDs indicates an expected call of UTXOIDs. -func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), arg0, arg1, arg2) } @@ -559,7 +561,7 @@ func (m *MockDiff) AddBlock(arg0 block.Block) { } // AddBlock indicates an expected call of AddBlock. -func (mr *MockDiffMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockDiff)(nil).AddBlock), arg0) } @@ -571,7 +573,7 @@ func (m *MockDiff) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. -func (mr *MockDiffMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0) } @@ -583,7 +585,7 @@ func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) } @@ -595,7 +597,7 @@ func (m *MockDiff) Apply(arg0 Chain) { } // Apply indicates an expected call of Apply. -func (mr *MockDiffMockRecorder) Apply(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) Apply(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) } @@ -607,7 +609,7 @@ func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) } @@ -622,7 +624,7 @@ func (m *MockDiff) GetBlock(arg0 ids.ID) (block.Block, error) { } // GetBlock indicates an expected call of GetBlock. 
-func (mr *MockDiffMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockDiff)(nil).GetBlock), arg0) } @@ -637,7 +639,7 @@ func (m *MockDiff) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockDiffMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockDiff)(nil).GetBlockIDAtHeight), arg0) } @@ -680,7 +682,7 @@ func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockDiffMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) } @@ -695,7 +697,7 @@ func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) } @@ -707,7 +709,7 @@ func (m *MockDiff) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockDiffMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockDiff)(nil).SetLastAccepted), arg0) } @@ -719,7 +721,7 @@ func (m *MockDiff) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockDiffMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) } diff --git a/vms/avm/states/state.go b/vms/avm/state/state.go similarity index 99% rename from vms/avm/states/state.go rename to vms/avm/state/state.go index 1167cdb37dce..e85907d77a50 100644 --- a/vms/avm/states/state.go +++ b/vms/avm/state/state.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package states +package state import ( "bytes" diff --git a/vms/avm/states/state_test.go b/vms/avm/state/state_test.go similarity index 96% rename from vms/avm/states/state_test.go rename to vms/avm/state/state_test.go index b64fa3aa7933..758e55ffd7cd 100644 --- a/vms/avm/states/state_test.go +++ b/vms/avm/state/state_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package states +package state import ( "testing" @@ -38,9 +38,12 @@ var ( func init() { var err error - parser, err = block.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err = block.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) if err != nil { panic(err) } @@ -61,7 +64,7 @@ func init() { populatedTx = &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ BlockchainID: ids.GenerateTestID(), }}} - err = parser.InitializeTx(populatedTx) + err = populatedTx.Initialize(parser.Codec()) if err != nil { panic(err) } @@ -197,7 +200,7 @@ func ChainTxTest(t *testing.T, c Chain) { tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ BlockchainID: ids.GenerateTestID(), }}} - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) txID := tx.ID() _, err = c.GetTx(txID) diff --git a/vms/avm/states/versions.go b/vms/avm/state/versions.go similarity index 78% rename from vms/avm/states/versions.go rename to vms/avm/state/versions.go index 409c47becfff..6afb0fe8e5f2 100644 --- a/vms/avm/states/versions.go +++ b/vms/avm/state/versions.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package states +package state import "github.com/ava-labs/avalanchego/ids" diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index 976ce1c60840..b17604b2ba62 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -51,7 +51,7 @@ func TestSetsAndGets(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: ids.Empty, diff --git a/vms/avm/static_client.go b/vms/avm/static_client.go deleted file mode 100644 index 78014785cba9..000000000000 --- a/vms/avm/static_client.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "context" - - "github.com/ava-labs/avalanchego/utils/rpc" -) - -var _ StaticClient = (*staticClient)(nil) - -// StaticClient for interacting with the AVM static api -type StaticClient interface { - BuildGenesis(ctx context.Context, args *BuildGenesisArgs, options ...rpc.Option) (*BuildGenesisReply, error) -} - -// staticClient is an implementation of an AVM client for interacting with the -// avm static api -type staticClient struct { - requester rpc.EndpointRequester -} - -// NewClient returns an AVM client for interacting with the avm static api -func NewStaticClient(uri string) StaticClient { - return &staticClient{requester: rpc.NewEndpointRequester( - uri + "/ext/vm/avm", - )} -} - -func (c *staticClient) BuildGenesis(ctx context.Context, args *BuildGenesisArgs, options ...rpc.Option) (resp *BuildGenesisReply, err error) { - resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "avm.buildGenesis", args, resp, options...) - return resp, err -} diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go index 275546061354..8340bbb6a5df 100644 --- a/vms/avm/static_service.go +++ b/vms/avm/static_service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net/http" + "time" stdjson "encoding/json" @@ -77,11 +78,14 @@ type BuildGenesisReply struct { // BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is // referenced in the UTXO. func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) if err != nil { return err } diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go deleted file mode 100644 index a18220e5de37..000000000000 --- a/vms/avm/static_service_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" -) - -var addrStrArray = []string{ - "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", - "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", - "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", - "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", -} - -func TestBuildGenesis(t *testing.T) { - require := require.New(t) - - ss := CreateStaticService() - addrMap := map[string]string{} - for _, addrStr := range addrStrArray { - addr, err := ids.ShortFromString(addrStr) - require.NoError(err) - addrMap[addrStr], err = address.FormatBech32(constants.UnitTestHRP, addr[:]) - require.NoError(err) - } - args := BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]AssetDefinition{ - "asset1": { - Name: "myFixedCapAsset", - Symbol: "MFCA", - Denomination: 8, - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: 100000, - Address: addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - Holder{ - Amount: 100000, - Address: addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - "asset2": { - Name: "myVarCapAsset", - Symbol: "MVCA", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - }, - Owners{ - Threshold: 2, - Minters: []string{ - addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - }, - "asset3": { - Name: "myOtherVarCapAsset", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - }, - }, - }, - }, - }, - } - reply := BuildGenesisReply{} - require.NoError(ss.BuildGenesis(nil, &args, &reply)) -} diff --git a/vms/avm/tx.go b/vms/avm/tx.go index 57afe6864d8e..13064a59a18f 100644 --- a/vms/avm/tx.go +++ b/vms/avm/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/tx_init.go b/vms/avm/tx_init.go index 1a7d29ebeb40..00112bf6dd45 100644 --- a/vms/avm/tx_init.go +++ b/vms/avm/tx_init.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/txs/base_tx.go b/vms/avm/txs/base_tx.go index 617769d343d8..5cc0d222dce3 100644 --- a/vms/avm/txs/base_tx.go +++ b/vms/avm/txs/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/txs/base_tx_test.go b/vms/avm/txs/base_tx_test.go index 6ec386a7ab8a..d7403e3420fc 100644 --- a/vms/avm/txs/base_tx_test.go +++ b/vms/avm/txs/base_tx_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -125,12 +126,15 @@ func TestBaseTxSerialization(t *testing.T) { Memo: []byte{0x00, 0x01, 0x02, 0x03}, }}} - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) require.Equal(tx.ID().String(), "zeqT8FTnRAxes7QQQYkaWhNkHavd9d6aCdH8TQu2Mx5KEydEz") result := tx.Bytes() diff --git a/vms/avm/txs/codec.go b/vms/avm/txs/codec.go index 777e4562a509..2a83f7497ae0 100644 --- a/vms/avm/txs/codec.go +++ b/vms/avm/txs/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/txs/create_asset_tx.go b/vms/avm/txs/create_asset_tx.go index 4a80d018e428..818bea5b9259 100644 --- a/vms/avm/txs/create_asset_tx.go +++ b/vms/avm/txs/create_asset_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/txs/create_asset_tx_test.go b/vms/avm/txs/create_asset_tx_test.go index 08d0c46f4d54..9ef548eedea2 100644 --- a/vms/avm/txs/create_asset_tx_test.go +++ b/vms/avm/txs/create_asset_tx_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -193,12 +194,15 @@ func TestCreateAssetTxSerialization(t *testing.T) { }, }} - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) result := tx.Bytes() require.Equal(expected, result) @@ -362,11 +366,14 @@ func TestCreateAssetTxSerializationAgain(t *testing.T) { }) } - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) result := tx.Bytes() require.Equal(expected, result) diff --git a/vms/avm/txs/executor/backend.go b/vms/avm/txs/executor/backend.go index fbf4a756faec..fdb020423bdb 100644 --- a/vms/avm/txs/executor/backend.go +++ b/vms/avm/txs/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/avm/txs/executor/executor.go b/vms/avm/txs/executor/executor.go index 040b1d9c816f..2e7db5659dae 100644 --- a/vms/avm/txs/executor/executor.go +++ b/vms/avm/txs/executor/executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -19,7 +19,7 @@ var _ txs.Visitor = (*Executor)(nil) type Executor struct { Codec codec.Manager - State states.Chain // state will be modified + State state.Chain // state will be modified Tx *txs.Tx Inputs set.Set[ids.ID] // imported inputs AtomicRequests map[ids.ID]*atomic.Requests // may be nil diff --git a/vms/avm/txs/executor/executor_test.go b/vms/avm/txs/executor/executor_test.go index 042ae39a9048..66d210b40cb8 100644 --- a/vms/avm/txs/executor/executor_test.go +++ b/vms/avm/txs/executor/executor_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( "testing" + "time" "github.com/prometheus/client_golang/prometheus" @@ -19,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -37,14 +38,17 @@ func TestBaseTxExecutor(t *testing.T) { require := require.New(t) secpFx := &secp256k1fx.Fx{} - parser, err := block.NewParser([]fxs.Fx{secpFx}) + parser, err := block.NewParser( + time.Time{}, + []fxs.Fx{secpFx}, + ) require.NoError(err) codec := parser.Codec() db := memdb.New() vdb := versiondb.New(db) registerer := prometheus.NewRegistry() - state, err := states.New(vdb, parser, registerer, trackChecksums) + state, err := state.New(vdb, parser, registerer, trackChecksums) require.NoError(err) utxoID := avax.UTXOID{ @@ -142,14 +146,17 @@ func TestCreateAssetTxExecutor(t *testing.T) { require := require.New(t) secpFx := &secp256k1fx.Fx{} - parser, err := block.NewParser([]fxs.Fx{secpFx}) + parser, err := block.NewParser( + time.Time{}, + []fxs.Fx{secpFx}, + ) require.NoError(err) codec := parser.Codec() db := memdb.New() vdb := versiondb.New(db) registerer := prometheus.NewRegistry() - state, err := states.New(vdb, parser, registerer, trackChecksums) + state, err := state.New(vdb, parser, registerer, trackChecksums) require.NoError(err) utxoID := avax.UTXOID{ @@ -285,14 +292,17 @@ func TestOperationTxExecutor(t *testing.T) { require := require.New(t) secpFx := &secp256k1fx.Fx{} - parser, err := block.NewParser([]fxs.Fx{secpFx}) + parser, err := block.NewParser( + time.Time{}, + []fxs.Fx{secpFx}, + ) require.NoError(err) codec := parser.Codec() db := memdb.New() vdb := versiondb.New(db) registerer := prometheus.NewRegistry() - state, err := states.New(vdb, parser, registerer, trackChecksums) + state, err := state.New(vdb, parser, registerer, trackChecksums) require.NoError(err) outputOwners := secp256k1fx.OutputOwners{ diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index 0a8d59083255..946346bc0646 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -9,7 +9,7 @@ import ( "reflect" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -26,7 +26,7 @@ var ( type SemanticVerifier struct { *Backend - State states.ReadOnlyChain + State state.ReadOnlyChain Tx *txs.Tx } diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index 72638762c39b..6579e29784d5 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -6,6 +6,7 @@ package executor import ( "reflect" "testing" + "time" "github.com/stretchr/testify/require" @@ -16,13 +17,14 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -30,11 +32,12 @@ import ( ) func TestSemanticVerifierBaseTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -117,14 +120,14 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tests := []struct { name string - stateFunc func(*gomock.Controller) states.Chain + stateFunc func(*gomock.Controller) state.Chain txFunc func(*require.Assertions) *txs.Tx err error }{ { name: "valid", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) @@ -147,8 +150,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) utxo := utxo utxo.Asset.ID = ids.GenerateTestID() @@ -173,8 +176,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -204,8 +207,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) @@ -228,8 +231,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) @@ -251,8 +254,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain 
{ + state := state.NewMockChain(ctrl) output := output output.Amt-- @@ -281,8 +284,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "not allowed output feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -317,8 +320,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) @@ -341,8 +344,8 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, { name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) tx := txs.Tx{ Unsigned: &baseTx, @@ -387,16 +390,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { } func TestSemanticVerifierExportTx(t *testing.T) { - ctx := newContext(t) - ctrl := gomock.NewController(t) - - validatorState := validators.NewMockState(ctrl) - validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ctx.SubnetID, nil) - ctx.ValidatorState = validatorState + ctx := snowtest.Context(t, snowtest.XChainID) typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -483,14 +482,14 @@ func TestSemanticVerifierExportTx(t *testing.T) { tests := []struct { name string - stateFunc func(*gomock.Controller) states.Chain + stateFunc func(*gomock.Controller) state.Chain txFunc func(*require.Assertions) *txs.Tx err error }{ { name: "valid", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) @@ -513,8 +512,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) utxo := utxo utxo.Asset.ID = ids.GenerateTestID() @@ -539,8 +538,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -570,8 +569,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) @@ -594,8 +593,8 @@ func TestSemanticVerifierExportTx(t 
*testing.T) { }, { name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) @@ -617,8 +616,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) output := output output.Amt-- @@ -647,8 +646,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "not allowed output feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -683,8 +682,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) @@ -707,8 +706,8 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, { name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) tx := txs.Tx{ Unsigned: &baseTx, @@ -756,7 +755,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) validatorState := validators.NewMockState(ctrl) validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ids.GenerateTestID(), nil) @@ -765,6 +764,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -849,7 +849,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } - state := states.NewMockChain(ctrl) + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) @@ -873,13 +873,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { } func TestSemanticVerifierImportTx(t *testing.T) { - ctrl := gomock.NewController(t) - - ctx := newContext(t) - - validatorState := validators.NewMockState(ctrl) - validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ctx.SubnetID, nil) - ctx.ValidatorState = validatorState + ctx := snowtest.Context(t, snowtest.XChainID) m := atomic.NewMemory(prefixdb.New([]byte{0}, memdb.New())) ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) @@ -887,6 +881,7 @@ func TestSemanticVerifierImportTx(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) fx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -999,14 +994,14 @@ func TestSemanticVerifierImportTx(t *testing.T) { 
} tests := []struct { name string - stateFunc func(*gomock.Controller) states.Chain + stateFunc func(*gomock.Controller) state.Chain txFunc func(*require.Assertions) *txs.Tx expectedErr error }{ { name: "valid", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() return state @@ -1018,8 +1013,8 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, { name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil createAssetTx := txs.Tx{ @@ -1036,8 +1031,8 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, { name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() return state @@ -1058,8 +1053,8 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, { name: "not allowed output feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil createAssetTx := txs.Tx{ @@ -1087,8 +1082,8 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, { name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) return state @@ -1100,8 +1095,8 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, { name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) tx := txs.Tx{ Unsigned: &baseTx, } diff --git a/vms/avm/txs/executor/syntactic_verifier.go b/vms/avm/txs/executor/syntactic_verifier.go index 7419b5738215..81a2f2a715f4 100644 --- a/vms/avm/txs/executor/syntactic_verifier.go +++ b/vms/avm/txs/executor/syntactic_verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index 34f1b27ebbcc..108ac9e94a60 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -7,11 +7,12 @@ import ( "math" "strings" "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/config" @@ -32,30 +33,16 @@ var ( } ) -func newContext(t testing.TB) *snow.Context { - require := require.New(t) - - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.ChainID = ids.GenerateTestID() - ctx.XChainID = ctx.ChainID - ctx.CChainID = ids.GenerateTestID() - - aliaser := ctx.BCLookup.(ids.Aliaser) - require.NoError(aliaser.Alias(ctx.XChainID, "X")) - require.NoError(aliaser.Alias(ctx.XChainID, ctx.XChainID.String())) - require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) - require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) - return ctx -} - func TestSyntacticVerifierBaseTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} - parser, err := txs.NewParser([]fxs.Fx{ - fx, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + fx, + }, + ) require.NoError(t, err) feeAssetID := ids.GenerateTestID() @@ -420,12 +407,15 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { } func TestSyntacticVerifierCreateAssetTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} - parser, err := txs.NewParser([]fxs.Fx{ - fx, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + fx, + }, + ) require.NoError(t, err) feeAssetID := ids.GenerateTestID() @@ -1027,12 +1017,15 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { } func TestSyntacticVerifierOperationTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} - parser, err := txs.NewParser([]fxs.Fx{ - fx, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + fx, + }, + ) require.NoError(t, err) feeAssetID := ids.GenerateTestID() @@ -1514,12 +1507,15 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { } func TestSyntacticVerifierImportTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} - parser, err := txs.NewParser([]fxs.Fx{ - fx, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + fx, + }, + ) require.NoError(t, err) feeAssetID := ids.GenerateTestID() @@ -1912,12 +1908,15 @@ func TestSyntacticVerifierImportTx(t *testing.T) { } func TestSyntacticVerifierExportTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} - parser, err := txs.NewParser([]fxs.Fx{ - fx, - }) + parser, err := txs.NewParser( + time.Time{}, + []fxs.Fx{ + fx, + }, + ) require.NoError(t, err) feeAssetID := ids.GenerateTestID() diff --git a/vms/avm/txs/export_tx.go b/vms/avm/txs/export_tx.go index aec13141497d..e0be45360693 100644 --- a/vms/avm/txs/export_tx.go +++ b/vms/avm/txs/export_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs diff --git a/vms/avm/txs/export_tx_test.go b/vms/avm/txs/export_tx_test.go index a7c1ed16196f..1d3ce2dee276 100644 --- a/vms/avm/txs/export_tx_test.go +++ b/vms/avm/txs/export_tx_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -108,12 +109,15 @@ func TestExportTxSerialization(t *testing.T) { }, }} - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) require.Equal(tx.ID().String(), "2PKJE4TrKYpgynBFCpNPpV3GHK7d9QTgrL5mpYG6abHKDvNBG3") result := tx.Bytes() diff --git a/vms/avm/txs/import_tx.go b/vms/avm/txs/import_tx.go index c3066ccc5c40..5ef8929fc641 100644 --- a/vms/avm/txs/import_tx.go +++ b/vms/avm/txs/import_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/txs/import_tx_test.go b/vms/avm/txs/import_tx_test.go index 4172a4047792..82dd0cfcd7b2 100644 --- a/vms/avm/txs/import_tx_test.go +++ b/vms/avm/txs/import_tx_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -108,12 +109,15 @@ func TestImportTxSerialization(t *testing.T) { }}, }} - parser, err := NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - }) + parser, err := NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) require.Equal(tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") result := tx.Bytes() diff --git a/vms/avm/txs/initial_state.go b/vms/avm/txs/initial_state.go index a50c88c0a294..a093ead5c6b2 100644 --- a/vms/avm/txs/initial_state.go +++ b/vms/avm/txs/initial_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -59,8 +59,8 @@ func (is *InitialState) Verify(c codec.Manager, numFxs int) error { return nil } -func (is *InitialState) Less(other *InitialState) bool { - return is.FxIndex < other.FxIndex +func (is *InitialState) Compare(other *InitialState) int { + return utils.Compare(is.FxIndex, other.FxIndex) } func (is *InitialState) Sort(c codec.Manager) { diff --git a/vms/avm/txs/initial_state_test.go b/vms/avm/txs/initial_state_test.go index f54f54b3b9ed..5f61deb3e7c6 100644 --- a/vms/avm/txs/initial_state_test.go +++ b/vms/avm/txs/initial_state_test.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
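Annotation (not part of the diff): InitialState.Less is replaced above by Compare, which returns a negative, zero, or positive int via utils.Compare. Below is a minimal sketch of how the new method composes with the standard library; the main package and the example values are hypothetical, only the Compare method and the FxIndex field come from the diff.

```go
package main

import (
	"fmt"
	"slices"

	"github.com/ava-labs/avalanchego/vms/avm/txs"
)

func main() {
	states := []*txs.InitialState{{FxIndex: 2}, {FxIndex: 0}, {FxIndex: 1}}
	// Compare returns <0, 0, or >0, so the method expression can be passed
	// directly to slices.SortFunc (Go 1.21+) as the comparison function.
	slices.SortFunc(states, (*txs.InitialState).Compare)
	for _, s := range states {
		fmt.Println(s.FxIndex) // prints 0, 1, 2
	}
}
```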
package txs import ( "errors" + "fmt" "testing" + "time" "github.com/stretchr/testify/require" @@ -22,7 +24,7 @@ var errTest = errors.New("non-nil error") func TestInitialStateVerifySerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) @@ -79,7 +81,7 @@ func TestInitialStateVerifySerialization(t *testing.T) { func TestInitialStateVerifyNil(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -92,7 +94,7 @@ func TestInitialStateVerifyNil(t *testing.T) { func TestInitialStateVerifyUnknownFxID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -107,7 +109,7 @@ func TestInitialStateVerifyUnknownFxID(t *testing.T) { func TestInitialStateVerifyNilOutput(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -123,7 +125,7 @@ func TestInitialStateVerifyNilOutput(t *testing.T) { func TestInitialStateVerifyInvalidOutput(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&avax.TestState{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) @@ -140,7 +142,7 @@ func TestInitialStateVerifyInvalidOutput(t *testing.T) { func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&avax.TestTransferable{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) @@ -159,14 +161,31 @@ func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { require.NoError(is.Verify(m, numFxs)) } -func TestInitialStateLess(t *testing.T) { - require := require.New(t) - - var is1, is2 InitialState - require.False(is1.Less(&is2)) - require.False(is2.Less(&is1)) +func TestInitialStateCompare(t *testing.T) { + tests := []struct { + a *InitialState + b *InitialState + expected int + }{ + { + a: &InitialState{}, + b: &InitialState{}, + expected: 0, + }, + { + a: &InitialState{ + FxIndex: 1, + }, + b: &InitialState{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.FxIndex, test.b.FxIndex, test.expected), func(t *testing.T) { + require := require.New(t) - is1.FxIndex = 1 - require.False(is1.Less(&is2)) - require.True(is2.Less(&is1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/vms/avm/txs/mempool/mempool.go b/vms/avm/txs/mempool/mempool.go index b64002e8f39d..4ac275a21305 100644 --- a/vms/avm/txs/mempool/mempool.go +++ b/vms/avm/txs/mempool/mempool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool @@ -6,14 +6,16 @@ package mempool import ( "errors" "fmt" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/setmap" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -26,8 +28,6 @@ const ( // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 - initialConsumedUTXOsSize = 512 - // maxMempoolSize is the maximum number of bytes allowed in the mempool maxMempoolSize = 64 * units.MiB ) @@ -35,22 +35,25 @@ const ( var ( _ Mempool = (*mempool)(nil) - errDuplicateTx = errors.New("duplicate tx") - errTxTooLarge = errors.New("tx too large") - errMempoolFull = errors.New("mempool is full") - errConflictsWithOtherTx = errors.New("tx conflicts with other tx") + ErrDuplicateTx = errors.New("duplicate tx") + ErrTxTooLarge = errors.New("tx too large") + ErrMempoolFull = errors.New("mempool is full") + ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") ) // Mempool contains transactions that have not yet been put into a block. type Mempool interface { Add(tx *txs.Tx) error - Has(txID ids.ID) bool - Get(txID ids.ID) *txs.Tx - Remove(txs []*txs.Tx) + Get(txID ids.ID) (*txs.Tx, bool) + // Remove [txs] and any conflicts of [txs] from the mempool. + Remove(txs ...*txs.Tx) + + // Peek returns the oldest tx in the mempool. + Peek() (tx *txs.Tx, exists bool) - // Peek returns the next first tx that was added to the mempool whose size - // is less than or equal to maxTxSize. - Peek(maxTxSize int) *txs.Tx + // Iterate over transactions from oldest to newest until the function + // returns false or there are no more transactions. + Iterate(f func(tx *txs.Tx) bool) // RequestBuildBlock notifies the consensus engine that a block should be // built if there is at least one transaction in the mempool. @@ -60,22 +63,22 @@ type Mempool interface { // unissued. This allows previously dropped txs to be possibly reissued. MarkDropped(txID ids.ID, reason error) GetDropReason(txID ids.ID) error + + // Len returns the number of txs in the mempool. 
+ Len() int } type mempool struct { - bytesAvailableMetric prometheus.Gauge - bytesAvailable int - - unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] - numTxs prometheus.Gauge + lock sync.RWMutex + unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs + bytesAvailable int + droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> Verification error toEngine chan<- common.Message - // Key: Tx ID - // Value: Verification error - droppedTxIDs *cache.LRU[ids.ID, error] - - consumedUTXOs set.Set[ids.ID] + numTxs prometheus.Gauge + bytesAvailableMetric prometheus.Gauge } func New( @@ -83,47 +86,46 @@ func New( registerer prometheus.Registerer, toEngine chan<- common.Message, ) (Mempool, error) { - bytesAvailableMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }) - if err := registerer.Register(bytesAvailableMetric); err != nil { - return nil, err + m := &mempool{ + unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + consumedUTXOs: setmap.New[ids.ID, ids.ID](), + bytesAvailable: maxMempoolSize, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + toEngine: toEngine, + numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of transactions in the mempool", + }), + bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: "Number of bytes of space currently available in the mempool", + }), } + m.bytesAvailableMetric.Set(maxMempoolSize) - numTxsMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "count", - Help: "Number of transactions in the mempool", - }) - if err := registerer.Register(numTxsMetric); err != nil { - return nil, err - } - - bytesAvailableMetric.Set(maxMempoolSize) - return &mempool{ - bytesAvailableMetric: bytesAvailableMetric, - bytesAvailable: maxMempoolSize, - unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - numTxs: numTxsMetric, - toEngine: toEngine, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), - }, nil + err := utils.Err( + registerer.Register(m.numTxs), + registerer.Register(m.bytesAvailableMetric), + ) + return m, err } func (m *mempool) Add(tx *txs.Tx) error { - // Note: a previously dropped tx can be re-added txID := tx.ID() - if m.Has(txID) { - return fmt.Errorf("%w: %s", errDuplicateTx, txID) + + m.lock.Lock() + defer m.lock.Unlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) } txSize := len(tx.Bytes()) if txSize > MaxTxSize { return fmt.Errorf("%w: %s size (%d) > max size (%d)", - errTxTooLarge, + ErrTxTooLarge, txID, txSize, MaxTxSize, @@ -131,7 +133,7 @@ func (m *mempool) Add(tx *txs.Tx) error { } if txSize > m.bytesAvailable { return fmt.Errorf("%w: %s size (%d) > available space (%d)", - errMempoolFull, + ErrMempoolFull, txID, txSize, m.bytesAvailable, @@ -139,8 +141,8 @@ func (m *mempool) Add(tx *txs.Tx) error { } inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("%w: %s", errConflictsWithOtherTx, txID) + if m.consumedUTXOs.HasOverlap(inputs) { + return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) } m.bytesAvailable -= txSize @@ -150,52 +152,58 @@ func (m *mempool) Add(tx *txs.Tx) error { m.numTxs.Inc() 
// Mark these UTXOs as consumed in the mempool - m.consumedUTXOs.Union(inputs) + m.consumedUTXOs.Put(txID, inputs) - // An explicitly added tx must not be marked as dropped. + // An added tx must not be marked as dropped. m.droppedTxIDs.Evict(txID) return nil } -func (m *mempool) Has(txID ids.ID) bool { - return m.Get(txID) != nil +func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { + tx, ok := m.unissuedTxs.Get(txID) + return tx, ok } -func (m *mempool) Get(txID ids.ID) *txs.Tx { - unissuedTxs, _ := m.unissuedTxs.Get(txID) - return unissuedTxs -} +func (m *mempool) Remove(txs ...*txs.Tx) { + m.lock.Lock() + defer m.lock.Unlock() -func (m *mempool) Remove(txsToRemove []*txs.Tx) { - for _, tx := range txsToRemove { + for _, tx := range txs { txID := tx.ID() - if _, ok := m.unissuedTxs.Get(txID); !ok { - // If tx isn't in the mempool, there is nothing to do. + // If the transaction is in the mempool, remove it. + if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { + m.unissuedTxs.Delete(txID) + m.bytesAvailable += len(tx.Bytes()) continue } - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - - m.unissuedTxs.Delete(txID) - m.numTxs.Dec() - + // If the transaction isn't in the mempool, remove any conflicts it has. inputs := tx.Unsigned.InputIDs() - m.consumedUTXOs.Difference(inputs) + for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { + tx, _ := m.unissuedTxs.Get(removed.Key) + m.unissuedTxs.Delete(removed.Key) + m.bytesAvailable += len(tx.Bytes()) + } } + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + m.numTxs.Set(float64(m.unissuedTxs.Len())) +} + +func (m *mempool) Peek() (*txs.Tx, bool) { + _, tx, exists := m.unissuedTxs.Oldest() + return tx, exists } -func (m *mempool) Peek(maxTxSize int) *txs.Tx { - txIter := m.unissuedTxs.NewIterator() - for txIter.Next() { - tx := txIter.Value() - txSize := len(tx.Bytes()) - if txSize <= maxTxSize { - return tx +func (m *mempool) Iterate(f func(*txs.Tx) bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + it := m.unissuedTxs.NewIterator() + for it.Next() { + if !f(it.Value()) { + return } } - return nil } func (m *mempool) RequestBuildBlock() { @@ -210,6 +218,17 @@ func (m *mempool) RequestBuildBlock() { } func (m *mempool) MarkDropped(txID ids.ID, reason error) { + if errors.Is(reason, ErrMempoolFull) { + return + } + + m.lock.RLock() + defer m.lock.RUnlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return + } + m.droppedTxIDs.Put(txID, reason) } @@ -217,3 +236,10 @@ func (m *mempool) GetDropReason(txID ids.ID) error { err, _ := m.droppedTxIDs.Get(txID) return err } + +func (m *mempool) Len() int { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.unissuedTxs.Len() +} diff --git a/vms/avm/txs/mempool/mempool_test.go b/vms/avm/txs/mempool/mempool_test.go index 4e1396ac3d41..3a1a82484267 100644 --- a/vms/avm/txs/mempool/mempool_test.go +++ b/vms/avm/txs/mempool/mempool_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
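Annotation (not part of the diff): the reworked Mempool interface above drops Has and the size-bounded Peek in favour of Get/Peek that report existence, a variadic Remove that also evicts conflicting txs, an Iterate callback, and Len. A hedged sketch of a consumer under the new API follows; the builder package and the gatherTxs helper are hypothetical, only the interface methods and tx.Bytes() come from the diff.

```go
package builder

import (
	"github.com/ava-labs/avalanchego/vms/avm/txs"
	"github.com/ava-labs/avalanchego/vms/avm/txs/mempool"
)

// gatherTxs walks the mempool from oldest to newest until a byte budget is
// spent, then removes the selected txs (and any of their conflicts).
func gatherTxs(m mempool.Mempool, maxBytes int) []*txs.Tx {
	var (
		selected []*txs.Tx
		size     int
	)
	m.Iterate(func(tx *txs.Tx) bool {
		txSize := len(tx.Bytes())
		if size+txSize > maxBytes {
			return false // returning false stops the iteration
		}
		size += txSize
		selected = append(selected, tx)
		return true
	})
	m.Remove(selected...)
	return selected
}
```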
package mempool import ( + "errors" "testing" "github.com/prometheus/client_golang/prometheus" @@ -13,160 +14,306 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var ( - keys = secp256k1.TestKeys() - chainID = ids.ID{5, 4, 3, 2, 1} - assetID = ids.ID{1, 2, 3} -) +func TestAdd(t *testing.T) { + tx0 := newTx(0, 32) + + tests := []struct { + name string + initialTxs []*txs.Tx + tx *txs.Tx + err error + dropReason error + }{ + { + name: "successfully add tx", + initialTxs: nil, + tx: tx0, + err: nil, + dropReason: nil, + }, + { + name: "attempt adding duplicate tx", + initialTxs: []*txs.Tx{tx0}, + tx: tx0, + err: ErrDuplicateTx, + dropReason: nil, + }, + { + name: "attempt adding too large tx", + initialTxs: nil, + tx: newTx(0, MaxTxSize+1), + err: ErrTxTooLarge, + dropReason: ErrTxTooLarge, + }, + { + name: "attempt adding tx when full", + initialTxs: newTxs(maxMempoolSize/MaxTxSize, MaxTxSize), + tx: newTx(maxMempoolSize/MaxTxSize, MaxTxSize), + err: ErrMempoolFull, + dropReason: nil, + }, + { + name: "attempt adding conflicting tx", + initialTxs: []*txs.Tx{tx0}, + tx: newTx(0, 32), + err: ErrConflictsWithOtherTx, + dropReason: ErrConflictsWithOtherTx, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + for _, tx := range test.initialTxs { + require.NoError(mempool.Add(tx)) + } -// shows that valid tx is not added to mempool if this would exceed its maximum -// size -func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { + err = mempool.Add(test.tx) + require.ErrorIs(err, test.err) + + txID := test.tx.ID() + + if err != nil { + mempool.MarkDropped(txID, err) + } + + err = mempool.GetDropReason(txID) + require.ErrorIs(err, test.dropReason) + }) + } +} + +func TestGet(t *testing.T) { require := require.New(t) - registerer := prometheus.NewRegistry() - mempoolIntf, err := New("mempool", registerer, nil) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) require.NoError(err) - mempool := mempoolIntf.(*mempool) + tx := newTx(0, 32) + txID := tx.ID() - testTxs := createTestTxs(2) - tx := testTxs[0] + _, exists := mempool.Get(txID) + require.False(exists) - // shortcut to simulated almost filled mempool - mempool.bytesAvailable = len(tx.Bytes()) - 1 + require.NoError(mempool.Add(tx)) - err = mempool.Add(tx) - require.ErrorIs(err, errMempoolFull) + returned, exists := mempool.Get(txID) + require.True(exists) + require.Equal(tx, returned) - // shortcut to simulated almost filled mempool - mempool.bytesAvailable = len(tx.Bytes()) + mempool.Remove(tx) - require.NoError(mempool.Add(tx)) + _, exists = mempool.Get(txID) + require.False(exists) } -func TestTxsInMempool(t *testing.T) { +func TestPeek(t *testing.T) { require := require.New(t) - registerer := prometheus.NewRegistry() - toEngine := make(chan common.Message, 100) - mempool, err := New("mempool", registerer, toEngine) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) require.NoError(err) - testTxs := createTestTxs(2) + _, 
exists := mempool.Peek() + require.False(exists) - mempool.RequestBuildBlock() - select { - case <-toEngine: - require.FailNow("should not have sent message to engine") - default: - } + tx0 := newTx(0, 32) + tx1 := newTx(1, 32) - for _, tx := range testTxs { - txID := tx.ID() - // tx not already there - require.False(mempool.Has(txID)) + require.NoError(mempool.Add(tx0)) + require.NoError(mempool.Add(tx1)) - // we can insert - require.NoError(mempool.Add(tx)) + tx, exists := mempool.Peek() + require.True(exists) + require.Equal(tx, tx0) - // we can get it - require.True(mempool.Has(txID)) + mempool.Remove(tx0) - retrieved := mempool.Get(txID) - require.NotNil(retrieved) - require.Equal(tx, retrieved) + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) - // tx exists in mempool - require.True(mempool.Has(txID)) + mempool.Remove(tx0) - // once removed it cannot be there - mempool.Remove([]*txs.Tx{tx}) + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) - require.False(mempool.Has(txID)) - require.Nil(mempool.Get(txID)) + mempool.Remove(tx1) - // we can reinsert it again to grow the mempool - require.NoError(mempool.Add(tx)) + _, exists = mempool.Peek() + require.False(exists) +} + +func TestRemoveConflict(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + tx := newTx(0, 32) + txConflict := newTx(0, 32) + + require.NoError(mempool.Add(tx)) + + returnedTx, exists := mempool.Peek() + require.True(exists) + require.Equal(returnedTx, tx) + + mempool.Remove(txConflict) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestIterate(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + var ( + iteratedTxs []*txs.Tx + maxLen = 2 + ) + addTxs := func(tx *txs.Tx) bool { + iteratedTxs = append(iteratedTxs, tx) + return len(iteratedTxs) < maxLen } + mempool.Iterate(addTxs) + require.Empty(iteratedTxs) + + tx0 := newTx(0, 32) + require.NoError(mempool.Add(tx0)) + + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0}, iteratedTxs) + + tx1 := newTx(1, 32) + require.NoError(mempool.Add(tx1)) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) + + tx2 := newTx(2, 32) + require.NoError(mempool.Add(tx2)) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) + + mempool.Remove(tx0, tx2) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx1}, iteratedTxs) +} + +func TestRequestBuildBlock(t *testing.T) { + require := require.New(t) + + toEngine := make(chan common.Message, 1) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + toEngine, + ) + require.NoError(err) mempool.RequestBuildBlock() select { case <-toEngine: + require.FailNow("should not have sent message to engine") default: - require.FailNow("should have sent message to engine") } - mempool.Remove(testTxs) + tx := newTx(0, 32) + require.NoError(mempool.Add(tx)) mempool.RequestBuildBlock() + mempool.RequestBuildBlock() // Must not deadlock select { case <-toEngine: - require.FailNow("should not have sent message to engine") + default: + require.FailNow("should have sent message to engine") + } + select { + case <-toEngine: + require.FailNow("should have only sent one message to engine") default: } } -func createTestTxs(count int) []*txs.Tx { - testTxs := make([]*txs.Tx, 0, 
count) - addr := keys[0].PublicKey().Address() - for i := uint32(0); i < uint32(count); i++ { - tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{'t', 'x', 'I', 'D'}, - OutputIndex: i, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{i}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }, - }}, - }}, - Name: "NormalName", - Symbol: "TICK", - Denomination: byte(2), - States: []*txs.InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }, - }, - }, - }, - }} - tx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) - testTxs = append(testTxs, tx) +func TestDropped(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + tx := newTx(0, 32) + txID := tx.ID() + testErr := errors.New("test") + + mempool.MarkDropped(txID, testErr) + + err = mempool.GetDropReason(txID) + require.ErrorIs(err, testErr) + + require.NoError(mempool.Add(tx)) + require.NoError(mempool.GetDropReason(txID)) + + mempool.MarkDropped(txID, testErr) + require.NoError(mempool.GetDropReason(txID)) +} + +func newTxs(num int, size int) []*txs.Tx { + txs := make([]*txs.Tx, num) + for i := range txs { + txs[i] = newTx(uint32(i), size) } - return testTxs + return txs +} + +func newTx(index uint32, size int) *txs.Tx { + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: index, + }, + }}, + }}} + tx.SetBytes(utils.RandomBytes(size), utils.RandomBytes(size)) + return tx } diff --git a/vms/avm/txs/mempool/mock_mempool.go b/vms/avm/txs/mempool/mock_mempool.go index e84f01e0875e..69860c38d5d3 100644 --- a/vms/avm/txs/mempool/mock_mempool.go +++ b/vms/avm/txs/mempool/mock_mempool.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/avm/txs/mempool (interfaces: Mempool) +// +// Generated by this command: +// +// mockgen -package=mempool -destination=vms/avm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/avm/txs/mempool Mempool +// // Package mempool is a generated GoMock package. package mempool @@ -47,21 +49,22 @@ func (m *MockMempool) Add(arg0 *txs.Tx) error { } // Add indicates an expected call of Add. -func (mr *MockMempoolMockRecorder) Add(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Add(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockMempool)(nil).Add), arg0) } // Get mocks base method. 
-func (m *MockMempool) Get(arg0 ids.ID) *txs.Tx { +func (m *MockMempool) Get(arg0 ids.ID) (*txs.Tx, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMempool)(nil).Get), arg0) } @@ -75,23 +78,35 @@ func (m *MockMempool) GetDropReason(arg0 ids.ID) error { } // GetDropReason indicates an expected call of GetDropReason. -func (mr *MockMempoolMockRecorder) GetDropReason(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) GetDropReason(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDropReason", reflect.TypeOf((*MockMempool)(nil).GetDropReason), arg0) } -// Has mocks base method. -func (m *MockMempool) Has(arg0 ids.ID) bool { +// Iterate mocks base method. +func (m *MockMempool) Iterate(arg0 func(*txs.Tx) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) + m.ctrl.Call(m, "Iterate", arg0) +} + +// Iterate indicates an expected call of Iterate. +func (mr *MockMempoolMockRecorder) Iterate(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockMempool)(nil).Iterate), arg0) +} + +// Len mocks base method. +func (m *MockMempool) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) return ret0 } -// Has indicates an expected call of Has. -func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { +// Len indicates an expected call of Len. +func (mr *MockMempoolMockRecorder) Len() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockMempool)(nil).Len)) } // MarkDropped mocks base method. @@ -101,35 +116,40 @@ func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { } // MarkDropped indicates an expected call of MarkDropped. -func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) } // Peek mocks base method. -func (m *MockMempool) Peek(arg0 int) *txs.Tx { +func (m *MockMempool) Peek() (*txs.Tx, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Peek", arg0) + ret := m.ctrl.Call(m, "Peek") ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Peek indicates an expected call of Peek. -func (mr *MockMempoolMockRecorder) Peek(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Peek() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek)) } // Remove mocks base method. 
-func (m *MockMempool) Remove(arg0 []*txs.Tx) { +func (m *MockMempool) Remove(arg0 ...*txs.Tx) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Remove", arg0) + varargs := []any{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Remove", varargs...) } // Remove indicates an expected call of Remove. -func (mr *MockMempoolMockRecorder) Remove(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Remove(arg0 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0...) } // RequestBuildBlock mocks base method. diff --git a/vms/avm/txs/mock_unsigned_tx.go b/vms/avm/txs/mock_unsigned_tx.go index c6504c7855a0..25bc9d501a16 100644 --- a/vms/avm/txs/mock_unsigned_tx.go +++ b/vms/avm/txs/mock_unsigned_tx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/txs (interfaces: UnsignedTx) +// Source: vms/avm/txs/tx.go +// +// Generated by this command: +// +// mockgen -source=vms/avm/txs/tx.go -destination=vms/avm/txs/mock_unsigned_tx.go -package=txs -exclude_interfaces= +// // Package txs is a generated GoMock package. package txs @@ -55,15 +57,15 @@ func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { } // InitCtx mocks base method. -func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { +func (m *MockUnsignedTx) InitCtx(ctx *snow.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "InitCtx", arg0) + m.ctrl.Call(m, "InitCtx", ctx) } // InitCtx indicates an expected call of InitCtx. -func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) InitCtx(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), ctx) } // InputIDs mocks base method. @@ -109,27 +111,27 @@ func (mr *MockUnsignedTxMockRecorder) NumCredentials() *gomock.Call { } // SetBytes mocks base method. -func (m *MockUnsignedTx) SetBytes(arg0 []byte) { +func (m *MockUnsignedTx) SetBytes(unsignedBytes []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBytes", arg0) + m.ctrl.Call(m, "SetBytes", unsignedBytes) } // SetBytes indicates an expected call of SetBytes. -func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SetBytes(unsignedBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), unsignedBytes) } // Visit mocks base method. -func (m *MockUnsignedTx) Visit(arg0 Visitor) error { +func (m *MockUnsignedTx) Visit(visitor Visitor) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Visit", arg0) + ret := m.ctrl.Call(m, "Visit", visitor) ret0, _ := ret[0].(error) return ret0 } // Visit indicates an expected call of Visit. 
-func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) Visit(visitor any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), visitor) } diff --git a/vms/avm/txs/operation.go b/vms/avm/txs/operation.go index 4b4cb27aa46b..d37b162955c6 100644 --- a/vms/avm/txs/operation.go +++ b/vms/avm/txs/operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -48,16 +48,16 @@ type operationAndCodec struct { codec codec.Manager } -func (o *operationAndCodec) Less(other *operationAndCodec) bool { +func (o *operationAndCodec) Compare(other *operationAndCodec) int { oBytes, err := o.codec.Marshal(CodecVersion, o.op) if err != nil { - return false + return 0 } otherBytes, err := o.codec.Marshal(CodecVersion, other.op) if err != nil { - return false + return 0 } - return bytes.Compare(oBytes, otherBytes) == -1 + return bytes.Compare(oBytes, otherBytes) } func SortOperations(ops []*Operation, c codec.Manager) { diff --git a/vms/avm/txs/operation_test.go b/vms/avm/txs/operation_test.go index f0dc3ec3c742..3ca4676eb370 100644 --- a/vms/avm/txs/operation_test.go +++ b/vms/avm/txs/operation_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -79,7 +80,7 @@ func TestOperationVerify(t *testing.T) { func TestOperationSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&testOperable{})) m := codec.NewDefaultManager() diff --git a/vms/avm/txs/operation_tx.go b/vms/avm/txs/operation_tx.go index df143af30b36..8a1b261ca2dd 100644 --- a/vms/avm/txs/operation_tx.go +++ b/vms/avm/txs/operation_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/txs/parser.go b/vms/avm/txs/parser.go index def42dfed501..979c71d8a7c8 100644 --- a/vms/avm/txs/parser.go +++ b/vms/avm/txs/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -7,10 +7,10 @@ import ( "fmt" "math" "reflect" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/codec/reflectcodec" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -31,9 +31,6 @@ type Parser interface { ParseTx(bytes []byte) (*Tx, error) ParseGenesisTx(bytes []byte) (*Tx, error) - - InitializeTx(tx *Tx) error - InitializeGenesisTx(tx *Tx) error } type parser struct { @@ -43,8 +40,9 @@ type parser struct { gc linearcodec.Codec } -func NewParser(fxs []fxs.Fx) (Parser, error) { +func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { return NewCustomParser( + durangoTime, make(map[reflect.Type]int), &mockable.Clock{}, logging.NoLog{}, @@ -53,13 +51,14 @@ func NewParser(fxs []fxs.Fx) (Parser, error) { } func NewCustomParser( + durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock *mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - gc := linearcodec.New([]string{reflectcodec.DefaultTagName}, 1<<20) - c := linearcodec.NewDefault() + gc := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault(durangoTime) gcm := codec.NewManager(math.MaxInt32) cm := codec.NewDefaultManager() @@ -130,14 +129,6 @@ func (p *parser) ParseGenesisTx(bytes []byte) (*Tx, error) { return parse(p.gcm, bytes) } -func (p *parser) InitializeTx(tx *Tx) error { - return initializeTx(p.cm, tx) -} - -func (p *parser) InitializeGenesisTx(tx *Tx) error { - return initializeTx(p.gcm, tx) -} - func parse(cm codec.Manager, signedBytes []byte) (*Tx, error) { tx := &Tx{} parsedVersion, err := cm.Unmarshal(signedBytes, tx) @@ -157,19 +148,3 @@ func parse(cm codec.Manager, signedBytes []byte) (*Tx, error) { tx.SetBytes(unsignedBytes, signedBytes) return tx, nil } - -func initializeTx(cm codec.Manager, tx *Tx) error { - signedBytes, err := cm.Marshal(CodecVersion, tx) - if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) - } - - unsignedBytesLen, err := cm.Size(CodecVersion, &tx.Unsigned) - if err != nil { - return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) - } - - unsignedBytes := signedBytes[:unsignedBytesLen] - tx.SetBytes(unsignedBytes, signedBytes) - return nil -} diff --git a/vms/avm/txs/tx.go b/vms/avm/txs/tx.go index 6025828a5dcb..42e845b07b15 100644 --- a/vms/avm/txs/tx.go +++ b/vms/avm/txs/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
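With InitializeTx and InitializeGenesisTx removed from the parser above, construction now passes the Durango activation time to NewParser, and transactions initialize themselves against the parser's codec (the tx.Initialize call appears later in this diff). A hedged sketch of the updated call pattern; the surrounding variable names are illustrative:

package example

import (
	"time"

	"github.com/ava-labs/avalanchego/vms/avm/fxs"
	"github.com/ava-labs/avalanchego/vms/avm/txs"
)

func buildTx(durangoTime time.Time, vmFxs []fxs.Fx, unsigned txs.UnsignedTx) (*txs.Tx, error) {
	parser, err := txs.NewParser(durangoTime, vmFxs)
	if err != nil {
		return nil, err
	}

	tx := &txs.Tx{Unsigned: unsigned}
	// Previously parser.InitializeTx(tx); now the tx is serialized with the parser's codec directly.
	if err := tx.Initialize(parser.Codec()); err != nil {
		return nil, err
	}
	return tx, nil
}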
package txs @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var _ gossip.Gossipable = (*Tx)(nil) + type UnsignedTx interface { snow.ContextInitializable @@ -75,6 +78,11 @@ func (t *Tx) ID() ids.ID { return t.TxID } +// GossipID returns the unique ID that this tx should use for mempool gossip +func (t *Tx) GossipID() ids.ID { + return t.TxID +} + // Bytes returns the binary representation of this tx func (t *Tx) Bytes() []byte { return t.bytes diff --git a/vms/avm/txs/visitor.go b/vms/avm/txs/visitor.go index 8de00c1bf35c..31eccb67834d 100644 --- a/vms/avm/txs/visitor.go +++ b/vms/avm/txs/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/avm/utxo/spender.go b/vms/avm/utxo/spender.go index ed57549da306..02ade0d92eae 100644 --- a/vms/avm/utxo/spender.go +++ b/vms/avm/utxo/spender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo diff --git a/vms/avm/vm.go b/vms/avm/vm.go index cae4514ff3a6..2b3577082813 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -9,8 +9,7 @@ import ( "fmt" "net/http" "reflect" - - stdjson "encoding/json" + "sync" "github.com/gorilla/rpc/v2" @@ -38,7 +37,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/network" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/avm/utxo" @@ -59,7 +58,6 @@ var ( errIncompatibleFx = errors.New("incompatible feature extension") errUnknownFx = errors.New("unknown feature extension") errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") - errBootstrapping = errors.New("chain is currently bootstrapping") _ vertex.LinearizableVMWithEngine = (*VM)(nil) ) @@ -84,6 +82,8 @@ type VM struct { registerer prometheus.Registerer + connectedPeers map[ids.NodeID]*version.Application + parser block.Parser pubsub *pubsub.Server @@ -91,7 +91,7 @@ type VM struct { appSender common.AppSender // State management - state states.State + state state.State // Set to true once this VM is marked as `Bootstrapped` by the engine bootstrapped bool @@ -114,18 +114,37 @@ type VM struct { txBackend *txexecutor.Backend + // Cancelled on shutdown + onShutdownCtx context.Context + // Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown() + onShutdownCtxCancel context.CancelFunc + awaitShutdown sync.WaitGroup + + networkConfig network.Config // These values are only initialized after the chain has been linearized. 
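The gossip.Gossipable assertion added to vms/avm/txs/tx.go above only needs a stable identifier for mempool gossip. A sketch of the presumed contract and how the new GossipID method satisfies it; the interface body is an assumption inferred from this diff, not the library's definition:

package example

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/avm/txs"
)

// Assumed shape of the contract behind `var _ gossip.Gossipable = (*Tx)(nil)`.
type gossipable interface {
	GossipID() ids.ID
}

// Satisfied by the new method: a tx gossips under its own TxID.
var _ gossipable = (*txs.Tx)(nil)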
blockbuilder.Builder chainManager blockexecutor.Manager - network network.Network + network *network.Network } -func (*VM) Connected(context.Context, ids.NodeID, *version.Application) error { - return nil +func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + // If the chain isn't linearized yet, we must track the peers externally + // until the network is initialized. + if vm.network == nil { + vm.connectedPeers[nodeID] = version + return nil + } + return vm.network.Connected(ctx, nodeID, version) } -func (*VM) Disconnected(context.Context, ids.NodeID) error { - return nil +func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + // If the chain isn't linearized yet, we must track the peers externally + // until the network is initialized. + if vm.network == nil { + delete(vm.connectedPeers, nodeID) + return nil + } + return vm.network.Disconnected(ctx, nodeID) } /* @@ -134,12 +153,6 @@ func (*VM) Disconnected(context.Context, ids.NodeID) error { ****************************************************************************** */ -type Config struct { - IndexTransactions bool `json:"index-transactions"` - IndexAllowIncomplete bool `json:"index-allow-incomplete"` - ChecksumsEnabled bool `json:"checksums-enabled"` -} - func (vm *VM) Initialize( _ context.Context, ctx *snow.Context, @@ -154,15 +167,13 @@ func (vm *VM) Initialize( noopMessageHandler := common.NewNoOpAppHandler(ctx.Log) vm.Atomic = network.NewAtomic(noopMessageHandler) - avmConfig := Config{} - if len(configBytes) > 0 { - if err := stdjson.Unmarshal(configBytes, &avmConfig); err != nil { - return err - } - ctx.Log.Info("VM config initialized", - zap.Reflect("config", avmConfig), - ) + avmConfig, err := ParseConfig(configBytes) + if err != nil { + return err } + ctx.Log.Info("VM config initialized", + zap.Reflect("config", avmConfig), + ) registerer := prometheus.NewRegistry() if err := ctx.Metrics.Register(registerer); err != nil { @@ -170,8 +181,9 @@ func (vm *VM) Initialize( } vm.registerer = registerer + vm.connectedPeers = make(map[ids.NodeID]*version.Application) + // Initialize metrics as soon as possible - var err error vm.metrics, err = metrics.New("", registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) @@ -207,6 +219,7 @@ func (vm *VM) Initialize( vm.typeToFxIndex = map[reflect.Type]int{} vm.parser, err = block.NewCustomParser( + vm.DurangoTime, vm.typeToFxIndex, &vm.clock, ctx.Log, @@ -220,7 +233,7 @@ func (vm *VM) Initialize( vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, codec) vm.Spender = utxo.NewSpender(&vm.clock, codec) - state, err := states.New( + state, err := state.New( vm.db, vm.parser, vm.registerer, @@ -264,6 +277,8 @@ func (vm *VM) Initialize( Bootstrapped: false, } + vm.onShutdownCtx, vm.onShutdownCtxCancel = context.WithCancel(context.Background()) + vm.networkConfig = avmConfig.Network return vm.state.Commit() } @@ -306,6 +321,9 @@ func (vm *VM) Shutdown(context.Context) error { return nil } + vm.onShutdownCtxCancel() + vm.awaitShutdown.Wait() + return utils.Err( vm.state.Close(), vm.baseDB.Close(), @@ -344,17 +362,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { }, err } -func (*VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { - server := rpc.NewServer() - codec := json.NewCodec() - server.RegisterCodec(codec, "application/json") - server.RegisterCodec(codec, "application/json;charset=UTF-8") - staticService := 
CreateStaticService() - return map[string]http.Handler{ - "": server, - }, server.RegisterService(staticService, "avm") -} - /* ****************************************************************************** ********************************** Chain VM ********************************** @@ -396,7 +403,7 @@ func (*VM) VerifyHeightIndex(context.Context) error { ****************************************************************************** */ -func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- common.Message) error { +func (vm *VM) Linearize(ctx context.Context, stopVertexID ids.ID, toEngine chan<- common.Message) error { time := version.GetCortinaTime(vm.ctx.NetworkID) err := vm.state.InitializeChainState(stopVertexID, time) if err != nil { @@ -424,19 +431,44 @@ func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- mempool, ) - vm.network = network.New( + // Invariant: The context lock is not held when calling network.IssueTx. + vm.network, err = network.New( vm.ctx, vm.parser, - vm.chainManager, + network.NewLockedTxVerifier( + &vm.ctx.Lock, + vm.chainManager, + ), mempool, vm.appSender, + vm.registerer, + vm.networkConfig, ) + if err != nil { + return fmt.Errorf("failed to initialize network: %w", err) + } + + // Notify the network of our current peers + for nodeID, version := range vm.connectedPeers { + if err := vm.network.Connected(ctx, nodeID, version); err != nil { + return err + } + } + vm.connectedPeers = nil // Note: It's important only to switch the networking stack after the full // chainVM has been initialized. Traffic will immediately start being // handled asynchronously. vm.Atomic.Set(vm.network) + vm.awaitShutdown.Add(1) + go func() { + defer vm.awaitShutdown.Done() + + // Invariant: Gossip must never grab the context lock. + vm.network.Gossip(vm.onShutdownCtx) + }() + go func() { err := vm.state.Prune(&vm.ctx.Lock, vm.ctx.Log) if err != nil { @@ -477,32 +509,21 @@ func (vm *VM) ParseTx(_ context.Context, bytes []byte) (snowstorm.Tx, error) { ****************************************************************************** */ -// IssueTx attempts to send a transaction to consensus. -// If onDecide is specified, the function will be called when the transaction is -// either accepted or rejected with the appropriate status. This function will -// go out of scope when the transaction is removed from memory. -func (vm *VM) IssueTx(b []byte) (ids.ID, error) { - if !vm.bootstrapped || vm.Builder == nil { - return ids.ID{}, errBootstrapping - } - - tx, err := vm.parser.ParseTx(b) - if err != nil { - vm.ctx.Log.Debug("failed to parse tx", - zap.Error(err), - ) - return ids.ID{}, err - } - - err = vm.network.IssueTx(context.TODO(), tx) - if err != nil { +// issueTx attempts to send a transaction to consensus. +// +// Invariant: The context lock is not held +// Invariant: This function is only called after Linearize has been called. 
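The Linearize changes above start gossip in a goroutine whose lifetime is bound to onShutdownCtx and awaitShutdown, so Shutdown can cancel the context and then wait for the goroutine to drain. A stripped-down, runnable sketch of that pattern with placeholder names:

package main

import (
	"context"
	"sync"
)

// gossipLoop stands in for network.Gossip: it blocks until the context is cancelled.
func gossipLoop(ctx context.Context) {
	<-ctx.Done()
}

func main() {
	onShutdownCtx, onShutdownCtxCancel := context.WithCancel(context.Background())
	var awaitShutdown sync.WaitGroup

	awaitShutdown.Add(1)
	go func() {
		defer awaitShutdown.Done()
		gossipLoop(onShutdownCtx)
	}()

	// Shutdown: cancel first, then wait for the gossip goroutine to exit.
	onShutdownCtxCancel()
	awaitShutdown.Wait()
}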
+func (vm *VM) issueTx(tx *txs.Tx) (ids.ID, error) { + txID := tx.ID() + err := vm.network.IssueTx(context.TODO(), tx) + if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { vm.ctx.Log.Debug("failed to add tx to mempool", + zap.Stringer("txID", txID), zap.Error(err), ) - return ids.ID{}, err + return txID, err } - - return tx.ID(), nil + return txID, nil } /* @@ -534,7 +555,7 @@ func (vm *VM) initGenesis(genesisBytes []byte) error { tx := &txs.Tx{ Unsigned: &genesisTx.CreateAssetTx, } - if err := vm.parser.InitializeGenesisTx(tx); err != nil { + if err := tx.Initialize(genesisCodec); err != nil { return err } diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index ba6ca3cef957..713f809f7f5c 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -82,7 +82,7 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { TxID: ids.GenerateTestID(), OutputIndex: rand.Uint32(), }, - Asset: avax.Asset{ID: ids.ID{'y', 'e', 'e', 't'}}, + Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 100000, OutputOwners: secp256k1fx.OutputOwners{ diff --git a/vms/avm/vm_regression_test.go b/vms/avm/vm_regression_test.go index 6c1dd1be1798..c6ac40df845d 100644 --- a/vms/avm/vm_regression_test.go +++ b/vms/avm/vm_regression_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -26,7 +26,9 @@ func TestVerifyFxUsage(t *testing.T) { env := setup(t, &envConfig{ vmStaticConfig: &config.Config{}, }) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -34,7 +36,7 @@ func TestVerifyFxUsage(t *testing.T) { createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -66,13 +68,13 @@ func TestVerifyFxUsage(t *testing.T) { }, }, }} - require.NoError(env.vm.parser.InitializeTx(createAssetTx)) + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -95,7 +97,7 @@ func TestVerifyFxUsage(t *testing.T) { spendTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: createAssetTx.ID(), diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 3b4dc9558807..d8aeaf3b8743 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/config" @@ -32,12 +33,9 @@ func TestInvalidGenesis(t *testing.T) { require := require.New(t) vm := &VM{} - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() - }() + defer ctx.Lock.Unlock() err := vm.Initialize( context.Background(), @@ -57,7 +55,7 @@ func TestInvalidFx(t *testing.T) { require := require.New(t) vm := &VM{} - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -85,7 +83,7 @@ func TestFxInitializationFailure(t *testing.T) { require := require.New(t) vm := &VM{} - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -118,12 +116,14 @@ func TestIssueTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() - tx := newTx(t, env.genesisBytes, env.vm, "AVAX") + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") issueAndAccept(require, env.vm, env.issuer, tx) } @@ -134,7 +134,9 @@ func TestIssueNFT(t *testing.T) { env := setup(t, &envConfig{ vmStaticConfig: &config.Config{}, }) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -142,7 +144,7 @@ func TestIssueNFT(t *testing.T) { createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -167,13 +169,13 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - require.NoError(env.vm.parser.InitializeTx(createAssetTx)) + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -198,7 +200,7 @@ func TestIssueNFT(t *testing.T) { Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -222,7 +224,7 @@ func TestIssueNFT(t *testing.T) { }, }, } - require.NoError(env.vm.parser.InitializeTx(transferNFTTx)) + require.NoError(transferNFTTx.Initialize(env.vm.parser.Codec())) issueAndAccept(require, env.vm, env.issuer, transferNFTTx) } @@ -237,7 +239,9 @@ func TestIssueProperty(t *testing.T) { Fx: &propertyfx.Fx{}, }}, }) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -245,7 +249,7 @@ func 
TestIssueProperty(t *testing.T) { createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -262,13 +266,13 @@ func TestIssueProperty(t *testing.T) { }, }}, }} - require.NoError(env.vm.parser.InitializeTx(createAssetTx)) + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -300,7 +304,7 @@ func TestIssueProperty(t *testing.T) { burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -324,13 +328,15 @@ func TestIssueTxWithFeeAsset(t *testing.T) { env := setup(t, &envConfig{ isCustomFeeAsset: true, }) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() // send first asset - tx := newTx(t, env.genesisBytes, env.vm, feeAssetName) + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, feeAssetName) issueAndAccept(require, env.vm, env.issuer, tx) } @@ -340,7 +346,9 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { env := setup(t, &envConfig{ isCustomFeeAsset: true, }) + env.vm.ctx.Lock.Unlock() defer func() { + env.vm.ctx.Lock.Lock() require.NoError(env.vm.Shutdown(context.Background())) env.vm.ctx.Lock.Unlock() }() @@ -352,7 +360,7 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{ // fee asset { @@ -435,7 +443,7 @@ func TestTxAcceptAfterParseTx(t *testing.T) { firstTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: env.genesisTx.ID(), @@ -468,7 +476,7 @@ func TestTxAcceptAfterParseTx(t *testing.T) { secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: firstTx.ID(), @@ -538,7 +546,7 @@ func TestIssueImportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ @@ -593,8 +601,12 @@ func TestIssueImportTx(t *testing.T) { }, })) + env.vm.ctx.Lock.Unlock() + issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + assertIndexedTX(t, env.vm.db, 0, key.PublicKey().Address(), txAssetID.AssetID(), tx.ID()) assertLatestIdx(t, env.vm.db, key.PublicKey().Address(), avaxID, 1) @@ -633,7 +645,7 @@ func TestForceAcceptImportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: 
constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ @@ -698,7 +710,7 @@ func TestIssueExportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: avaxID, @@ -738,8 +750,12 @@ func TestIssueExportTx(t *testing.T) { require.NoError(err) require.Empty(utxoBytes) + env.vm.ctx.Lock.Unlock() + issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + utxoBytes, _, _, err = peerSharedMemory.Indexed( env.vm.ctx.ChainID, [][]byte{ @@ -770,7 +786,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: avaxID, @@ -813,8 +829,12 @@ func TestClearForceAcceptedExportTx(t *testing.T) { _, err := peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) require.ErrorIs(err, database.ErrNotFound) + env.vm.ctx.Lock.Unlock() + issueAndAccept(require, env.vm, env.issuer, tx) + env.vm.ctx.Lock.Lock() + _, err = peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) require.ErrorIs(err, database.ErrNotFound) diff --git a/vms/avm/wallet_client.go b/vms/avm/wallet_client.go index c74918e6e401..69bdc06f9d74 100644 --- a/vms/avm/wallet_client.go +++ b/vms/avm/wallet_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/vms/avm/wallet_service.go b/vms/avm/wallet_service.go index 47aa3bbf88ef..f10c6eee780b 100644 --- a/vms/avm/wallet_service.go +++ b/vms/avm/wallet_service.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( + "context" "errors" "fmt" "net/http" @@ -19,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -31,29 +33,29 @@ type WalletService struct { } func (w *WalletService) decided(txID ids.ID) { - if _, ok := w.pendingTxs.Get(txID); !ok { + if !w.pendingTxs.Delete(txID) { return } w.vm.ctx.Log.Info("tx decided over wallet API", zap.Stringer("txID", txID), ) - w.pendingTxs.Delete(txID) - for { txID, tx, ok := w.pendingTxs.Oldest() if !ok { return } - txBytes := tx.Bytes() - _, err := w.vm.IssueTx(txBytes) + err := w.vm.network.IssueVerifiedTx(context.TODO(), tx) if err == nil { w.vm.ctx.Log.Info("issued tx to mempool over wallet API", zap.Stringer("txID", txID), ) return } + if errors.Is(err, mempool.ErrDuplicateTx) { + return + } w.pendingTxs.Delete(txID) w.vm.ctx.Log.Warn("dropping tx issued over wallet API", @@ -63,12 +65,7 @@ func (w *WalletService) decided(txID ids.ID) { } } -func (w *WalletService) issue(txBytes []byte) (ids.ID, error) { - tx, err := w.vm.parser.ParseTx(txBytes) - if err != nil { - return ids.ID{}, err - } - +func (w *WalletService) issue(tx *txs.Tx) (ids.ID, error) { txID := tx.ID() w.vm.ctx.Log.Info("issuing tx over wallet API", zap.Stringer("txID", txID), @@ -82,14 +79,17 @@ func (w *WalletService) issue(txBytes []byte) (ids.ID, error) { } if w.pendingTxs.Len() == 0 { - _, err := w.vm.IssueTx(txBytes) - if err != nil { - return ids.ID{}, err + if err := w.vm.network.IssueVerifiedTx(context.TODO(), tx); err == nil { + w.vm.ctx.Log.Info("issued tx to mempool over wallet API", + zap.Stringer("txID", txID), + ) + } else if !errors.Is(err, mempool.ErrDuplicateTx) { + w.vm.ctx.Log.Warn("failed to issue tx over wallet API", + zap.Stringer("txID", txID), + zap.Error(err), + ) + return ids.Empty, err } - - w.vm.ctx.Log.Info("issued tx to mempool over wallet API", - zap.Stringer("txID", txID), - ) } else { w.vm.ctx.Log.Info("enqueueing tx over wallet API", zap.Stringer("txID", txID), @@ -142,10 +142,15 @@ func (w *WalletService) IssueTx(_ *http.Request, args *api.FormattedTx, reply *a return fmt.Errorf("problem decoding transaction: %w", err) } + tx, err := w.vm.parser.ParseTx(txBytes) + if err != nil { + return err + } + w.vm.ctx.Lock.Lock() defer w.vm.ctx.Lock.Unlock() - txID, err := w.issue(txBytes) + txID, err := w.issue(tx) reply.TxID = txID return err } @@ -291,7 +296,7 @@ func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, re codec := w.vm.parser.Codec() avax.SortTransferableOutputs(outs, codec) - tx := txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: w.vm.ctx.NetworkID, BlockchainID: w.vm.ctx.ChainID, Outs: outs, @@ -302,7 +307,7 @@ func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, re return err } - txID, err := w.issue(tx.Bytes()) + txID, err := w.issue(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } diff --git a/vms/avm/wallet_service_test.go b/vms/avm/wallet_service_test.go index 7a9232e00a6a..7ffdccdaaa20 100644 --- a/vms/avm/wallet_service_test.go +++ b/vms/avm/wallet_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -39,7 +39,7 @@ func TestWalletService_SendMultiple(t *testing.T) { require.NoError(err) changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) require.NoError(err) - _, fromAddrsStr := sampleAddrs(t, env.vm, addrs) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -67,10 +67,10 @@ func TestWalletService_SendMultiple(t *testing.T) { require.NoError(env.walletService.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) - env.vm.ctx.Lock.Lock() - buildAndAccept(require, env.vm, env.issuer, reply.TxID) + env.vm.ctx.Lock.Lock() + _, err = env.vm.state.GetTx(reply.TxID) require.NoError(err) }) diff --git a/vms/components/avax/addresses.go b/vms/components/avax/addresses.go index 40929e22f895..a1567f75f4d6 100644 --- a/vms/components/avax/addresses.go +++ b/vms/components/avax/addresses.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/asset.go b/vms/components/avax/asset.go index 90a3eef6dff9..bc165b4d59ed 100644 --- a/vms/components/avax/asset.go +++ b/vms/components/avax/asset.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/asset_test.go b/vms/components/avax/asset_test.go index 68c371ae1b06..ad7628ce98b9 100644 --- a/vms/components/avax/asset_test.go +++ b/vms/components/avax/asset_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ func TestAssetVerifyEmpty(t *testing.T) { func TestAssetID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) diff --git a/vms/components/avax/atomic_utxos.go b/vms/components/avax/atomic_utxos.go index 0d263213ac23..50d19b4f0586 100644 --- a/vms/components/avax/atomic_utxos.go +++ b/vms/components/avax/atomic_utxos.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/base_tx.go b/vms/components/avax/base_tx.go index 2bcafa24e497..5afed5f3a16d 100644 --- a/vms/components/avax/base_tx.go +++ b/vms/components/avax/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -65,3 +65,21 @@ func (t *BaseTx) Verify(ctx *snow.Context) error { return nil } } + +func VerifyMemoFieldLength(memo types.JSONByteSlice, isDurangoActive bool) error { + if !isDurangoActive { + // SyntacticVerify validates this field pre-Durango + return nil + } + + if len(memo) != 0 { + return fmt.Errorf( + "%w: %d > %d", + ErrMemoTooLarge, + len(memo), + 0, + ) + } + + return nil +} diff --git a/vms/components/avax/camino_timed_utxo_test.go b/vms/components/avax/camino_timed_utxo_test.go index 48f003e253ad..c30a6c0e6595 100644 --- a/vms/components/avax/camino_timed_utxo_test.go +++ b/vms/components/avax/camino_timed_utxo_test.go @@ -5,6 +5,7 @@ package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -16,7 +17,7 @@ import ( ) func TestRewardUTXOSerializeC(t *testing.T) { - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() errs := wrappers.Errs{} diff --git a/vms/components/avax/flow_checker.go b/vms/components/avax/flow_checker.go index b0ed8c86e551..e02aee717e3b 100644 --- a/vms/components/avax/flow_checker.go +++ b/vms/components/avax/flow_checker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/metadata.go b/vms/components/avax/metadata.go index f03389c46937..1630484131a8 100644 --- a/vms/components/avax/metadata.go +++ b/vms/components/avax/metadata.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/metadata_test.go b/vms/components/avax/metadata_test.go index 01dada2feda7..9569e3e3a465 100644 --- a/vms/components/avax/metadata_test.go +++ b/vms/components/avax/metadata_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/mock_atomic_utxos.go b/vms/components/avax/mock_atomic_utxos.go index 9623379c04aa..68f8b7d80f47 100644 --- a/vms/components/avax/mock_atomic_utxos.go +++ b/vms/components/avax/mock_atomic_utxos.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/avax (interfaces: AtomicUTXOManager) +// Source: vms/components/avax/atomic_utxos.go +// +// Generated by this command: +// +// mockgen -source=vms/components/avax/atomic_utxos.go -destination=vms/components/avax/mock_atomic_utxos.go -package=avax -exclude_interfaces= +// // Package avax is a generated GoMock package. package avax @@ -39,9 +41,9 @@ func (m *MockAtomicUTXOManager) EXPECT() *MockAtomicUTXOManagerMockRecorder { } // GetAtomicUTXOs mocks base method. 
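VerifyMemoFieldLength above is a no-op before Durango (the memo is still validated by SyntacticVerify there) and rejects any non-empty memo afterwards. A hypothetical call site; the activation check and surrounding names are assumptions, not part of this diff:

package example

import (
	"time"

	"github.com/ava-labs/avalanchego/vms/components/avax"
)

func verifyMemo(baseTx *avax.BaseTx, currentTimestamp, durangoTime time.Time) error {
	// Assumed activation rule: Durango is active once chain time reaches durangoTime.
	isDurangoActive := !currentTimestamp.Before(durangoTime)
	return avax.VerifyMemoFieldLength(baseTx.Memo, isDurangoActive)
}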
-func (m *MockAtomicUTXOManager) GetAtomicUTXOs(arg0 ids.ID, arg1 set.Set[ids.ShortID], arg2 ids.ShortID, arg3 ids.ID, arg4 int) ([]*UTXO, ids.ShortID, ids.ID, error) { +func (m *MockAtomicUTXOManager) GetAtomicUTXOs(chainID ids.ID, addrs set.Set[ids.ShortID], startAddr ids.ShortID, startUTXOID ids.ID, limit int) ([]*UTXO, ids.ShortID, ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAtomicUTXOs", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "GetAtomicUTXOs", chainID, addrs, startAddr, startUTXOID, limit) ret0, _ := ret[0].([]*UTXO) ret1, _ := ret[1].(ids.ShortID) ret2, _ := ret[2].(ids.ID) @@ -50,7 +52,7 @@ func (m *MockAtomicUTXOManager) GetAtomicUTXOs(arg0 ids.ID, arg1 set.Set[ids.Sho } // GetAtomicUTXOs indicates an expected call of GetAtomicUTXOs. -func (mr *MockAtomicUTXOManagerMockRecorder) GetAtomicUTXOs(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockAtomicUTXOManagerMockRecorder) GetAtomicUTXOs(chainID, addrs, startAddr, startUTXOID, limit any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAtomicUTXOs", reflect.TypeOf((*MockAtomicUTXOManager)(nil).GetAtomicUTXOs), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAtomicUTXOs", reflect.TypeOf((*MockAtomicUTXOManager)(nil).GetAtomicUTXOs), chainID, addrs, startAddr, startUTXOID, limit) } diff --git a/vms/components/avax/mock_transferable_in.go b/vms/components/avax/mock_transferable_in.go index cfd7e419dc26..b4db89933057 100644 --- a/vms/components/avax/mock_transferable_in.go +++ b/vms/components/avax/mock_transferable_in.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableIn) +// +// Generated by this command: +// +// mockgen -package=avax -destination=vms/components/avax/mock_transferable_in.go github.com/ava-labs/avalanchego/vms/components/avax TransferableIn +// // Package avax is a generated GoMock package. package avax @@ -73,7 +75,7 @@ func (m *MockTransferableIn) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. -func (mr *MockTransferableInMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockTransferableInMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockTransferableIn)(nil).InitCtx), arg0) } diff --git a/vms/components/avax/mock_transferable_out.go b/vms/components/avax/mock_transferable_out.go index bf1836002214..b518b86302d4 100644 --- a/vms/components/avax/mock_transferable_out.go +++ b/vms/components/avax/mock_transferable_out.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableOut) +// +// Generated by this command: +// +// mockgen -package=avax -destination=vms/components/avax/mock_transferable_out.go github.com/ava-labs/avalanchego/vms/components/avax TransferableOut +// // Package avax is a generated GoMock package. package avax @@ -61,7 +63,7 @@ func (m *MockTransferableOut) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockTransferableOutMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockTransferableOutMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockTransferableOut)(nil).InitCtx), arg0) } @@ -79,3 +81,15 @@ func (mr *MockTransferableOutMockRecorder) Verify() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockTransferableOut)(nil).Verify)) } + +// isState mocks base method. +func (m *MockTransferableOut) isState() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "isState") +} + +// isState indicates an expected call of isState. +func (mr *MockTransferableOutMockRecorder) isState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isState", reflect.TypeOf((*MockTransferableOut)(nil).isState)) +} diff --git a/vms/components/avax/state.go b/vms/components/avax/state.go index ab0d42ab080d..1a7616b2f729 100644 --- a/vms/components/avax/state.go +++ b/vms/components/avax/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/test_verifiable.go b/vms/components/avax/test_verifiable.go index 36ea7d57f8ff..8649b01eaa97 100644 --- a/vms/components/avax/test_verifiable.go +++ b/vms/components/avax/test_verifiable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/transferables.go b/vms/components/avax/transferables.go index 3134ac68ff4a..18e3cf77542c 100644 --- a/vms/components/avax/transferables.go +++ b/vms/components/avax/transferables.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -161,8 +161,8 @@ func (in *TransferableInput) Verify() error { } } -func (in *TransferableInput) Less(other *TransferableInput) bool { - return in.UTXOID.Less(&other.UTXOID) +func (in *TransferableInput) Compare(other *TransferableInput) int { + return in.UTXOID.Compare(&other.UTXOID) } type innerSortTransferableInputsWithSigners struct { diff --git a/vms/components/avax/transferables_test.go b/vms/components/avax/transferables_test.go index f0bd6332f78d..755a0124eb7b 100644 --- a/vms/components/avax/transferables_test.go +++ b/vms/components/avax/transferables_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -42,7 +43,7 @@ func TestTransferableOutputVerify(t *testing.T) { func TestTransferableOutputSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&TestTransferable{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) @@ -84,7 +85,7 @@ func TestTransferableOutputSorting(t *testing.T) { func TestTransferableOutputSerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) @@ -175,7 +176,7 @@ func TestTransferableInputVerify(t *testing.T) { func TestTransferableInputSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&TestTransferable{})) ins := []*TransferableInput{ @@ -232,7 +233,7 @@ func TestTransferableInputSorting(t *testing.T) { func TestTransferableInputSerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) diff --git a/vms/components/avax/utxo.go b/vms/components/avax/utxo.go index afea68914b58..a11c94af80ba 100644 --- a/vms/components/avax/utxo.go +++ b/vms/components/avax/utxo.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/utxo_fetching.go b/vms/components/avax/utxo_fetching.go index 1852513165a4..c5170f1d3123 100644 --- a/vms/components/avax/utxo_fetching.go +++ b/vms/components/avax/utxo_fetching.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/utxo_fetching_test.go b/vms/components/avax/utxo_fetching_test.go index ad34ee9e25b1..e36545c19cb7 100644 --- a/vms/components/avax/utxo_fetching_test.go +++ b/vms/components/avax/utxo_fetching_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -39,7 +40,7 @@ func TestFetchUTXOs(t *testing.T) { }, } - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) @@ -72,7 +73,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { addr2 := ids.GenerateTestShortID() addrs := set.Of(addr0, addr1) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) diff --git a/vms/components/avax/utxo_handler.go b/vms/components/avax/utxo_handler.go index 782d8592e448..c6e705affa2a 100644 --- a/vms/components/avax/utxo_handler.go +++ b/vms/components/avax/utxo_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/utxo_id.go b/vms/components/avax/utxo_id.go index 26fe8b83fb98..fafc940444b5 100644 --- a/vms/components/avax/utxo_id.go +++ b/vms/components/avax/utxo_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -91,16 +91,11 @@ func (utxo *UTXOID) Verify() error { } } -func (utxo *UTXOID) Less(other *UTXOID) bool { +func (utxo *UTXOID) Compare(other *UTXOID) int { utxoID, utxoIndex := utxo.InputSource() otherID, otherIndex := other.InputSource() - - switch bytes.Compare(utxoID[:], otherID[:]) { - case -1: - return true - case 0: - return utxoIndex < otherIndex - default: - return false + if txIDComp := bytes.Compare(utxoID[:], otherID[:]); txIDComp != 0 { + return txIDComp } + return utils.Compare(utxoIndex, otherIndex) } diff --git a/vms/components/avax/utxo_id_test.go b/vms/components/avax/utxo_id_test.go index 5652fa1afa69..fed21d5ce986 100644 --- a/vms/components/avax/utxo_id_test.go +++ b/vms/components/avax/utxo_id_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
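UTXOID.Compare above orders by tx ID first and output index second, returning the usual negative/zero/positive result, and TransferableInput.Compare (earlier in this diff) delegates to it. A short sketch of sorting inputs with a comparator-based sort; the repo's own sort helper may differ:

package example

import (
	"slices"

	"github.com/ava-labs/avalanchego/vms/components/avax"
)

func sortInputs(ins []*avax.TransferableInput) {
	// The method expression yields func(a, b *avax.TransferableInput) int, which matches slices.SortFunc.
	slices.SortFunc(ins, (*avax.TransferableInput).Compare)
}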
package avax @@ -6,6 +6,7 @@ package avax import ( "math" "testing" + "time" "github.com/stretchr/testify/require" @@ -23,7 +24,7 @@ func TestUTXOIDVerifyNil(t *testing.T) { func TestUTXOID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) @@ -50,56 +51,43 @@ func TestUTXOID(t *testing.T) { require.Equal(utxoID.InputID(), newUTXOID.InputID()) } -func TestUTXOIDLess(t *testing.T) { +func TestUTXOIDCompare(t *testing.T) { type test struct { name string id1 UTXOID id2 UTXOID - expected bool + expected int } tests := []*test{ { name: "same", id1: UTXOID{}, id2: UTXOID{}, - expected: false, + expected: 0, }, { - name: "first id smaller", + name: "id smaller", id1: UTXOID{}, id2: UTXOID{ TxID: ids.ID{1}, }, - expected: true, + expected: -1, }, { - name: "first id larger", - id1: UTXOID{ - TxID: ids.ID{1}, - }, - id2: UTXOID{}, - expected: false, - }, - { - name: "first index smaller", + name: "index smaller", id1: UTXOID{}, id2: UTXOID{ OutputIndex: 1, }, - expected: true, - }, - { - name: "first index larger", - id1: UTXOID{ - OutputIndex: 1, - }, - id2: UTXOID{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.id1.Less(&tt.id2)) + require := require.New(t) + + require.Equal(tt.expected, tt.id1.Compare(&tt.id2)) + require.Equal(-tt.expected, tt.id2.Compare(&tt.id1)) }) } } diff --git a/vms/components/avax/utxo_state.go b/vms/components/avax/utxo_state.go index 5ff20b38dddd..9bc648a6e531 100644 --- a/vms/components/avax/utxo_state.go +++ b/vms/components/avax/utxo_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index e5fc8c695638..fa4c530e011a 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -41,7 +42,7 @@ func TestUTXOState(t *testing.T) { } utxoID := utxo.InputID() - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) diff --git a/vms/components/avax/utxo_test.go b/vms/components/avax/utxo_test.go index 7561f85da2cc..a79c8fcb6cd6 100644 --- a/vms/components/avax/utxo_test.go +++ b/vms/components/avax/utxo_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -32,7 +33,7 @@ func TestUTXOVerifyEmpty(t *testing.T) { func TestUTXOSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) diff --git a/vms/components/chain/block.go b/vms/components/chain/block.go index d03659ed16da..3966dd2005bd 100644 --- a/vms/components/chain/block.go +++ b/vms/components/chain/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain diff --git a/vms/components/chain/state.go b/vms/components/chain/state.go index 6311e550ce6c..6ada30e73eee 100644 --- a/vms/components/chain/state.go +++ b/vms/components/chain/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go index add7723e9fcf..c0583011bd75 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain diff --git a/vms/components/index/index.go b/vms/components/index/index.go index a1bced563979..18bdd7337b28 100644 --- a/vms/components/index/index.go +++ b/vms/components/index/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package index diff --git a/vms/components/index/metrics.go b/vms/components/index/metrics.go index a38006305548..8531de6982e3 100644 --- a/vms/components/index/metrics.go +++ b/vms/components/index/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package index diff --git a/vms/components/keystore/codec.go b/vms/components/keystore/codec.go index 5acb1725aa6e..15576b73e4ea 100644 --- a/vms/components/keystore/codec.go +++ b/vms/components/keystore/codec.go @@ -1,31 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" ) -const ( - // CodecVersion is the current default codec version - CodecVersion = 0 -) +const CodecVersion = 0 -// Codecs do serialization and deserialization var ( Codec codec.Manager LegacyCodec codec.Manager ) func init() { - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) Codec = codec.NewDefaultManager() - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) + lc := linearcodec.NewDefault(time.Time{}) LegacyCodec = codec.NewManager(math.MaxInt32) err := utils.Err(
diff --git a/vms/components/keystore/user.go b/vms/components/keystore/user.go index 561e2f52b819..20749e5b48bf 100644 --- a/vms/components/keystore/user.go +++ b/vms/components/keystore/user.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore
diff --git a/vms/components/keystore/user_test.go b/vms/components/keystore/user_test.go index 9f94cf03b7c6..66e331c2f655 100644 --- a/vms/components/keystore/user_test.go +++ b/vms/components/keystore/user_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore
diff --git a/vms/components/message/camino_codec.go b/vms/components/message/camino_codec.go deleted file mode 100644 index a6dbc60fd21c..000000000000 --- a/vms/components/message/camino_codec.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -const ( - CodecVersion uint16 = 0 -) - -// Codec does serialization and deserialization -var Codec codec.Manager - -func init() { - Codec = codec.NewManager(maxMessageSize) - lc := linearcodec.NewCaminoDefault() - - errs := wrappers.Errs{} - errs.Add( - lc.RegisterType(&CaminoRewardMessage{}), - Codec.RegisterCodec(CodecVersion, lc), - ) - if errs.Errored() { - panic(errs.Err) - } -}
diff --git a/vms/components/message/camino_readme.md b/vms/components/message/camino_readme.md new file mode 100644 index 000000000000..9d2f86211c1e --- /dev/null +++ b/vms/components/message/camino_readme.md @@ -0,0 +1,4 @@ +These codec types are only used for communication between nodes or between node chains. + +`CaminoRewardMessage` is sent from the C-Chain to the P-Chain. +A `Tx` message is sent from one node to another (tx gossip).
diff --git a/vms/components/message/codec.go b/vms/components/message/codec.go index 3a5eee5416ca..5d28233be3db 100644 --- a/vms/components/message/codec.go +++ b/vms/components/message/codec.go @@ -1,9 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2024, Chain4Travel AG. All rights reserved. +// +// This file is a derived work, based on ava-labs code whose +// original notices appear below. +// +// It is distributed under the same license conditions as the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********************************************************** +// Copyright (C) 2019-2024, Ava Labs, Inc.
All rights reserved. // See the file LICENSE for licensing terms. package message import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" @@ -11,21 +23,21 @@ import ( ) const ( - codecVersion = 0 + CodecVersion = 0 + maxMessageSize = 512 * units.KiB - maxSliceLen = maxMessageSize ) -// Codec does serialization and deserialization -var c codec.Manager +var Codec codec.Manager func init() { - c = codec.NewManager(maxMessageSize) - lc := linearcodec.NewCustomMaxLength(maxSliceLen) + Codec = codec.NewManager(maxMessageSize) + lc := linearcodec.NewDefault(time.Time{}) err := utils.Err( + lc.RegisterType(&CaminoRewardMessage{}), lc.RegisterType(&Tx{}), - c.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { panic(err) diff --git a/vms/components/message/handler.go b/vms/components/message/handler.go index afe123518164..2af2f55a3f0c 100644 --- a/vms/components/message/handler.go +++ b/vms/components/message/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/vms/components/message/handler_test.go b/vms/components/message/handler_test.go index 489c973b68e0..bc2342838efa 100644 --- a/vms/components/message/handler_test.go +++ b/vms/components/message/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/vms/components/message/message.go b/vms/components/message/message.go index 009cf67f4884..a33d4104430a 100644 --- a/vms/components/message/message.go +++ b/vms/components/message/message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -65,11 +65,11 @@ func Parse(bytes []byte) (Message, error) { // It must have been encoded with avalanchego's codec. // TODO remove else statement remove once all nodes support proto encoding. // i.e. when all nodes are on v1.11.0 or later. - version, err := c.Unmarshal(bytes, &msg) + version, err := Codec.Unmarshal(bytes, &msg) if err != nil { return nil, err } - if version != codecVersion { + if version != CodecVersion { return nil, ErrUnexpectedCodecVersion } } @@ -78,7 +78,7 @@ func Parse(bytes []byte) (Message, error) { } func Build(msg Message) ([]byte, error) { - bytes, err := c.Marshal(codecVersion, &msg) + bytes, err := Codec.Marshal(CodecVersion, &msg) msg.initialize(bytes) return bytes, err } diff --git a/vms/components/message/message_test.go b/vms/components/message/message_test.go index 38b5099e2b45..a4a12312cddb 100644 --- a/vms/components/message/message_test.go +++ b/vms/components/message/message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/vms/components/message/tx.go b/vms/components/message/tx.go index 62fdb0dd54f8..4eced1818233 100644 --- a/vms/components/message/tx.go +++ b/vms/components/message/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
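With the codec consolidation above, message.Codec is exported and registers both CaminoRewardMessage and Tx, and Build/Parse round-trip through it. A minimal usage sketch; the Tx field name is an assumption taken from the existing message type:

package example

import (
	"errors"

	"github.com/ava-labs/avalanchego/vms/components/message"
)

func roundTrip(signedTxBytes []byte) error {
	raw, err := message.Build(&message.Tx{Tx: signedTxBytes}) // field name assumed
	if err != nil {
		return err
	}

	msg, err := message.Parse(raw)
	if err != nil {
		return err
	}

	if _, ok := msg.(*message.Tx); !ok {
		return errors.New("unexpected message type")
	}
	return nil
}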
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/vms/components/message/tx_test.go b/vms/components/message/tx_test.go index 3634abb4c71f..8c52828b7977 100644 --- a/vms/components/message/tx_test.go +++ b/vms/components/message/tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/vms/components/verify/mock_verifiable.go b/vms/components/verify/mock_verifiable.go index 915491142014..fe0e5770500c 100644 --- a/vms/components/verify/mock_verifiable.go +++ b/vms/components/verify/mock_verifiable.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/verify (interfaces: Verifiable) +// +// Generated by this command: +// +// mockgen -package=verify -destination=vms/components/verify/mock_verifiable.go github.com/ava-labs/avalanchego/vms/components/verify Verifiable +// // Package verify is a generated GoMock package. package verify diff --git a/vms/components/verify/subnet.go b/vms/components/verify/subnet.go index a1030164e145..ba4e65ee2bb3 100644 --- a/vms/components/verify/subnet.go +++ b/vms/components/verify/subnet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify diff --git a/vms/components/verify/subnet_test.go b/vms/components/verify/subnet_test.go index 1bfc6c490350..bcffab905ed0 100644 --- a/vms/components/verify/subnet_test.go +++ b/vms/components/verify/subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify diff --git a/vms/components/verify/verification.go b/vms/components/verify/verification.go index 566facbdf865..b712b730e8e8 100644 --- a/vms/components/verify/verification.go +++ b/vms/components/verify/verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify diff --git a/vms/components/verify/verification_test.go b/vms/components/verify/verification_test.go index 408fc2e94736..57f3b856b3a8 100644 --- a/vms/components/verify/verification_test.go +++ b/vms/components/verify/verification_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify diff --git a/vms/example/xsvm/README.md b/vms/example/xsvm/README.md index 6f30f7b7e756..53548228a989 100644 --- a/vms/example/xsvm/README.md +++ b/vms/example/xsvm/README.md @@ -18,7 +18,7 @@ Avalanche is a network composed of multiple sub-networks (called [subnets][Subne ## Introduction -Just as [Coreth] powers the [C-Chain], XSVM can be used to power its own blockchain in an Avalanche [Subnet]. 
Instead of providing a place to execute Solidity smart contracts, however, XSVM enables asset transfers for assets originating on it's own chain or other XSVM chains on other subnets. +Just as [Coreth] powers the [C-Chain], XSVM can be used to power its own blockchain in an Avalanche [Subnet]. Instead of providing a place to execute Solidity smart contracts, however, XSVM enables asset transfers for assets originating on its own chain or other XSVM chains on other subnets. ## How it Works diff --git a/vms/example/xsvm/api/client.go b/vms/example/xsvm/api/client.go index 785b092faed1..d9a6a711950a 100644 --- a/vms/example/xsvm/api/client.go +++ b/vms/example/xsvm/api/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api @@ -170,7 +170,7 @@ func (c *client) IssueTx( newTx *tx.Tx, options ...rpc.Option, ) (ids.ID, error) { - txBytes, err := tx.Codec.Marshal(tx.Version, newTx) + txBytes, err := tx.Codec.Marshal(tx.CodecVersion, newTx) if err != nil { return ids.Empty, err } diff --git a/vms/example/xsvm/api/server.go b/vms/example/xsvm/api/server.go index 733cf4f2cc44..dd2545e88dcf 100644 --- a/vms/example/xsvm/api/server.go +++ b/vms/example/xsvm/api/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api diff --git a/vms/example/xsvm/block/block.go b/vms/example/xsvm/block/block.go index 2db314ea1a5a..ab6b41d77f2b 100644 --- a/vms/example/xsvm/block/block.go +++ b/vms/example/xsvm/block/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -26,7 +26,7 @@ func (b *Stateless) Time() time.Time { } func (b *Stateless) ID() (ids.ID, error) { - bytes, err := Codec.Marshal(Version, b) + bytes, err := Codec.Marshal(CodecVersion, b) return hashing.ComputeHash256Array(bytes), err } diff --git a/vms/example/xsvm/block/codec.go b/vms/example/xsvm/block/codec.go index 0ffbc98d58e7..b4e5c811e29f 100644 --- a/vms/example/xsvm/block/codec.go +++ b/vms/example/xsvm/block/codec.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" -// Version is the current default codec version -const Version = tx.Version +const CodecVersion = tx.CodecVersion var Codec = tx.Codec diff --git a/vms/example/xsvm/builder/builder.go b/vms/example/xsvm/builder/builder.go index 7135985d3707..231679f5df56 100644 --- a/vms/example/xsvm/builder/builder.go +++ b/vms/example/xsvm/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder diff --git a/vms/example/xsvm/chain/block.go b/vms/example/xsvm/chain/block.go index 8eb660e23c12..8ab761d515f8 100644 --- a/vms/example/xsvm/chain/block.go +++ b/vms/example/xsvm/chain/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package chain diff --git a/vms/example/xsvm/chain/chain.go b/vms/example/xsvm/chain/chain.go index ef6a59de2dbb..7fc60261d159 100644 --- a/vms/example/xsvm/chain/chain.go +++ b/vms/example/xsvm/chain/chain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain @@ -80,7 +80,7 @@ func (c *chain) NewBlock(blk *xsblock.Stateless) (Block, error) { return blk, nil } - blkBytes, err := xsblock.Codec.Marshal(xsblock.Version, blk) + blkBytes, err := xsblock.Codec.Marshal(xsblock.CodecVersion, blk) if err != nil { return nil, err } diff --git a/vms/example/xsvm/cmd/account/cmd.go b/vms/example/xsvm/cmd/account/cmd.go index 3436c3fea1b5..cea0b7b6a227 100644 --- a/vms/example/xsvm/cmd/account/cmd.go +++ b/vms/example/xsvm/cmd/account/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package account diff --git a/vms/example/xsvm/cmd/account/flags.go b/vms/example/xsvm/cmd/account/flags.go index 17092bbe1a21..3a9588ab2c63 100644 --- a/vms/example/xsvm/cmd/account/flags.go +++ b/vms/example/xsvm/cmd/account/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package account diff --git a/vms/example/xsvm/cmd/chain/cmd.go b/vms/example/xsvm/cmd/chain/cmd.go index a87e6911b8a8..679bdea0b9f0 100644 --- a/vms/example/xsvm/cmd/chain/cmd.go +++ b/vms/example/xsvm/cmd/chain/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain diff --git a/vms/example/xsvm/cmd/chain/create/cmd.go b/vms/example/xsvm/cmd/chain/create/cmd.go index 1f00491a4ce6..984ff45df8b0 100644 --- a/vms/example/xsvm/cmd/chain/create/cmd.go +++ b/vms/example/xsvm/cmd/chain/create/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package create @@ -55,7 +55,7 @@ func createFunc(c *cobra.Command, args []string) error { // Get the P-chain wallet pWallet := wallet.P() - genesisBytes, err := genesis.Codec.Marshal(genesis.Version, &genesis.Genesis{ + genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, &genesis.Genesis{ Timestamp: 0, Allocations: []genesis.Allocation{ { diff --git a/vms/example/xsvm/cmd/chain/create/flags.go b/vms/example/xsvm/cmd/chain/create/flags.go index 80b1eefd67cf..d3e554659e49 100644 --- a/vms/example/xsvm/cmd/chain/create/flags.go +++ b/vms/example/xsvm/cmd/chain/create/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package create diff --git a/vms/example/xsvm/cmd/chain/genesis/cmd.go b/vms/example/xsvm/cmd/chain/genesis/cmd.go index ae18e1db85e3..be839fced200 100644 --- a/vms/example/xsvm/cmd/chain/genesis/cmd.go +++ b/vms/example/xsvm/cmd/chain/genesis/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -34,7 +34,7 @@ func genesisFunc(c *cobra.Command, args []string) error { return err } - genesisBytes, err := genesis.Codec.Marshal(genesis.Version, config.Genesis) + genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, config.Genesis) if err != nil { return err } diff --git a/vms/example/xsvm/cmd/chain/genesis/flags.go b/vms/example/xsvm/cmd/chain/genesis/flags.go index 5291327197d5..0bacf0edd29b 100644 --- a/vms/example/xsvm/cmd/chain/genesis/flags.go +++ b/vms/example/xsvm/cmd/chain/genesis/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/vms/example/xsvm/cmd/issue/cmd.go b/vms/example/xsvm/cmd/issue/cmd.go index e973efc31d5b..12c156d06628 100644 --- a/vms/example/xsvm/cmd/issue/cmd.go +++ b/vms/example/xsvm/cmd/issue/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package issue diff --git a/vms/example/xsvm/cmd/issue/export/cmd.go b/vms/example/xsvm/cmd/issue/export/cmd.go index c0a8cd11008c..efde479971cc 100644 --- a/vms/example/xsvm/cmd/issue/export/cmd.go +++ b/vms/example/xsvm/cmd/issue/export/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package export diff --git a/vms/example/xsvm/cmd/issue/export/flags.go b/vms/example/xsvm/cmd/issue/export/flags.go index f14c21ef8f6f..6d7f4e49a233 100644 --- a/vms/example/xsvm/cmd/issue/export/flags.go +++ b/vms/example/xsvm/cmd/issue/export/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package export diff --git a/vms/example/xsvm/cmd/issue/importtx/cmd.go b/vms/example/xsvm/cmd/issue/importtx/cmd.go index 2c8fbb7edb27..5bf104212ef6 100644 --- a/vms/example/xsvm/cmd/issue/importtx/cmd.go +++ b/vms/example/xsvm/cmd/issue/importtx/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package importtx diff --git a/vms/example/xsvm/cmd/issue/importtx/flags.go b/vms/example/xsvm/cmd/issue/importtx/flags.go index 486dfa1a05ea..15b968775fcc 100644 --- a/vms/example/xsvm/cmd/issue/importtx/flags.go +++ b/vms/example/xsvm/cmd/issue/importtx/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package importtx diff --git a/vms/example/xsvm/cmd/issue/transfer/cmd.go b/vms/example/xsvm/cmd/issue/transfer/cmd.go index 5dd15bc4ea26..86c47032a6c0 100644 --- a/vms/example/xsvm/cmd/issue/transfer/cmd.go +++ b/vms/example/xsvm/cmd/issue/transfer/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package transfer diff --git a/vms/example/xsvm/cmd/issue/transfer/flags.go b/vms/example/xsvm/cmd/issue/transfer/flags.go index 5d4667b47458..043c07243e54 100644 --- a/vms/example/xsvm/cmd/issue/transfer/flags.go +++ b/vms/example/xsvm/cmd/issue/transfer/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package transfer diff --git a/vms/example/xsvm/cmd/run/cmd.go b/vms/example/xsvm/cmd/run/cmd.go index 9776b15eb3a7..eace7e85d7ed 100644 --- a/vms/example/xsvm/cmd/run/cmd.go +++ b/vms/example/xsvm/cmd/run/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package run diff --git a/vms/example/xsvm/cmd/version/cmd.go b/vms/example/xsvm/cmd/version/cmd.go index 0827a9e800b7..1c956c6a9b00 100644 --- a/vms/example/xsvm/cmd/version/cmd.go +++ b/vms/example/xsvm/cmd/version/cmd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/vms/example/xsvm/cmd/xsvm/main.go b/vms/example/xsvm/cmd/xsvm/main.go index ac370a4a9df2..c6961a8c1741 100644 --- a/vms/example/xsvm/cmd/xsvm/main.go +++ b/vms/example/xsvm/cmd/xsvm/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/vms/example/xsvm/constants.go b/vms/example/xsvm/constants.go index e4b22b742450..eb2199211ef7 100644 --- a/vms/example/xsvm/constants.go +++ b/vms/example/xsvm/constants.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package xsvm diff --git a/vms/example/xsvm/execute/block.go b/vms/example/xsvm/execute/block.go index dc9de45af19e..b2938a58f5e8 100644 --- a/vms/example/xsvm/execute/block.go +++ b/vms/example/xsvm/execute/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package execute @@ -62,7 +62,7 @@ func Block( return err } - blkBytes, err := xsblock.Codec.Marshal(xsblock.Version, blk) + blkBytes, err := xsblock.Codec.Marshal(xsblock.CodecVersion, blk) if err != nil { return err } diff --git a/vms/example/xsvm/execute/expects_context.go b/vms/example/xsvm/execute/expects_context.go index 0109cadc3731..da21b520f314 100644 --- a/vms/example/xsvm/execute/expects_context.go +++ b/vms/example/xsvm/execute/expects_context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package execute diff --git a/vms/example/xsvm/execute/genesis.go b/vms/example/xsvm/execute/genesis.go index 312a7bd0b73c..889432d38256 100644 --- a/vms/example/xsvm/execute/genesis.go +++ b/vms/example/xsvm/execute/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package execute @@ -36,7 +36,7 @@ func Genesis(db database.KeyValueReaderWriterDeleter, chainID ids.ID, g *genesis return err } - blkBytes, err := block.Codec.Marshal(block.Version, blk) + blkBytes, err := block.Codec.Marshal(block.CodecVersion, blk) if err != nil { return err } diff --git a/vms/example/xsvm/execute/tx.go b/vms/example/xsvm/execute/tx.go index 01bfc1fb7d6d..f3f6ad504de4 100644 --- a/vms/example/xsvm/execute/tx.go +++ b/vms/example/xsvm/execute/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package execute diff --git a/vms/example/xsvm/factory.go b/vms/example/xsvm/factory.go index 0531a1bcc7d0..99d33b8290d1 100644 --- a/vms/example/xsvm/factory.go +++ b/vms/example/xsvm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package xsvm diff --git a/vms/example/xsvm/genesis/codec.go b/vms/example/xsvm/genesis/codec.go index 6ef652864574..c0851cccf146 100644 --- a/vms/example/xsvm/genesis/codec.go +++ b/vms/example/xsvm/genesis/codec.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis import "github.com/ava-labs/avalanchego/vms/example/xsvm/block" -// Version is the current default codec version -const Version = block.Version +const CodecVersion = block.CodecVersion var Codec = block.Codec diff --git a/vms/example/xsvm/genesis/genesis.go b/vms/example/xsvm/genesis/genesis.go index e8580ffef7f6..0fb420f3a4da 100644 --- a/vms/example/xsvm/genesis/genesis.go +++ b/vms/example/xsvm/genesis/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -26,7 +26,7 @@ func Parse(bytes []byte) (*Genesis, error) { } func Block(genesis *Genesis) (*block.Stateless, error) { - bytes, err := Codec.Marshal(Version, genesis) + bytes, err := Codec.Marshal(CodecVersion, genesis) if err != nil { return nil, err } diff --git a/vms/example/xsvm/genesis/genesis_test.go b/vms/example/xsvm/genesis/genesis_test.go index 511c5c9b0b8b..ba050d12f801 100644 --- a/vms/example/xsvm/genesis/genesis_test.go +++ b/vms/example/xsvm/genesis/genesis_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -26,7 +26,7 @@ func TestGenesis(t *testing.T) { {Address: id2, Balance: 3000000000}, }, } - bytes, err := Codec.Marshal(Version, genesis) + bytes, err := Codec.Marshal(CodecVersion, genesis) require.NoError(err) parsed, err := Parse(bytes) diff --git a/vms/example/xsvm/state/keys.go b/vms/example/xsvm/state/keys.go index 5e78f1b95a0e..e0df1e36cdc0 100644 --- a/vms/example/xsvm/state/keys.go +++ b/vms/example/xsvm/state/keys.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/vms/example/xsvm/state/storage.go b/vms/example/xsvm/state/storage.go index c9b6bf1ae65d..48234e9678de 100644 --- a/vms/example/xsvm/state/storage.go +++ b/vms/example/xsvm/state/storage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/example/xsvm/tx/codec.go b/vms/example/xsvm/tx/codec.go index c91a2165f1f6..f61c7bf18098 100644 --- a/vms/example/xsvm/tx/codec.go +++ b/vms/example/xsvm/tx/codec.go @@ -1,30 +1,30 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" ) -// Version is the current default codec version -const Version = 0 +const CodecVersion = 0 var Codec codec.Manager func init() { - c := linearcodec.NewCustomMaxLength(math.MaxInt32) + c := linearcodec.NewDefault(time.Time{}) Codec = codec.NewManager(math.MaxInt32) err := utils.Err( c.RegisterType(&Transfer{}), c.RegisterType(&Export{}), c.RegisterType(&Import{}), - Codec.RegisterCodec(Version, c), + Codec.RegisterCodec(CodecVersion, c), ) if err != nil { panic(err) diff --git a/vms/example/xsvm/tx/export.go b/vms/example/xsvm/tx/export.go index 1192952c4dca..d8de16a69e7e 100644 --- a/vms/example/xsvm/tx/export.go +++ b/vms/example/xsvm/tx/export.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx diff --git a/vms/example/xsvm/tx/import.go b/vms/example/xsvm/tx/import.go index 8449c79c55b2..ff98b0a01aa2 100644 --- a/vms/example/xsvm/tx/import.go +++ b/vms/example/xsvm/tx/import.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx diff --git a/vms/example/xsvm/tx/payload.go b/vms/example/xsvm/tx/payload.go index e93e5f560ac5..eecc2f082d11 100644 --- a/vms/example/xsvm/tx/payload.go +++ b/vms/example/xsvm/tx/payload.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx @@ -34,7 +34,7 @@ func NewPayload( Amount: amount, To: to, } - bytes, err := Codec.Marshal(Version, p) + bytes, err := Codec.Marshal(CodecVersion, p) p.bytes = bytes return p, err } diff --git a/vms/example/xsvm/tx/transfer.go b/vms/example/xsvm/tx/transfer.go index c895b5e5cc51..a3d29c1432c9 100644 --- a/vms/example/xsvm/tx/transfer.go +++ b/vms/example/xsvm/tx/transfer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx diff --git a/vms/example/xsvm/tx/tx.go b/vms/example/xsvm/tx/tx.go index fae58bae0806..8b05d5375b27 100644 --- a/vms/example/xsvm/tx/tx.go +++ b/vms/example/xsvm/tx/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tx @@ -28,7 +28,7 @@ func Parse(bytes []byte) (*Tx, error) { } func Sign(utx Unsigned, key *secp256k1.PrivateKey) (*Tx, error) { - unsignedBytes, err := Codec.Marshal(Version, &utx) + unsignedBytes, err := Codec.Marshal(CodecVersion, &utx) if err != nil { return nil, err } @@ -46,12 +46,12 @@ func Sign(utx Unsigned, key *secp256k1.PrivateKey) (*Tx, error) { } func (tx *Tx) ID() (ids.ID, error) { - bytes, err := Codec.Marshal(Version, tx) + bytes, err := Codec.Marshal(CodecVersion, tx) return hashing.ComputeHash256Array(bytes), err } func (tx *Tx) SenderID() (ids.ShortID, error) { - unsignedBytes, err := Codec.Marshal(Version, &tx.Unsigned) + unsignedBytes, err := Codec.Marshal(CodecVersion, &tx.Unsigned) if err != nil { return ids.ShortEmpty, err } diff --git a/vms/example/xsvm/tx/unsigned.go b/vms/example/xsvm/tx/unsigned.go index 2c57f91e26fb..110611412b59 100644 --- a/vms/example/xsvm/tx/unsigned.go +++ b/vms/example/xsvm/tx/unsigned.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx diff --git a/vms/example/xsvm/tx/visitor.go b/vms/example/xsvm/tx/visitor.go index b411e06f15b0..045b03247479 100644 --- a/vms/example/xsvm/tx/visitor.go +++ b/vms/example/xsvm/tx/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tx diff --git a/vms/example/xsvm/vm.go b/vms/example/xsvm/vm.go index 3150fc59b481..f090ff468fe2 100644 --- a/vms/example/xsvm/vm.go +++ b/vms/example/xsvm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package xsvm @@ -112,10 +112,6 @@ func (*VM) Version(context.Context) (string, error) { return Version.String(), nil } -func (*VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { - return nil, nil -} - func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { server := rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") diff --git a/vms/fx/factory.go b/vms/fx/factory.go new file mode 100644 index 000000000000..a2c957a5bf60 --- /dev/null +++ b/vms/fx/factory.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fx + +// Factory returns an instance of a feature extension +type Factory interface { + New() any +} diff --git a/vms/manager.go b/vms/manager.go index d1041d3a4693..f4ae49e39cd0 100644 --- a/vms/manager.go +++ b/vms/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vms diff --git a/vms/metervm/batched_vm.go b/vms/metervm/batched_vm.go index dad17637a918..7b06f0989b89 100644 --- a/vms/metervm/batched_vm.go +++ b/vms/metervm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
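The xsvm changes above are mostly mechanical: the exported Version constant becomes CodecVersion across the block, genesis, and tx packages, and every codec is now built with linearcodec.NewDefault(time.Time{}) (the constructor gained a time.Time argument; this diff passes the zero value everywhere). A hedged sketch of a caller updated for the rename; signedTransferBytes is a hypothetical helper, not code from this diff:

package example // hypothetical caller of vms/example/xsvm/tx

import (
	"github.com/ava-labs/avalanchego/utils/crypto/secp256k1"
	"github.com/ava-labs/avalanchego/vms/example/xsvm/tx"
)

// signedTransferBytes signs an unsigned xsvm tx and marshals it the same
// way api.client.IssueTx does after the rename of tx.Version to tx.CodecVersion.
func signedTransferBytes(utx tx.Unsigned, key *secp256k1.PrivateKey) ([]byte, error) {
	signed, err := tx.Sign(utx, key)
	if err != nil {
		return nil, err
	}
	return tx.Codec.Marshal(tx.CodecVersion, signed)
}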
package metervm diff --git a/vms/metervm/block.go b/vms/metervm/block.go index 17ffffd5f549..10d44d2c41b6 100644 --- a/vms/metervm/block.go +++ b/vms/metervm/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/block_metrics.go b/vms/metervm/block_metrics.go index 094e41875aac..160d0eee50ad 100644 --- a/vms/metervm/block_metrics.go +++ b/vms/metervm/block_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 8f9fee1e247d..73e949180a96 100644 --- a/vms/metervm/block_vm.go +++ b/vms/metervm/block_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/build_block_with_context_vm.go b/vms/metervm/build_block_with_context_vm.go index 141d68e0ceac..012237ee3178 100644 --- a/vms/metervm/build_block_with_context_vm.go +++ b/vms/metervm/build_block_with_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/metrics.go b/vms/metervm/metrics.go index eb2c2b40736b..f79c5e38e029 100644 --- a/vms/metervm/metrics.go +++ b/vms/metervm/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/state_syncable_vm.go b/vms/metervm/state_syncable_vm.go index bcb27d682b08..42b5efa8a79e 100644 --- a/vms/metervm/state_syncable_vm.go +++ b/vms/metervm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/vertex_metrics.go b/vms/metervm/vertex_metrics.go index 2a4b0a506151..67caa50b610e 100644 --- a/vms/metervm/vertex_metrics.go +++ b/vms/metervm/vertex_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index 827bb535fcbd..8992b4863283 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/vms/mock_manager.go b/vms/mock_manager.go index 9726f085c513..cea232ba2a7d 100644 --- a/vms/mock_manager.go +++ b/vms/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms (interfaces: Factory,Manager) +// +// Generated by this command: +// +// mockgen -package=vms -destination=vms/mock_manager.go github.com/ava-labs/avalanchego/vms Factory,Manager +// // Package vms is a generated GoMock package. package vms @@ -40,16 +42,16 @@ func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { } // New mocks base method. -func (m *MockFactory) New(arg0 logging.Logger) (interface{}, error) { +func (m *MockFactory) New(arg0 logging.Logger) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "New", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // New indicates an expected call of New. -func (mr *MockFactoryMockRecorder) New(arg0 interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) New(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFactory)(nil).New), arg0) } @@ -86,7 +88,7 @@ func (m *MockManager) Alias(arg0 ids.ID, arg1 string) error { } // Alias indicates an expected call of Alias. -func (mr *MockManagerMockRecorder) Alias(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Alias(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), arg0, arg1) } @@ -101,7 +103,7 @@ func (m *MockManager) Aliases(arg0 ids.ID) ([]string, error) { } // Aliases indicates an expected call of Aliases. -func (mr *MockManagerMockRecorder) Aliases(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Aliases(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), arg0) } @@ -116,7 +118,7 @@ func (m *MockManager) GetFactory(arg0 ids.ID) (Factory, error) { } // GetFactory indicates an expected call of GetFactory. -func (mr *MockManagerMockRecorder) GetFactory(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetFactory(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), arg0) } @@ -146,7 +148,7 @@ func (m *MockManager) Lookup(arg0 string) (ids.ID, error) { } // Lookup indicates an expected call of Lookup. -func (mr *MockManagerMockRecorder) Lookup(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Lookup(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), arg0) } @@ -161,7 +163,7 @@ func (m *MockManager) PrimaryAlias(arg0 ids.ID) (string, error) { } // PrimaryAlias indicates an expected call of PrimaryAlias. -func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), arg0) } @@ -175,7 +177,7 @@ func (m *MockManager) PrimaryAliasOrDefault(arg0 ids.ID) string { } // PrimaryAliasOrDefault indicates an expected call of PrimaryAliasOrDefault. 
-func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), arg0) } @@ -189,7 +191,7 @@ func (m *MockManager) RegisterFactory(arg0 context.Context, arg1 ids.ID, arg2 Fa } // RegisterFactory indicates an expected call of RegisterFactory. -func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), arg0, arg1, arg2) } @@ -201,7 +203,7 @@ func (m *MockManager) RemoveAliases(arg0 ids.ID) { } // RemoveAliases indicates an expected call of RemoveAliases. -func (mr *MockManagerMockRecorder) RemoveAliases(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveAliases(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), arg0) } diff --git a/vms/nftfx/credential.go b/vms/nftfx/credential.go index 64af78587282..a8970b854b1a 100644 --- a/vms/nftfx/credential.go +++ b/vms/nftfx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/credential_test.go b/vms/nftfx/credential_test.go index c1e83ccab00e..0f05af26a738 100644 --- a/vms/nftfx/credential_test.go +++ b/vms/nftfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/factory.go b/vms/nftfx/factory.go index e52d629fe670..c8be03661bb2 100644 --- a/vms/nftfx/factory.go +++ b/vms/nftfx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "nftfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'n', 'f', 't', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go index 10581c8db922..6b5ecafbeece 100644 --- a/vms/nftfx/factory_test.go +++ b/vms/nftfx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
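The nftfx factory above now targets the new vms/fx Factory interface introduced earlier in this diff (New() any, with no logger argument and no error) instead of vms.Factory. A minimal sketch of a caller under that assumption; newNFTFx is a hypothetical helper mirroring what the updated factory test checks:

package example // hypothetical caller of vms/nftfx

import (
	"fmt"

	"github.com/ava-labs/avalanchego/vms/fx"
	"github.com/ava-labs/avalanchego/vms/nftfx"
)

// newNFTFx obtains the fx instance through the new interface and asserts
// its concrete type.
func newNFTFx() (*nftfx.Fx, error) {
	var factory fx.Factory = &nftfx.Factory{}
	v := factory.New()
	instance, ok := v.(*nftfx.Fx)
	if !ok {
		return nil, fmt.Errorf("unexpected fx type %T", v)
	}
	return instance, nil
}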
package nftfx @@ -7,15 +7,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go index f56ffcc10e5d..66ea9460b56b 100644 --- a/vms/nftfx/fx.go +++ b/vms/nftfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go index d054d680c305..1ed3426f5b11 100644 --- a/vms/nftfx/fx_test.go +++ b/vms/nftfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -39,7 +39,7 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} @@ -56,7 +56,7 @@ func TestFxVerifyMintOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -92,7 +92,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -126,7 +126,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -157,7 +157,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -189,7 +189,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -220,7 +220,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -258,7 +258,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -296,7 +296,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: 
linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -346,7 +346,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -387,7 +387,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -435,7 +435,7 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -486,7 +486,7 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -537,7 +537,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -589,7 +589,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -625,7 +625,7 @@ func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) diff --git a/vms/nftfx/mint_operation.go b/vms/nftfx/mint_operation.go index 2d1c5bbb5d98..bb01ee52ed23 100644 --- a/vms/nftfx/mint_operation.go +++ b/vms/nftfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/mint_operation_test.go b/vms/nftfx/mint_operation_test.go index 4dc1ce2a3eb6..ff397e9a07a3 100644 --- a/vms/nftfx/mint_operation_test.go +++ b/vms/nftfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/mint_output.go b/vms/nftfx/mint_output.go index 4c2f53698307..e3a974379a16 100644 --- a/vms/nftfx/mint_output.go +++ b/vms/nftfx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx diff --git a/vms/nftfx/mint_output_test.go b/vms/nftfx/mint_output_test.go index 583f211f972f..9589fc17f75b 100644 --- a/vms/nftfx/mint_output_test.go +++ b/vms/nftfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/transfer_operation.go b/vms/nftfx/transfer_operation.go index 010d43890f62..014cd900eca7 100644 --- a/vms/nftfx/transfer_operation.go +++ b/vms/nftfx/transfer_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/transfer_operation_test.go b/vms/nftfx/transfer_operation_test.go index ad896b14ee7d..b8892aec63bd 100644 --- a/vms/nftfx/transfer_operation_test.go +++ b/vms/nftfx/transfer_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/transfer_output.go b/vms/nftfx/transfer_output.go index c87d47984e39..e849e1c462e5 100644 --- a/vms/nftfx/transfer_output.go +++ b/vms/nftfx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/vms/nftfx/transfer_output_test.go b/vms/nftfx/transfer_output_test.go index 330723144106..0effa6c393a7 100644 --- a/vms/nftfx/transfer_output_test.go +++ b/vms/nftfx/transfer_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx diff --git a/vms/platformvm/api/camino.go b/vms/platformvm/api/camino.go index c4b9662ef5b3..41bcfcf0a598 100644 --- a/vms/platformvm/api/camino.go +++ b/vms/platformvm/api/camino.go @@ -274,7 +274,7 @@ func buildCaminoGenesis(args *BuildGenesisArgs, reply *BuildGenesisReply) error } // Marshal genesis to bytes - bytes, err := genesis.Codec.Marshal(genesis.Version, g) + bytes, err := genesis.Codec.Marshal(genesis.CodecVersion, g) if err != nil { return fmt.Errorf("couldn't marshal genesis: %w", err) } @@ -287,7 +287,7 @@ func buildCaminoGenesis(args *BuildGenesisArgs, reply *BuildGenesisReply) error } func makeValidator( - vdr *PermissionlessValidator, + vdr *GenesisPermissionlessValidator, avaxAssetID ids.ID, networkID uint32, ) (*txs.Tx, error) { diff --git a/vms/platformvm/api/camino_test.go b/vms/platformvm/api/camino_test.go index d9d391739589..b86c08a1de8c 100644 --- a/vms/platformvm/api/camino_test.go +++ b/vms/platformvm/api/camino_test.go @@ -62,8 +62,8 @@ func TestBuildCaminoGenesis(t *testing.T) { Address: addrStr, Amount: 10, }}, - Validators: []PermissionlessValidator{{ - Staker: Staker{ + Validators: []GenesisPermissionlessValidator{{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -347,9 +347,9 @@ func TestBuildCaminoGenesis(t *testing.T) { Amount: 10, }, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ { - Staker: Staker{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -407,7 +407,7 @@ func TestBuildCaminoGenesis(t *testing.T) { Amount: 0, }, }, - Validators: []PermissionlessValidator{}, + Validators: []GenesisPermissionlessValidator{}, Time: 5, Encoding: formatting.Hex, Camino: &Camino{ @@ -435,9 +435,9 @@ func TestBuildCaminoGenesis(t *testing.T) { "Wrong Validator Number": { args: BuildGenesisArgs{ UTXOs: []UTXO{}, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ { - Staker: Staker{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -483,9 +483,9 @@ func TestBuildCaminoGenesis(t *testing.T) { "Deposits and Staked Misalignment": { args: BuildGenesisArgs{ UTXOs: []UTXO{}, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ { - Staker: Staker{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -530,9 +530,9 @@ func TestBuildCaminoGenesis(t *testing.T) { Amount: 0, }, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ { - Staker: Staker{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -599,7 +599,7 @@ func TestBuildCaminoGenesis(t *testing.T) { expectedGenesis, err := tt.expectedGenesis(t) require.NoError(t, err) - bytes, err := genesis.Codec.Marshal(genesis.Version, expectedGenesis) + bytes, err := genesis.Codec.Marshal(genesis.CodecVersion, expectedGenesis) require.NoError(t, err) expectedReply.Bytes = string(bytes) diff --git a/vms/platformvm/api/static_client.go b/vms/platformvm/api/static_client.go deleted file mode 100644 index 9dea36664592..000000000000 --- a/vms/platformvm/api/static_client.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package api - -import ( - "context" - - "github.com/ava-labs/avalanchego/utils/rpc" -) - -var _ StaticClient = (*staticClient)(nil) - -// StaticClient for interacting with the platformvm static api -type StaticClient interface { - BuildGenesis( - ctx context.Context, - args *BuildGenesisArgs, - options ...rpc.Option, - ) (*BuildGenesisReply, error) -} - -// staticClient is an implementation of a platformvm client for interacting with -// the platformvm static api -type staticClient struct { - requester rpc.EndpointRequester -} - -// NewClient returns a platformvm client for interacting with the platformvm static api -func NewStaticClient(uri string) StaticClient { - return &staticClient{requester: rpc.NewEndpointRequester( - uri + "/ext/vm/platform", - )} -} - -func (c *staticClient) BuildGenesis( - ctx context.Context, - args *BuildGenesisArgs, - options ...rpc.Option, -) (resp *BuildGenesisReply, err error) { - resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "platform.buildGenesis", args, resp, options...) - return resp, err -} diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index 53da746659b4..35bf3156018c 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api @@ -60,30 +60,25 @@ type UTXO struct { } // TODO can we define this on *UTXO? -func (utxo UTXO) Less(other UTXO) bool { - if utxo.Locktime < other.Locktime { - return true - } else if utxo.Locktime > other.Locktime { - return false +func (utxo UTXO) Compare(other UTXO) int { + if locktimeCmp := utils.Compare(utxo.Locktime, other.Locktime); locktimeCmp != 0 { + return locktimeCmp } - - if utxo.Amount < other.Amount { - return true - } else if utxo.Amount > other.Amount { - return false + if amountCmp := utils.Compare(utxo.Amount, other.Amount); amountCmp != 0 { + return amountCmp } utxoAddr, err := bech32ToID(utxo.Address) if err != nil { - return false + return 0 } otherAddr, err := bech32ToID(other.Address) if err != nil { - return false + return 0 } - return utxoAddr.Less(otherAddr) + return utxoAddr.Compare(otherAddr) } // TODO: Refactor APIStaker, APIValidators and merge them together for @@ -108,6 +103,9 @@ type Staker struct { StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"` } +// GenesisValidator should be used for genesis validators only. +type GenesisValidator Staker + // Owner is the repr. of a reward owner sent over APIs. type Owner struct { Locktime json.Uint64 `json:"locktime"` @@ -142,6 +140,16 @@ type PermissionlessValidator struct { Delegators *[]PrimaryDelegator `json:"delegators,omitempty"` } +// GenesisPermissionlessValidator should be used for genesis validators only. +type GenesisPermissionlessValidator struct { + GenesisValidator + RewardOwner *Owner `json:"rewardOwner,omitempty"` + DelegationFee json.Float32 `json:"delegationFee"` + ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` + Staked []UTXO `json:"staked,omitempty"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` +} + // PermissionedValidator is the repr. of a permissioned validator sent over APIs.
type PermissionedValidator struct { Staker @@ -181,16 +189,16 @@ type Chain struct { // [Camino] are the camino specific genesis args. // [Time] is the Platform Chain's time at network genesis. type BuildGenesisArgs struct { - AvaxAssetID ids.ID `json:"avaxAssetID"` - NetworkID json.Uint32 `json:"networkID"` - UTXOs []UTXO `json:"utxos"` - Validators []PermissionlessValidator `json:"validators"` - Chains []Chain `json:"chains"` - Camino *Camino `json:"camino"` - Time json.Uint64 `json:"time"` - InitialSupply json.Uint64 `json:"initialSupply"` - Message string `json:"message"` - Encoding formatting.Encoding `json:"encoding"` + AvaxAssetID ids.ID `json:"avaxAssetID"` + NetworkID json.Uint32 `json:"networkID"` + UTXOs []UTXO `json:"utxos"` + Validators []GenesisPermissionlessValidator `json:"validators"` + Chains []Chain `json:"chains"` + Camino *Camino `json:"camino"` + Time json.Uint64 `json:"time"` + InitialSupply json.Uint64 `json:"initialSupply"` + Message string `json:"message"` + Encoding formatting.Encoding `json:"encoding"` } // BuildGenesisReply is the reply from BuildGenesis @@ -211,6 +219,7 @@ func bech32ToID(addrStr string) (ids.ShortID, error) { // BuildGenesis build the genesis state of the Platform Chain (and thereby the Avalanche network.) func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { // Specify the UTXOs on the Platform chain that exist at genesis. + var vdrs txheap.TimedHeap if args.Camino != nil && args.Camino.LockModeBondDeposit { return buildCaminoGenesis(args, reply) } @@ -257,7 +266,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl } // Specify the validators that are validating the primary network at genesis. - vdrs := txheap.NewByEndTime() + vdrs = txheap.NewByEndTime() for _, vdr := range args.Validators { weight := uint64(0) stake := make([]*avax.TransferableOutput, len(vdr.Staked)) @@ -319,21 +328,39 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl delegationFee = uint32(*vdr.ExactDelegationFee) } - tx := &txs.Tx{Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + var ( + baseTx = txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, - }}, - Validator: txs.Validator{ + }} + validator = txs.Validator{ NodeID: vdr.NodeID, Start: uint64(args.Time), End: uint64(vdr.EndTime), Wght: weight, - }, - StakeOuts: stake, - RewardsOwner: owner, - DelegationShares: delegationFee, - }} + } + tx *txs.Tx + ) + if vdr.Signer == nil { + tx = &txs.Tx{Unsigned: &txs.AddValidatorTx{ + BaseTx: baseTx, + Validator: validator, + StakeOuts: stake, + RewardsOwner: owner, + DelegationShares: delegationFee, + }} + } else { + tx = &txs.Tx{Unsigned: &txs.AddPermissionlessValidatorTx{ + BaseTx: baseTx, + Validator: validator, + Signer: vdr.Signer, + StakeOuts: stake, + ValidatorRewardsOwner: owner, + DelegatorRewardsOwner: owner, + DelegationShares: delegationFee, + }} + } + if err := tx.Initialize(txs.GenesisCodec); err != nil { return err } @@ -381,7 +408,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl } // Marshal genesis to bytes - bytes, err := genesis.Codec.Marshal(genesis.Version, g) + bytes, err := genesis.Codec.Marshal(genesis.CodecVersion, g) if err != nil { return fmt.Errorf("couldn't marshal genesis: %w", err) } diff --git a/vms/platformvm/api/static_service_test.go b/vms/platformvm/api/static_service_test.go index 49822d9679d4..a0e62fa9a31f 100644 --- 
a/vms/platformvm/api/static_service_test.go +++ b/vms/platformvm/api/static_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api @@ -18,7 +18,7 @@ import ( func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -27,8 +27,8 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { Amount: 0, } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ EndTime: 15, Weight: weight, NodeID: nodeID, @@ -47,7 +47,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -62,7 +62,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { func TestBuildGenesisInvalidStakeWeight(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -71,8 +71,8 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { Amount: 123456789, } weight := json.Uint64(0) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 15, NodeID: nodeID, @@ -91,7 +91,7 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -106,7 +106,7 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { func TestBuildGenesisInvalidEndtime(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -116,8 +116,8 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 5, NodeID: nodeID, @@ -136,7 +136,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -151,7 +151,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1} + nodeID := ids.BuildTestNodeID([]byte{1}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -161,8 +161,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } weight := json.Uint64(987654321) - validator1 := PermissionlessValidator{ - Staker: Staker{ + validator1 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -177,8 +177,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator2 := PermissionlessValidator{ - Staker: Staker{ + 
validator2 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 3, EndTime: 15, NodeID: nodeID, @@ -193,8 +193,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator3 := PermissionlessValidator{ - Staker: Staker{ + validator3 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 1, EndTime: 10, NodeID: nodeID, @@ -214,7 +214,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator1, validator2, validator3, @@ -237,7 +237,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require.Len(validators, 3) } -func TestUTXOLess(t *testing.T) { +func TestUTXOCompare(t *testing.T) { var ( smallerAddr = ids.ShortID{} largerAddr = ids.ShortID{1} @@ -251,72 +251,49 @@ func TestUTXOLess(t *testing.T) { name string utxo1 UTXO utxo2 UTXO - expected bool + expected int } tests := []test{ { name: "both empty", utxo1: UTXO{}, utxo2: UTXO{}, - expected: false, + expected: 0, }, { - name: "first locktime smaller", + name: "locktime smaller", utxo1: UTXO{}, utxo2: UTXO{ Locktime: 1, }, - expected: true, + expected: -1, }, { - name: "first locktime larger", - utxo1: UTXO{ - Locktime: 1, - }, - utxo2: UTXO{}, - expected: false, - }, - { - name: "first amount smaller", + name: "amount smaller", utxo1: UTXO{}, utxo2: UTXO{ Amount: 1, }, - expected: true, + expected: -1, }, { - name: "first amount larger", - utxo1: UTXO{ - Amount: 1, - }, - utxo2: UTXO{}, - expected: false, - }, - { - name: "first address smaller", + name: "address smaller", utxo1: UTXO{ Address: smallerAddrStr, }, utxo2: UTXO{ Address: largerAddrStr, }, - expected: true, - }, - { - name: "first address larger", - utxo1: UTXO{ - Address: largerAddrStr, - }, - utxo2: UTXO{ - Address: smallerAddrStr, - }, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.utxo1.Less(tt.utxo2)) + require := require.New(t) + + require.Equal(tt.expected, tt.utxo1.Compare(tt.utxo2)) + require.Equal(-tt.expected, tt.utxo2.Compare(tt.utxo1)) }) } } diff --git a/vms/platformvm/block/abort_block.go b/vms/platformvm/block/abort_block.go index cb8efcef144b..ace8087fd385 100644 --- a/vms/platformvm/block/abort_block.go +++ b/vms/platformvm/block/abort_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -43,7 +43,7 @@ func NewBanffAbortBlock( }, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotAbortBlock struct { @@ -78,5 +78,5 @@ func NewApricotAbortBlock( Hght: height, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/vms/platformvm/block/abort_block_test.go b/vms/platformvm/block/abort_block_test.go index 149067aab8e4..a6517cef0137 100644 --- a/vms/platformvm/block/abort_block_test.go +++ b/vms/platformvm/block/abort_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
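The Less-to-Compare migration in static_service.go above (and the matching TestUTXOCompare rewrite) moves UTXO ordering to the three-way comparator convention, where negative/zero/positive results compose field by field and can be negated to check symmetry. As a rough illustration only, the following self-contained Go sketch shows the same pattern with the standard library's cmp and slices packages instead of the avalanchego utils helpers; the utxo type and its fields here are simplified stand-ins, not the API types from the diff.

package main

import (
    "cmp"
    "fmt"
    "slices"
)

// utxo is a simplified stand-in for the API UTXO type; only the fields that
// participate in ordering are kept.
type utxo struct {
    locktime uint64
    amount   uint64
}

// compare returns a negative, zero, or positive result, composing the field
// comparisons in priority order, the same shape as UTXO.Compare in the diff.
func (u utxo) compare(other utxo) int {
    if c := cmp.Compare(u.locktime, other.locktime); c != 0 {
        return c
    }
    return cmp.Compare(u.amount, other.amount)
}

func main() {
    utxos := []utxo{{locktime: 1, amount: 5}, {locktime: 0, amount: 7}}
    // A three-way comparator plugs directly into slices.SortFunc.
    slices.SortFunc(utxos, utxo.compare)
    fmt.Println(utxos) // [{0 7} {1 5}]
}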
package block diff --git a/vms/platformvm/block/atomic_block.go b/vms/platformvm/block/atomic_block.go index dddddaa43685..35deda80c0b5 100644 --- a/vms/platformvm/block/atomic_block.go +++ b/vms/platformvm/block/atomic_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -52,5 +52,5 @@ func NewApricotAtomicBlock( }, Tx: tx, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/vms/platformvm/block/atomic_block_test.go b/vms/platformvm/block/atomic_block_test.go index 7436a0c24153..d81310184f23 100644 --- a/vms/platformvm/block/atomic_block_test.go +++ b/vms/platformvm/block/atomic_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/platformvm/block/block.go b/vms/platformvm/block/block.go index 933667a83f68..30be125b8b43 100644 --- a/vms/platformvm/block/block.go +++ b/vms/platformvm/block/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -36,12 +36,14 @@ type BanffBlock interface { Timestamp() time.Time } -func initialize(blk Block) error { +func initialize(blk Block, commonBlk *CommonBlock) error { // We serialize this block as a pointer so that it can be deserialized into // a Block - bytes, err := Codec.Marshal(Version, &blk) + bytes, err := Codec.Marshal(CodecVersion, &blk) if err != nil { return fmt.Errorf("couldn't marshal block: %w", err) } - return blk.initialize(bytes) + + commonBlk.initialize(bytes) + return nil } diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index e65fae30f706..b198b3ccda20 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder @@ -17,18 +17,19 @@ import ( "context" "errors" "fmt" + "sync" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" @@ -44,52 +45,53 @@ const targetBlockSize = 128 * units.KiB var ( _ Builder = (*builder)(nil) - ErrEndOfTime = errors.New("program time is suspiciously far in the future") - ErrNoPendingBlocks = errors.New("no pending blocks") - ErrChainNotSynced = errors.New("chain not synced") + ErrEndOfTime = errors.New("program time is suspiciously far in the future") + ErrNoPendingBlocks = errors.New("no pending blocks") + errMissingPreferredState = errors.New("missing preferred block state") + errCalculatingNextStakerTime = errors.New("failed calculating next staker time") ) type Builder interface { mempool.Mempool - mempool.BlockTimer - Network - // set preferred block on top of which we'll build next - SetPreference(blockID ids.ID) + // StartBlockTimer starts to issue block creation requests to advance the + // chain timestamp. + StartBlockTimer() - // get preferred block on top of which we'll build next - Preferred() (snowman.Block, error) + // ResetBlockTimer forces the block timer to recalculate when it should + // advance the chain timestamp. + ResetBlockTimer() - // AddUnverifiedTx verifier the tx before adding it to mempool - AddUnverifiedTx(tx *txs.Tx) error + // ShutdownBlockTimer stops block creation requests to advance the chain + // timestamp. + // + // Invariant: Assumes the context lock is held when calling. + ShutdownBlockTimer() - // BuildBlock is called on timer clock to attempt to create - // next block + // BuildBlock can be called to attempt to create a new block BuildBlock(context.Context) (snowman.Block, error) - // Shutdown cleanly shuts Builder down - Shutdown() + // PackBlockTxs returns an array of txs that can fit into a valid block of + // size [targetBlockSize]. The returned txs are all verified against the + // preferred state. + // + // Note: This function does not call the consensus engine. + PackBlockTxs(targetBlockSize int) ([]*txs.Tx, error) } // builder implements a simple builder to convert txs into valid blocks type builder struct { mempool.Mempool - Network txBuilder txbuilder.Builder txExecutorBackend *txexecutor.Backend blkManager blockexecutor.Manager - // ID of the preferred block to build on top of - preferredBlockID ids.ID - - // channel to send messages to the consensus engine - toEngine chan<- common.Message - - // This timer goes off when it is time for the next validator to add/leave - // the validator set. When it goes off ResetTimer() is called, potentially - // triggering creation of a new block. - timer *timer.Timer + // resetTimer is used to signal that the block builder timer should update + // when it will trigger building of a block. 
+ resetTimer chan struct{} + closed chan struct{} + closeOnce sync.Once } func New( @@ -97,139 +99,142 @@ func New( txBuilder txbuilder.Builder, txExecutorBackend *txexecutor.Backend, blkManager blockexecutor.Manager, - toEngine chan<- common.Message, - appSender common.AppSender, ) Builder { - builder := &builder{ + return &builder{ Mempool: mempool, txBuilder: txBuilder, txExecutorBackend: txExecutorBackend, blkManager: blkManager, - toEngine: toEngine, + resetTimer: make(chan struct{}, 1), + closed: make(chan struct{}), } - - builder.timer = timer.NewTimer(builder.setNextBuildBlockTime) - - builder.Network = NewNetwork( - txExecutorBackend.Ctx, - builder, - appSender, - ) - - go txExecutorBackend.Ctx.Log.RecoverAndPanic(builder.timer.Dispatch) - return builder } -func (b *builder) SetPreference(blockID ids.ID) { - if blockID == b.preferredBlockID { - // If the preference didn't change, then this is a noop - return - } - b.preferredBlockID = blockID - b.ResetBlockTimer() +func (b *builder) StartBlockTimer() { + go func() { + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // Invariant: The [timer] is not stopped. + select { + case <-timer.C: + case <-b.resetTimer: + if !timer.Stop() { + <-timer.C + } + case <-b.closed: + return + } + + // Note: Because the context lock is not held here, it is possible + // that [ShutdownBlockTimer] is called concurrently with this + // execution. + for { + duration, err := b.durationToSleep() + if err != nil { + b.txExecutorBackend.Ctx.Log.Error("block builder encountered a fatal error", + zap.Error(err), + ) + return + } + + if duration > 0 { + timer.Reset(duration) + break + } + + // Block needs to be issued to advance time. + b.Mempool.RequestBuildBlock(true /*=emptyBlockPermitted*/) + + // Invariant: ResetBlockTimer is guaranteed to be called after + // [durationToSleep] returns a value <= 0. This is because we + // are guaranteed to attempt to build block. After building a + // valid block, the chain will have its preference updated which + // may change the duration to sleep and trigger a timer reset. + select { + case <-b.resetTimer: + case <-b.closed: + return + } + } + } + }() } -func (b *builder) Preferred() (snowman.Block, error) { - return b.blkManager.GetBlock(b.preferredBlockID) -} +func (b *builder) durationToSleep() (time.Duration, error) { + // Grabbing the lock here enforces that this function is not called mid-way + // through modifying of the state. + b.txExecutorBackend.Ctx.Lock.Lock() + defer b.txExecutorBackend.Ctx.Lock.Unlock() -// AddUnverifiedTx verifies a transaction and attempts to add it to the mempool -func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { - if !b.txExecutorBackend.Bootstrapped.Get() { - return ErrChainNotSynced + // If [ShutdownBlockTimer] was called, we want to exit the block timer + // goroutine. We check this with the context lock held because + // [ShutdownBlockTimer] is expected to only be called with the context lock + // held. 
+ select { + case <-b.closed: + return 0, nil + default: } - txID := tx.ID() - if b.Mempool.Has(txID) { - // If the transaction is already in the mempool - then it looks the same - // as if it was successfully added - return nil + preferredID := b.blkManager.Preferred() + preferredState, ok := b.blkManager.GetState(preferredID) + if !ok { + return 0, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) } - verifier := txexecutor.MempoolTxVerifier{ - Backend: b.txExecutorBackend, - ParentID: b.preferredBlockID, // We want to build off of the preferred block - StateVersions: b.blkManager, - Tx: tx, - } - if err := tx.Unsigned.Visit(&verifier); err != nil { - b.MarkDropped(txID, err) - return err + nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + if err != nil { + return 0, fmt.Errorf("%w of %s: %w", errCalculatingNextStakerTime, preferredID, err) } - // If we are partially syncing the Primary Network, we should not be - // maintaining the transaction mempool locally. - if !b.txExecutorBackend.Config.PartialSyncPrimaryNetwork { - if err := b.Mempool.Add(tx); err != nil { - return err - } + now := b.txExecutorBackend.Clk.Time() + return nextStakerChangeTime.Sub(now), nil +} + +func (b *builder) ResetBlockTimer() { + // Ensure that the timer will be reset at least once. + select { + case b.resetTimer <- struct{}{}: + default: } - return b.GossipTx(tx) +} + +func (b *builder) ShutdownBlockTimer() { + b.closeOnce.Do(func() { + close(b.closed) + }) } // BuildBlock builds a block to be added to consensus. // This method removes the transactions from the returned // blocks from the mempool. func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { - b.Mempool.DisableAdding() - defer func() { - b.Mempool.EnableAdding() - b.ResetBlockTimer() - }() + // If there are still transactions in the mempool, then we need to + // re-trigger block building. + defer b.Mempool.RequestBuildBlock(false /*=emptyBlockPermitted*/) - ctx := b.txExecutorBackend.Ctx - ctx.Log.Debug("starting to attempt to build a block") + b.txExecutorBackend.Ctx.Log.Debug("starting to attempt to build a block") - statelessBlk, err := b.buildBlock() - if err != nil { - return nil, err - } - - // Remove selected txs from mempool now that we are returning the block to - // the consensus engine. - txs := statelessBlk.Txs() - b.Mempool.Remove(txs) - return b.blkManager.NewBlock(statelessBlk), nil -} - -// Returns the block we want to build and issue. -// Only modifies state to remove expired proposal txs. -func (b *builder) buildBlock() (block.Block, error) { // Get the block to build on top of and retrieve the new block's context. 
- preferred, err := b.Preferred() + preferredID := b.blkManager.Preferred() + preferred, err := b.blkManager.GetBlock(preferredID) if err != nil { return nil, err } - preferredID := preferred.ID() nextHeight := preferred.Height() + 1 preferredState, ok := b.blkManager.GetState(preferredID) if !ok { return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) } - timestamp := b.txExecutorBackend.Clk.Time() - if parentTime := preferred.Timestamp(); parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + timestamp, timeWasCapped, err := txexecutor.NextBlockTime(preferredState, b.txExecutorBackend.Clk) if err != nil { return nil, fmt.Errorf("could not calculate next staker change time: %w", err) } - // timeWasCapped means that [timestamp] was reduced to - // [nextStakerChangeTime]. It is used as a flag for [buildApricotBlock] to - // be willing to issue an advanceTimeTx. It is also used as a flag for - // [buildBanffBlock] to force the issuance of an empty block to advance - // the time forward; if there are no available transactions. - timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - - return buildBlock( + statelessBlk, err := buildBlock( b, preferredID, nextHeight, @@ -237,114 +242,29 @@ func (b *builder) buildBlock() (block.Block, error) { timeWasCapped, preferredState, ) -} - -func (b *builder) Shutdown() { - // There is a potential deadlock if the timer is about to execute a timeout. - // So, the lock must be released before stopping the timer. - ctx := b.txExecutorBackend.Ctx - ctx.Lock.Unlock() - b.timer.Stop() - ctx.Lock.Lock() -} - -func (b *builder) ResetBlockTimer() { - // Next time the context lock is released, we can attempt to reset the block - // timer. - b.timer.SetTimeoutIn(0) -} - -// dropExpiredStakerTxs drops add validator/delegator transactions in the -// mempool whose start time is not sufficiently far in the future -// (i.e. within local time plus [MaxFutureStartFrom]). -func (b *builder) dropExpiredStakerTxs(timestamp time.Time) { - minStartTime := timestamp.Add(txexecutor.SyncBound) - for b.Mempool.HasStakerTx() { - tx := b.Mempool.PeekStakerTx() - startTime := tx.Unsigned.(txs.Staker).StartTime() - if !startTime.Before(minStartTime) { - // The next proposal tx in the mempool starts sufficiently far in - // the future. - return - } - - txID := tx.ID() - err := fmt.Errorf( - "synchrony bound (%s) is later than staker start time (%s)", - minStartTime, - startTime, - ) - - b.Mempool.Remove([]*txs.Tx{tx}) - b.Mempool.MarkDropped(txID, err) // cache tx as dropped - b.txExecutorBackend.Ctx.Log.Debug("dropping tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} - -func (b *builder) setNextBuildBlockTime() { - ctx := b.txExecutorBackend.Ctx - - // Grabbing the lock here enforces that this function is not called mid-way - // through modifying of the state. 
- ctx.Lock.Lock() - defer ctx.Lock.Unlock() - - if !b.txExecutorBackend.Bootstrapped.Get() { - ctx.Log.Verbo("skipping block timer reset", - zap.String("reason", "not bootstrapped"), - ) - return + if err != nil { + return nil, err } - if _, err := b.buildBlock(); err == nil { - // We can build a block now - b.notifyBlockReady() - return - } + return b.blkManager.NewBlock(statelessBlk), nil +} - // Wake up when it's time to add/remove the next validator/delegator - preferredState, ok := b.blkManager.GetState(b.preferredBlockID) +func (b *builder) PackBlockTxs(targetBlockSize int) ([]*txs.Tx, error) { + preferredID := b.blkManager.Preferred() + preferredState, ok := b.blkManager.GetState(preferredID) if !ok { - // The preferred block should always be a decision block - ctx.Log.Error("couldn't get preferred block state", - zap.Stringer("preferredID", b.preferredBlockID), - zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), - ) - return - } - - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) - if err != nil { - ctx.Log.Error("couldn't get next staker change time", - zap.Stringer("preferredID", b.preferredBlockID), - zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), - zap.Error(err), - ) - return + return nil, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) } - now := b.txExecutorBackend.Clk.Time() - waitTime := nextStakerChangeTime.Sub(now) - ctx.Log.Debug("setting next scheduled event", - zap.Time("nextEventTime", nextStakerChangeTime), - zap.Duration("timeUntil", waitTime), + return packBlockTxs( + preferredID, + preferredState, + b.Mempool, + b.txExecutorBackend, + b.blkManager, + b.txExecutorBackend.Clk.Time(), + targetBlockSize, ) - - // Wake up when it's time to add/remove the next validator - b.timer.SetTimeoutIn(waitTime) -} - -// notifyBlockReady tells the consensus engine that a new block is ready to be -// created -func (b *builder) notifyBlockReady() { - select { - case b.toEngine <- common.PendingTxs: - default: - b.txExecutorBackend.Ctx.Log.Debug("dropping message to consensus engine") - } } // [timestamp] is min(max(now, parent timestamp), next staker change time) @@ -369,11 +289,29 @@ func buildBlock( return nil, fmt.Errorf("could not build tx to reward staker: %w", err) } + var blockTxs []*txs.Tx + // TODO: Cleanup post-Durango + if builder.txExecutorBackend.Config.IsDurangoActivated(timestamp) { + blockTxs, err = packBlockTxs( + parentID, + parentState, + builder.Mempool, + builder.txExecutorBackend, + builder.blkManager, + timestamp, + targetBlockSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to pack block txs: %w", err) + } + } + return block.NewBanffProposalBlock( timestamp, parentID, height, rewardValidatorTx, + blockTxs, ) } @@ -383,11 +321,21 @@ func buildBlock( return block, nil } - // Clean out the mempool's transactions with invalid timestamps. - builder.dropExpiredStakerTxs(timestamp) + blockTxs, err := packBlockTxs( + parentID, + parentState, + builder.Mempool, + builder.txExecutorBackend, + builder.blkManager, + timestamp, + targetBlockSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to pack block txs: %w", err) + } // If there is no reason to build a block, don't. 
- if !builder.Mempool.HasTxs() && !forceAdvanceTime { + if len(blockTxs) == 0 && !forceAdvanceTime { builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") return nil, ErrNoPendingBlocks } @@ -397,8 +345,90 @@ func buildBlock( timestamp, parentID, height, - builder.Mempool.PeekTxs(targetBlockSize), + blockTxs, + ) +} + +func packBlockTxs( + parentID ids.ID, + parentState state.Chain, + mempool mempool.Mempool, + backend *txexecutor.Backend, + manager blockexecutor.Manager, + timestamp time.Time, + remainingSize int, +) ([]*txs.Tx, error) { + stateDiff, err := state.NewDiffOn(parentState) + if err != nil { + return nil, err + } + + if _, err := txexecutor.AdvanceTimeTo(backend, stateDiff, timestamp); err != nil { + return nil, err + } + + var ( + blockTxs []*txs.Tx + inputs set.Set[ids.ID] ) + + for { + tx, exists := mempool.Peek() + if !exists { + break + } + txSize := len(tx.Bytes()) + if txSize > remainingSize { + break + } + mempool.Remove(tx) + + // Invariant: [tx] has already been syntactically verified. + + txDiff, err := state.NewDiffOn(stateDiff) + if err != nil { + return nil, err + } + + executor := &txexecutor.CaminoStandardTxExecutor{ + StandardTxExecutor: txexecutor.StandardTxExecutor{ + Backend: backend, + State: txDiff, + Tx: tx, + }, + } + + err = tx.Unsigned.Visit(executor) + if err != nil { + txID := tx.ID() + mempool.MarkDropped(txID, err) + continue + } + + if inputs.Overlaps(executor.Inputs) { + txID := tx.ID() + mempool.MarkDropped(txID, blockexecutor.ErrConflictingBlockTxs) + continue + } + err = manager.VerifyUniqueInputs(parentID, executor.Inputs) + if err != nil { + txID := tx.ID() + mempool.MarkDropped(txID, err) + continue + } + inputs.Union(executor.Inputs) + + txDiff.AddTx(tx, status.Committed) + err = txDiff.Apply(stateDiff) + if err != nil { + return nil, err + } + + remainingSize -= txSize + blockTxs = append(blockTxs, tx) + } + + return blockTxs, nil } // getNextStakerToReward returns the next staker txID to remove from the staking diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index bf754f928572..e3f75a65b0c5 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
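The packBlockTxs helper above drives the new block-filling flow: advance time on a state diff, then repeatedly peek at the mempool, verify each transaction against a child diff, and skip anything whose inputs overlap with transactions already selected. A hedged, self-contained sketch of just that selection loop follows; the tx struct, string inputs, and packTxs function are hypothetical stand-ins, not the platformvm types.

package main

import "fmt"

// tx is a hypothetical stand-in for *txs.Tx: an ID, an encoded size, and the
// UTXO inputs it consumes.
type tx struct {
    id     string
    size   int
    inputs []string
}

// packTxs greedily selects transactions in mempool order, stopping when the
// next transaction no longer fits and skipping any transaction whose inputs
// conflict with one already packed.
func packTxs(pending []tx, remainingSize int) []tx {
    consumed := map[string]bool{}
    var packed []tx
    for _, t := range pending {
        if t.size > remainingSize {
            break // mirrors the size check before mempool.Remove in packBlockTxs
        }
        conflict := false
        for _, in := range t.inputs {
            if consumed[in] {
                conflict = true
                break
            }
        }
        if conflict {
            continue // the real builder would mark the tx as dropped here
        }
        for _, in := range t.inputs {
            consumed[in] = true
        }
        remainingSize -= t.size
        packed = append(packed, t)
    }
    return packed
}

func main() {
    pending := []tx{
        {id: "a", size: 10, inputs: []string{"u1"}},
        {id: "b", size: 10, inputs: []string{"u1"}}, // conflicts with "a"
        {id: "c", size: 10, inputs: []string{"u2"}},
    }
    for _, t := range packTxs(pending, 25) {
        fmt.Println(t.id) // prints "a" then "c"
    }
}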
package builder @@ -24,106 +24,503 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -var errTestingDropped = errors.New("testing dropped") +func TestBuildBlockBasic(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) + txID := tx.ID() -// shows that a locally generated CreateChainTx can be added to mempool and then -// removed by inclusion in a block -func TestBlockBuilderAddLocalTx(t *testing.T) { + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + // [BuildBlock] should build a block with the transaction + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Len(blk.Txs(), 1) + require.Equal(txID, blk.Txs()[0].ID()) + + // Mempool should not contain the transaction or have marked it as dropped + _, ok = env.mempool.Get(txID) + require.False(ok) + require.NoError(env.mempool.GetDropReason(txID)) +} + +func TestBuildBlockDoesNotBuildWithEmptyMempool(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + tx, exists := env.mempool.Peek() + require.False(exists) + require.Nil(tx) + + // [BuildBlock] should not build an empty block + blk, err := env.Builder.BuildBlock(context.Background()) + require.ErrorIs(err, ErrNoPendingBlocks) + require.Nil(blk) +} + +func TestBuildBlockShouldReward(t *testing.T) { require := require.New(t) env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() + + var ( + now = env.backend.Clk.Time() + nodeID = ids.GenerateTestNodeID() - // add a tx to it - tx := getValidTx(env.txBuilder, t) + defaultValidatorStake = 100 * units.MilliAvax + validatorStartTime = now.Add(2 * txexecutor.SyncBound) + validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) + ) + + // Create a valid [AddValidatorTx] + tx, err := env.txBuilder.NewAddValidatorTx( + 
defaultValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + ) + require.NoError(err) txID := tx.ID() - env.sender.SendAppGossipF = func(context.Context, []byte) error { - return nil + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + // Build and accept a block with the tx + blk, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.IsType(&block.BanffStandardBlock{}, blk.(*blockexecutor.Block).Block) + require.Equal([]*txs.Tx{tx}, blk.(*blockexecutor.Block).Block.Txs()) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.True(env.blkManager.SetPreference(blk.ID())) + + // Validator should now be current + staker, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.Equal(txID, staker.TxID) + + // Should be rewarded at the end of staking period + env.backend.Clk.Set(validatorEndTime) + + for { + iter, err := env.state.GetCurrentStakerIterator() + require.NoError(err) + require.True(iter.Next()) + staker := iter.Value() + iter.Release() + + // Check that the right block was built + blk, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + require.IsType(&block.BanffProposalBlock{}, blk.(*blockexecutor.Block).Block) + + expectedTx, err := env.txBuilder.NewRewardValidatorTx(staker.TxID) + require.NoError(err) + require.Equal([]*txs.Tx{expectedTx}, blk.(*blockexecutor.Block).Block.Txs()) + + // Commit the [ProposalBlock] with a [CommitBlock] + proposalBlk, ok := blk.(snowman.OracleBlock) + require.True(ok) + options, err := proposalBlk.Options(context.Background()) + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.True(env.blkManager.SetPreference(commit.ID())) + + // Stop rewarding once our staker is rewarded + if staker.TxID == txID { + break + } } - require.NoError(env.Builder.AddUnverifiedTx(tx)) - require.True(env.mempool.Has(txID)) - // show that build block include that tx and removes it from mempool + // Staking rewards should have been issued + rewardUTXOs, err := env.state.GetRewardUTXOs(txID) + require.NoError(err) + require.NotEmpty(rewardUTXOs) +} + +func TestBuildBlockAdvanceTime(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + var ( + now = env.backend.Clk.Time() + nextTime = now.Add(2 * txexecutor.SyncBound) + ) + + // Add a staker to [env.state] + env.state.PutCurrentValidator(&state.Staker{ + NextTime: nextTime, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + }) + + // Advance wall clock to [nextTime] + env.backend.Clk.Set(nextTime) + + // [BuildBlock] should build a block advancing the time to [NextTime] + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + 
require.Empty(blk.Txs()) + require.IsType(&block.BanffStandardBlock{}, blk.Block) + standardBlk := blk.Block.(*block.BanffStandardBlock) + require.Equal(nextTime.Unix(), standardBlk.Timestamp().Unix()) +} + +func TestBuildBlockForceAdvanceTime(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) + txID := tx.ID() + + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + var ( + now = env.backend.Clk.Time() + nextTime = now.Add(2 * txexecutor.SyncBound) + ) + + // Add a staker to [env.state] + env.state.PutCurrentValidator(&state.Staker{ + NextTime: nextTime, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + }) + + // Advance wall clock to [nextTime] + [txexecutor.SyncBound] + env.backend.Clk.Set(nextTime.Add(txexecutor.SyncBound)) + + // [BuildBlock] should build a block advancing the time to [nextTime], + // not the current wall clock. + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Equal([]*txs.Tx{tx}, blk.Txs()) + require.IsType(&block.BanffStandardBlock{}, blk.Block) + standardBlk := blk.Block.(*block.BanffStandardBlock) + require.Equal(nextTime.Unix(), standardBlk.Timestamp().Unix()) +} + +func TestBuildBlockDropExpiredStakerTxs(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // The [StartTime] in a staker tx is only validated pre-Durango. + // TODO: Delete this test post-Durango activation. 
+ env.config.DurangoTime = mockable.MaxTime + + var ( + now = env.backend.Clk.Time() + defaultValidatorStake = 100 * units.MilliAvax + + // Add a validator with StartTime in the future within [MaxFutureStartTime] + validatorStartTime = now.Add(txexecutor.MaxFutureStartTime - 1*time.Second) + validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) + ) + + tx1, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx1)) + tx1ID := tx1.ID() + _, ok := env.mempool.Get(tx1ID) + require.True(ok) + + // Add a validator with StartTime before current chain time + validator2StartTime := now.Add(-5 * time.Second) + validator2EndTime := validator2StartTime.Add(360 * 24 * time.Hour) + + tx2, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validator2StartTime.Unix()), + uint64(validator2EndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[1].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[1]}, + preFundedKeys[1].PublicKey().Address(), + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx2)) + tx2ID := tx2.ID() + _, ok = env.mempool.Get(tx2ID) + require.True(ok) + + // Add a validator with StartTime in the future past [MaxFutureStartTime] + validator3StartTime := now.Add(txexecutor.MaxFutureStartTime + 5*time.Second) + validator3EndTime := validator2StartTime.Add(360 * 24 * time.Hour) + + tx3, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validator3StartTime.Unix()), + uint64(validator3EndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[2].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[2]}, + preFundedKeys[2].PublicKey().Address(), + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx3)) + tx3ID := tx3.ID() + _, ok = env.mempool.Get(tx3ID) + require.True(ok) + + // Only tx1 should be in a built block blkIntf, err := env.Builder.BuildBlock(context.Background()) require.NoError(err) require.IsType(&blockexecutor.Block{}, blkIntf) blk := blkIntf.(*blockexecutor.Block) require.Len(blk.Txs(), 1) - require.Equal(txID, blk.Txs()[0].ID()) + require.Equal(tx1ID, blk.Txs()[0].ID()) + + // Mempool should have none of the txs + _, ok = env.mempool.Get(tx1ID) + require.False(ok) + _, ok = env.mempool.Get(tx2ID) + require.False(ok) + _, ok = env.mempool.Get(tx3ID) + require.False(ok) - require.False(env.mempool.Has(txID)) + // Only tx2 and tx3 should be dropped + require.NoError(env.mempool.GetDropReason(tx1ID)) + + tx2DropReason := env.mempool.GetDropReason(tx2ID) + require.ErrorIs(tx2DropReason, txexecutor.ErrTimestampNotBeforeStartTime) + + tx3DropReason := env.mempool.GetDropReason(tx3ID) + require.ErrorIs(tx3DropReason, txexecutor.ErrFutureStakeTime) } -func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { +func TestBuildBlockInvalidStakingDurations(t *testing.T) { require := require.New(t) env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() + + // Post-Durango, [StartTime] is no longer validated. Staking durations are + // based on the current chain timestamp and must be validated. 
+ env.config.DurangoTime = time.Time{} + + var ( + now = env.backend.Clk.Time() + defaultValidatorStake = 100 * units.MilliAvax + + // Add a validator ending in [MaxStakeDuration] + validatorEndTime = now.Add(env.config.MaxStakeDuration) + ) + + tx1, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(now.Unix()), + uint64(validatorEndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx1)) + tx1ID := tx1.ID() + _, ok := env.mempool.Get(tx1ID) + require.True(ok) + + // Add a validator ending past [MaxStakeDuration] + validator2EndTime := now.Add(env.config.MaxStakeDuration + time.Second) + + tx2, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(now.Unix()), + uint64(validator2EndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[2].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[2]}, + preFundedKeys[2].PublicKey().Address(), + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx2)) + tx2ID := tx2.ID() + _, ok = env.mempool.Get(tx2ID) + require.True(ok) + + // Only tx1 should be in a built block since [MaxStakeDuration] is satisfied. + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Len(blk.Txs(), 1) + require.Equal(tx1ID, blk.Txs()[0].ID()) + + // Mempool should have none of the txs + _, ok = env.mempool.Get(tx1ID) + require.False(ok) + _, ok = env.mempool.Get(tx2ID) + require.False(ok) + + // Only tx2 should be dropped + require.NoError(env.mempool.GetDropReason(tx1ID)) + + tx2DropReason := env.mempool.GetDropReason(tx2ID) + require.ErrorIs(tx2DropReason, txexecutor.ErrStakeTooLong) +} + +func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { + require := require.New(t) - // create candidate tx - tx := getValidTx(env.txBuilder, t) + env := newEnvironment(t) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) txID := tx.ID() - // A tx simply added to mempool is obviously not marked as dropped - require.NoError(env.mempool.Add(tx)) - require.True(env.mempool.Has(txID)) - reason := env.mempool.GetDropReason(txID) - require.NoError(reason) + // Transaction should not be marked as dropped before being added to the + // mempool + require.NoError(env.mempool.GetDropReason(txID)) - // When a tx is marked as dropped, it is still available to allow re-issuance + // Mark the transaction as dropped + errTestingDropped := errors.New("testing dropped") env.mempool.MarkDropped(txID, errTestingDropped) - require.True(env.mempool.Has(txID)) // still available - reason = env.mempool.GetDropReason(txID) - require.ErrorIs(reason, errTestingDropped) - - // A previously dropped tx, popped then re-added to mempool, - // is not dropped anymore - env.mempool.Remove([]*txs.Tx{tx}) - require.NoError(env.mempool.Add(tx)) - - require.True(env.mempool.Has(txID)) - reason = env.mempool.GetDropReason(txID) - require.NoError(reason) + err = env.mempool.GetDropReason(txID) + 
require.ErrorIs(err, errTestingDropped) + + // Issue the transaction + env.ctx.Lock.Unlock() + err = env.network.IssueTx(context.Background(), tx) + require.ErrorIs(err, errTestingDropped) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.False(ok) + + // When issued again, the mempool should still be marked as dropped + err = env.mempool.GetDropReason(txID) + require.ErrorIs(err, errTestingDropped) } func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { + require := require.New(t) + env := newEnvironment(t) env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + env.isBootstrapped.Set(false) - env.ctx.Log = logging.NoWarn{} - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() - env.Builder.SetPreference(ids.GenerateTestID()) // should not panic + require.True(env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic } func TestGetNextStakerToReward(t *testing.T) { + var ( + now = time.Now() + txID = ids.GenerateTestID() + ) + type test struct { name string timestamp time.Time @@ -133,10 +530,6 @@ func TestGetNextStakerToReward(t *testing.T) { expectedErr error } - var ( - now = time.Now() - txID = ids.GenerateTestID() - ) tests := []test{ { name: "end of time", @@ -308,392 +701,3 @@ func TestGetNextStakerToReward(t *testing.T) { }) } } - -func TestBuildBlock(t *testing.T) { - var ( - parentID = ids.GenerateTestID() - height = uint64(1337) - output = &avax.TransferableOutput{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - } - now = time.Now() - parentTimestamp = now.Add(-2 * time.Second) - transactions = []*txs.Tx{{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{output}, - }}, - Validator: txs.Validator{ - // Shouldn't be dropped - Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), - }, - StakeOuts: []*avax.TransferableOutput{output}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - Creds: []verify.Verifiable{ - &secp256k1fx.Credential{ - Sigs: [][secp256k1.SignatureLen]byte{{1, 3, 3, 7}}, - }, - }, - }} - stakerTxID = ids.GenerateTestID() - ) - - type test struct { - name string - builderF func(*gomock.Controller) *builder - timestamp time.Time - forceAdvanceTime bool - parentStateF func(*gomock.Controller) state.Chain - expectedBlkF func(*require.Assertions) block.Block - expectedErr error - } - - tests := []test{ - { - name: "should reward", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // The tx builder should be asked to build a reward tx - txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(transactions[0], nil) - - return &builder{ - Mempool: mempool, - txBuilder: txBuilder, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // add current validator that ends at [parentTimestamp] - // i.e. 
it should be rewarded - currentStakerIter := state.NewMockStakerIterator(ctrl) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: stakerTxID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: parentTimestamp, - }) - currentStakerIter.EXPECT().Release() - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - return s - }, - expectedBlkF: func(require *require.Assertions) block.Block { - expectedBlk, err := block.NewBanffProposalBlock( - parentTimestamp, - parentID, - height, - transactions[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has decision txs", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(transactions) - return &builder{ - Mempool: mempool, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. - currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) block.Block { - expectedBlk, err := block.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - transactions, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "no stakers tx", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. - currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(*require.Assertions) block.Block { - return nil - }, - expectedErr: ErrNoPendingBlocks, - }, - { - name: "should advance time", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. 
- mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(nil) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: now.Add(-1 * time.Second), - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // add current validator that ends at [now] - 1 second. - // That is, it ends in the past but after the current chain time. - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime] - // when determining whether to issue a reward tx. - currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(-1 * time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) block.Block { - expectedBlk, err := block.NewBanffStandardBlock( - now.Add(-1*time.Second), // note the advanced time - parentID, - height, - nil, // empty block to advance time - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx no force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There is a tx. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. - currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) block.Block { - expectedBlk, err := block.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx with force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no decision txs - // There is a staker tx. 
- mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. - currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) block.Block { - expectedBlk, err := block.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - parentState := tt.parentStateF(ctrl).(*state.MockChain) - deferredStakerIter := state.NewMockStakerIterator(ctrl) - deferredStakerIter.EXPECT().Next().Return(false).AnyTimes() - deferredStakerIter.EXPECT().Release().AnyTimes() - parentState.EXPECT().GetDeferredStakerIterator().Return(deferredStakerIter, nil).AnyTimes() - - gotBlk, err := buildBlock( - tt.builderF(ctrl), - parentID, - height, - tt.timestamp, - tt.forceAdvanceTime, - parentState, - ) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - return - } - require.NoError(err) - require.Equal(tt.expectedBlkF(require), gotBlk) - }) - } -} diff --git a/vms/platformvm/block/builder/camino_builder.go b/vms/platformvm/block/builder/camino_builder.go index c1ee081b7cd5..974d4d20ef68 100644 --- a/vms/platformvm/block/builder/camino_builder.go +++ b/vms/platformvm/block/builder/camino_builder.go @@ -9,59 +9,13 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/block" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" txBuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" - txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) -// Overriding axax block builder methods with caminoBuilder methods -// must be done with consideration, that network uses reference to avax builder, -// not to camino builder. So it will actually call avax builder methods. 
- -type caminoBuilder struct { - builder - caminoTxBuilder txBuilder.CaminoBuilder -} - -func CaminoNew( - mempool mempool.Mempool, - txBuilder txBuilder.CaminoBuilder, - txExecutorBackend *txexecutor.Backend, - blkManager blockexecutor.Manager, - toEngine chan<- common.Message, - appSender common.AppSender, -) Builder { - builder := &caminoBuilder{ - builder: builder{ - Mempool: mempool, - txExecutorBackend: txExecutorBackend, - blkManager: blkManager, - toEngine: toEngine, - txBuilder: txBuilder, - }, - caminoTxBuilder: txBuilder, - } - - builder.timer = timer.NewTimer(builder.setNextBuildBlockTime) - - builder.Network = NewCaminoNetwork( - txExecutorBackend.Ctx, - builder, - appSender, - builder.caminoTxBuilder, - ) - - go txExecutorBackend.Ctx.Log.RecoverAndPanic(builder.timer.Dispatch) - return builder -} - func caminoBuildBlock( builder *builder, parentID ids.ID, @@ -87,6 +41,13 @@ func caminoBuildBlock( return nil, fmt.Errorf("could not build tx to unlock deposits: %w", err) } + // User-signed unlockDepositTx with partial unlock and + // system-issued unlockDepositTx with full unlock for the same deposit + // will conflict with each other, resulting in block rejection. + // After that, txs (depending on node config) could be re-added to the mempool + // and this case could happen again. + // Because of this, we can't allow a block with a system unlockDepositTx to contain other txs. + return block.NewBanffStandardBlock( timestamp, parentID, @@ -110,6 +71,14 @@ func caminoBuildBlock( return nil, fmt.Errorf("could not build tx to finish proposals: %w", err) } + // User-signed addVoteTx and system-issued finishProposalsTx for the same proposal + // will conflict with each other, resulting either in block rejection or + // in a possibly unexpected proposal outcome (the finishProposalsTx issuing decision + // is based on the state before this addVoteTx). + // After that, if the block is rejected, txs (depending on node config) could be re-added to the mempool + // and this case could happen again. + // Because of this, we can't allow a block with a system finishProposalsTx to contain other txs. + // FinishProposalsTx should never be in block with addVoteTx, // because it can affect state of proposals. return block.NewBanffStandardBlock( diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 187c0eb92a70..f2d15195ad07 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms.
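The comments added to caminoBuildBlock above boil down to one rule: when the node itself issues an unlockDepositTx or finishProposalsTx, that transaction gets a block of its own, so it can never be packed next to user-signed transactions it might conflict with. A minimal sketch of that decision follows, using hypothetical stand-in types rather than the camino builder's real ones.

package main

import "fmt"

// tx is a hypothetical stand-in for a platformvm transaction.
type tx struct{ id string }

// txsForBlock applies the policy described in the caminoBuildBlock comments:
// a system-issued tx (unlockDepositTx, finishProposalsTx) is always the only
// tx in its block; otherwise the block is filled from the mempool.
func txsForBlock(systemTx *tx, mempoolTxs []tx) []tx {
    if systemTx != nil {
        // Packing user txs next to a system tx risks conflicts and block
        // rejection, so the system tx travels alone.
        return []tx{*systemTx}
    }
    return mempoolTxs
}

func main() {
    fmt.Println(txsForBlock(&tx{id: "finishProposals"}, []tx{{id: "user1"}}))
    fmt.Println(txsForBlock(nil, []tx{{id: "user1"}, {id: "user2"}}))
}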
package builder import ( "context" - "errors" "testing" "time" @@ -24,6 +23,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -40,6 +40,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -68,17 +69,22 @@ var ( defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake preFundedKeys = secp256k1.TestKeys() - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] - errMissing = errors.New("missing") + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type mutableSharedMemory struct { atomic.SharedMemory } @@ -87,6 +93,7 @@ type environment struct { Builder blkManager blockexecutor.Manager mempool mempool.Mempool + network network.Network sender *common.SenderTest isBootstrapped *utils.Atomic[bool] @@ -115,7 +122,14 @@ func newEnvironment(t *testing.T) *environment { res.isBootstrapped.Set(true) res.baseDB = versiondb.New(memdb.New()) - res.ctx, res.msm = defaultCtx(res.baseDB) + atomicDB := prefixdb.New([]byte{1}, res.baseDB) + m := atomic.NewMemory(atomicDB) + + res.ctx = snowtest.Context(t, snowtest.PChainID) + res.msm = &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(res.ctx.ChainID), + } + res.ctx.SharedMemory = res.msm res.ctx.Lock.Lock() defer res.ctx.Lock.Unlock() @@ -153,11 +167,14 @@ func newEnvironment(t *testing.T) *environment { registerer := prometheus.NewRegistry() res.sender = &common.SenderTest{T: t} + res.sender.SendAppGossipF = func(context.Context, []byte) error { + return nil + } metrics, err := metrics.New("", registerer) require.NoError(err) - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, nil) require.NoError(err) res.blkManager = blockexecutor.NewManager( @@ -168,18 +185,50 @@ func newEnvironment(t *testing.T) *environment { pvalidators.TestManager, ) + txVerifier := network.NewLockedTxVerifier(&res.ctx.Lock, res.blkManager) + res.network, err = network.New( + res.backend.Ctx.Log, + res.backend.Ctx.NodeID, + res.backend.Ctx.SubnetID, + res.backend.Ctx.ValidatorState, + txVerifier, + res.mempool, + res.backend.Config.PartialSyncPrimaryNetwork, + res.sender, + registerer, + network.DefaultConfig, + ) + require.NoError(err) + res.Builder = New( res.mempool, res.txBuilder, &res.backend, res.blkManager, - nil, // toEngine, - res.sender, ) + res.Builder.StartBlockTimer() - res.Builder.SetPreference(genesisID) + res.blkManager.SetPreference(genesisID) addSubnet(t, res) + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + res.Builder.ShutdownBlockTimer() + + if 
res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + require.NoError(res.state.Commit()) + } + + require.NoError(res.state.Close()) + require.NoError(res.baseDB.Close()) + }) + return res } @@ -236,7 +285,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -246,38 +294,6 @@ func defaultState( return state } -func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) - - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), - } - ctx.SharedMemory = msm - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - - return ctx, msm -} - func defaultConfig() *config.Config { return &config.Config{ Chains: chains.TestManager, @@ -332,7 +348,7 @@ func defaultFx(t *testing.T, clk *mockable.Clock, log logging.Logger, isBootstra require := require.New(t) fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } @@ -358,13 +374,12 @@ func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -401,23 +416,3 @@ func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { return genesisBytes } - -func shutdownEnvironment(env *environment) error { - env.Builder.Shutdown() - - if env.isBootstrapped.Get() { - validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := env.state.Commit(); err != nil { - return err - } - } - - return utils.Err( - env.state.Close(), - env.baseDB.Close(), - ) -} diff --git a/vms/platformvm/block/builder/main_test.go b/vms/platformvm/block/builder/main_test.go index 01135c523738..31149bfbcca8 100644 --- a/vms/platformvm/block/builder/main_test.go +++ b/vms/platformvm/block/builder/main_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder diff --git a/vms/platformvm/block/builder/network.go b/vms/platformvm/block/builder/network.go deleted file mode 100644 index 3e1576d958fb..000000000000 --- a/vms/platformvm/block/builder/network.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// TODO: consider moving the network implementation to a separate package - -package builder - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -const ( - // We allow [recentCacheSize] to be fairly large because we only store hashes - // in the cache, not entire transactions. - recentCacheSize = 512 -) - -var _ Network = (*network)(nil) - -type Network interface { - common.AppHandler - - // GossipTx gossips the transaction to some of the connected peers - GossipTx(tx *txs.Tx) error -} - -type network struct { - ctx *snow.Context - blkBuilder *builder - - // gossip related attributes - appSender common.AppSender - recentTxs *cache.LRU[ids.ID, struct{}] -} - -func NewNetwork( - ctx *snow.Context, - blkBuilder *builder, - appSender common.AppSender, -) Network { - return &network{ - ctx: ctx, - blkBuilder: blkBuilder, - appSender: appSender, - recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, - } -} - -func (*network) CrossChainAppRequestFailed(context.Context, ids.ID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) CrossChainAppRequest(context.Context, ids.ID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) CrossChainAppResponse(context.Context, ids.ID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppRequestFailed(context.Context, ids.NodeID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppRequest(context.Context, ids.NodeID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppResponse(context.Context, ids.NodeID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. 
- return nil -} - -func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, msgBytes []byte) error { - n.ctx.Log.Debug("called AppGossip message handler", - zap.Stringer("nodeID", nodeID), - zap.Int("messageLen", len(msgBytes)), - ) - - if n.blkBuilder.txExecutorBackend.Config.PartialSyncPrimaryNetwork { - n.ctx.Log.Debug("dropping AppGossip message", - zap.String("reason", "primary network is not being fully synced"), - ) - return nil - } - - msgIntf, err := message.Parse(msgBytes) - if err != nil { - n.ctx.Log.Debug("dropping AppGossip message", - zap.String("reason", "failed to parse message"), - ) - return nil - } - - msg, ok := msgIntf.(*message.Tx) - if !ok { - n.ctx.Log.Debug("dropping unexpected message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - tx, err := txs.Parse(txs.Codec, msg.Tx) - if err != nil { - n.ctx.Log.Verbo("received invalid tx", - zap.Stringer("nodeID", nodeID), - zap.Binary("tx", msg.Tx), - zap.Error(err), - ) - return nil - } - - txID := tx.ID() - - // We need to grab the context lock here to avoid racy behavior with - // transaction verification + mempool modifications. - n.ctx.Lock.Lock() - defer n.ctx.Lock.Unlock() - - if reason := n.blkBuilder.GetDropReason(txID); reason != nil { - // If the tx is being dropped - just ignore it - return nil - } - - // add to mempool - if err := n.blkBuilder.AddUnverifiedTx(tx); err != nil { - n.ctx.Log.Debug("tx failed verification", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - } - return nil -} - -func (n *network) GossipTx(tx *txs.Tx) error { - txID := tx.ID() - // Don't gossip a transaction if it has been recently gossiped. - if _, has := n.recentTxs.Get(txID); has { - return nil - } - n.recentTxs.Put(txID, struct{}{}) - - n.ctx.Log.Debug("gossiping tx", - zap.Stringer("txID", txID), - ) - - msg := &message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(msg) - if err != nil { - return fmt.Errorf("GossipTx: failed to build Tx message: %w", err) - } - return n.appSender.SendAppGossip(context.TODO(), msgBytes) -} diff --git a/vms/platformvm/block/builder/network_test.go b/vms/platformvm/block/builder/network_test.go deleted file mode 100644 index 365fc130553a..000000000000 --- a/vms/platformvm/block/builder/network_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" -) - -func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { - tx, err := txBuilder.NewCreateChainTx( - testSubnet1.ID(), - nil, - constants.AVMID, - nil, - "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - ) - require.NoError(t, err) - return tx -} - -// show that a tx learned from gossip is validated and added to mempool -func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - nodeID := ids.GenerateTestNodeID() - - // create a tx - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - // Free lock because [AppGossip] waits for the context lock - env.ctx.Lock.Unlock() - // show that unknown tx is added to mempool - require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) - require.True(env.Builder.Has(txID)) - // Grab lock back - env.ctx.Lock.Lock() - - // and gossiped if it has just been discovered - require.NotNil(gossipedBytes) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err) - - reply := replyIntf.(*message.Tx) - retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) -} - -// show that txs already marked as invalid are not re-requested on gossiping -func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - // create a tx and mark as invalid - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - env.Builder.MarkDropped(txID, errTestingDropped) - - // show that the invalid tx is not requested - nodeID := ids.GenerateTestNodeID() - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - env.ctx.Lock.Unlock() - require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) - env.ctx.Lock.Lock() - require.False(env.Builder.Has(txID)) -} - -// show that locally generated txs are gossiped -func TestMempoolNewLocaTxIsGossiped(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - // add a tx to the mempool and show it gets gossiped - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - require.NoError(env.Builder.AddUnverifiedTx(tx)) - require.NotNil(gossipedBytes) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - 
require.NoError(err) - - reply := replyIntf.(*message.Tx) - retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) - - // show that transaction is not re-gossiped is recently added to mempool - gossipedBytes = nil - env.Builder.Remove([]*txs.Tx{tx}) - require.NoError(env.Builder.Add(tx)) - - require.Nil(gossipedBytes) -} diff --git a/vms/platformvm/block/builder/standard_block_test.go b/vms/platformvm/block/builder/standard_block_test.go index 827d7357728b..6064b2153113 100644 --- a/vms/platformvm/block/builder/standard_block_test.go +++ b/vms/platformvm/block/builder/standard_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -24,9 +24,7 @@ func TestAtomicTxImports(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), @@ -41,7 +39,7 @@ func TestAtomicTxImports(t *testing.T) { peerSharedMemory := m.NewSharedMemory(env.ctx.XChainID) utxo := &avax.UTXO{ UTXOID: utxoID, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -50,7 +48,7 @@ func TestAtomicTxImports(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() diff --git a/vms/platformvm/block/codec.go b/vms/platformvm/block/codec.go index 184762ec9b65..6e8bd9605e5b 100644 --- a/vms/platformvm/block/codec.go +++ b/vms/platformvm/block/codec.go @@ -8,13 +8,14 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -23,23 +24,25 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -// Version is the current default codec version -const Version = txs.Version +const CodecVersion = txs.CodecVersion -// GenesisCode allows blocks of larger than usual size to be parsed. -// While this gives flexibility in accommodating large genesis blocks -// it must not be used to parse new, unverified blocks which instead -// must be processed by Codec var ( - Codec codec.Manager + // GenesisCodec allows blocks of larger than usual size to be parsed. 
+ // While this gives flexibility in accommodating large genesis blocks + // it must not be used to parse new, unverified blocks which instead + // must be processed by Codec GenesisCodec codec.Manager + + Codec codec.Manager ) -func init() { - c := linearcodec.NewCaminoDefault() - Codec = codec.NewDefaultManager() - gc := linearcodec.NewCaminoCustomMaxLength(math.MaxInt32) - GenesisCodec = codec.NewManager(math.MaxInt32) +// TODO: Remove after v1.11.x has activated +// +// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed +// concurrently +func InitCodec(durangoTime time.Time) error { + c := linearcodec.NewCaminoDefault(durangoTime) + gc := linearcodec.NewCaminoCustomMaxLength(time.Time{}, math.MaxInt32) errs := wrappers.Errs{} for _, c := range []linearcodec.CaminoCodec{c, gc} { @@ -50,12 +53,25 @@ func init() { txs.RegisterDUnsignedTxsTypes(c), ) } + + newCodec := codec.NewDefaultManager() + newGenesisCodec := codec.NewManager(math.MaxInt32) errs.Add( - Codec.RegisterCodec(Version, c), - GenesisCodec.RegisterCodec(Version, gc), + newCodec.RegisterCodec(CodecVersion, c), + newGenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - panic(errs.Err) + return errs.Err + } + + Codec = newCodec + GenesisCodec = newGenesisCodec + return nil +} + +func init() { + if err := InitCodec(time.Time{}); err != nil { + panic(err) } } diff --git a/vms/platformvm/block/commit_block.go b/vms/platformvm/block/commit_block.go index 02cbadb4e1cd..ac6dbb1ed88f 100644 --- a/vms/platformvm/block/commit_block.go +++ b/vms/platformvm/block/commit_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -43,7 +43,7 @@ func NewBanffCommitBlock( }, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotCommitBlock struct { @@ -75,5 +75,5 @@ func NewApricotCommitBlock( Hght: height, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/vms/platformvm/block/commit_block_test.go b/vms/platformvm/block/commit_block_test.go index 256af6539c96..f89489d521a1 100644 --- a/vms/platformvm/block/commit_block_test.go +++ b/vms/platformvm/block/commit_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/platformvm/block/common_block.go b/vms/platformvm/block/common_block.go index 899fd57b7fcb..f4b46b816b87 100644 --- a/vms/platformvm/block/common_block.go +++ b/vms/platformvm/block/common_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/platformvm/block/executor/acceptor.go b/vms/platformvm/block/executor/acceptor.go index c66440aa83fe..cc2bcef0521f 100644 --- a/vms/platformvm/block/executor/acceptor.go +++ b/vms/platformvm/block/executor/acceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
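A minimal sketch of how a caller might use the new InitCodec hook once the Durango activation time is known; only block.InitCodec itself comes from this change, the wrapper function and its call site below are assumptions for illustration.

package example

import (
	"time"

	"github.com/ava-labs/avalanchego/vms/platformvm/block"
)

// initBlockCodecs re-registers the P-chain block codecs for the given
// Durango activation time. Per the invariant documented on InitCodec, this
// must not run concurrently with readers of block.Codec or block.GenesisCodec.
func initBlockCodecs(durangoTime time.Time) error {
	return block.InitCodec(durangoTime)
}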
package executor @@ -33,11 +33,11 @@ type acceptor struct { } func (a *acceptor) BanffAbortBlock(b *block.BanffAbortBlock) error { - return a.abortBlock(b, "banff abort") + return a.optionBlock(b, "banff abort") } func (a *acceptor) BanffCommitBlock(b *block.BanffCommitBlock) error { - return a.commitBlock(b, "apricot commit") + return a.optionBlock(b, "banff commit") } func (a *acceptor) BanffProposalBlock(b *block.BanffProposalBlock) error { @@ -50,11 +50,11 @@ func (a *acceptor) BanffStandardBlock(b *block.BanffStandardBlock) error { } func (a *acceptor) ApricotAbortBlock(b *block.ApricotAbortBlock) error { - return a.abortBlock(b, "apricot abort") + return a.optionBlock(b, "apricot abort") } func (a *acceptor) ApricotCommitBlock(b *block.ApricotCommitBlock) error { - return a.commitBlock(b, "apricot commit") + return a.optionBlock(b, "apricot commit") } func (a *acceptor) ApricotProposalBlock(b *block.ApricotProposalBlock) error { @@ -116,46 +116,14 @@ func (a *acceptor) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { return nil } -func (a *acceptor) abortBlock(b block.Block, blockType string) error { +func (a *acceptor) optionBlock(b block.Block, blockType string) error { parentID := b.Parent() parentState, ok := a.blkIDToState[parentID] if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteLost() - } else { - a.metrics.MarkOptionVoteWon() - } - } - - return a.optionBlock(b, parentState.statelessBlock, blockType) -} - -func (a *acceptor) commitBlock(b block.Block, blockType string) error { - parentID := b.Parent() - parentState, ok := a.blkIDToState[parentID] - if !ok { - return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) - } - - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteWon() - } else { - a.metrics.MarkOptionVoteLost() - } - } - - return a.optionBlock(b, parentState.statelessBlock, blockType) -} - -func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { blkID := b.ID() - parentID := parent.ID() - defer func() { // Note: we assume this block's sibling doesn't // need the parent's state when it's rejected. @@ -164,7 +132,7 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { }() // Note that the parent must be accepted first. - if err := a.commonAccept(parent); err != nil { + if err := a.commonAccept(parentState.statelessBlock); err != nil { return err } @@ -172,6 +140,12 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { return err } + if parentState.onDecisionState != nil { + if err := parentState.onDecisionState.Apply(a.state); err != nil { + return err + } + } + blkState, ok := a.blkIDToState[blkID] if !ok { return fmt.Errorf("%w %s", errMissingBlockState, blkID) @@ -180,8 +154,23 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { return err } - if err := a.state.Commit(); err != nil { - return err + defer a.state.Abort() + batch, err := a.state.CommitBatch() + if err != nil { + return fmt.Errorf( + "failed to commit VM's database for block %s: %w", + blkID, + err, + ) + } + + // Note that this method writes [batch] to the database. 
+ if err := a.ctx.SharedMemory.Apply(parentState.atomicRequests, batch); err != nil { + return fmt.Errorf("failed to apply vm's state to shared memory: %w", err) + } + + if onAcceptFunc := parentState.onAcceptFunc; onAcceptFunc != nil { + onAcceptFunc() } a.ctx.Log.Trace( diff --git a/vms/platformvm/block/executor/acceptor_test.go b/vms/platformvm/block/executor/acceptor_test.go index c9fafb4445d0..45fd1d54d189 100644 --- a/vms/platformvm/block/executor/acceptor_test.go +++ b/vms/platformvm/block/executor/acceptor_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -124,7 +125,7 @@ func TestAcceptorVisitAtomicBlock(t *testing.T) { // Set [blk]'s state in the map as though it had been verified. onAcceptState := state.NewMockDiff(ctrl) childID := ids.GenerateTestID() - atomicRequests := map[ids.ID]*atomic.Requests{ids.GenerateTestID(): nil} + atomicRequests := make(map[ids.ID]*atomic.Requests) acceptor.backend.blkIDToState[blk.ID()] = &blockState{ onAcceptState: onAcceptState, atomicRequests: atomicRequests, @@ -207,16 +208,15 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { // Set [blk]'s state in the map as though it had been verified. onAcceptState := state.NewMockDiff(ctrl) childID := ids.GenerateTestID() - atomicRequests := map[ids.ID]*atomic.Requests{ids.GenerateTestID(): nil} + atomicRequests := make(map[ids.ID]*atomic.Requests) calledOnAcceptFunc := false acceptor.backend.blkIDToState[blk.ID()] = &blockState{ - onAcceptState: onAcceptState, - atomicRequests: atomicRequests, - standardBlockState: standardBlockState{ - onAcceptFunc: func() { - calledOnAcceptFunc = true - }, + onAcceptState: onAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true }, + + atomicRequests: atomicRequests, } // Give [blk] a child. childOnAcceptState := state.NewMockDiff(ctrl) @@ -281,13 +281,21 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { parentOnAbortState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentStatelessBlk := block.NewMockBlock(ctrl) + calledOnAcceptFunc := false + atomicRequests := make(map[ids.ID]*atomic.Requests) parentState := &blockState{ - statelessBlock: parentStatelessBlk, - onAcceptState: parentOnAcceptState, proposalBlockState: proposalBlockState{ onAbortState: parentOnAbortState, onCommitState: parentOnCommitState, }, + statelessBlock: parentStatelessBlk, + + onAcceptState: parentOnAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true + }, + + atomicRequests: atomicRequests, } acceptor.backend.blkIDToState[parentID] = parentState @@ -295,7 +303,7 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -309,17 +317,25 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { err = acceptor.ApricotCommitBlock(blk) require.ErrorIs(err, errMissingBlockState) + parentOnCommitState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + // Set [blk]'s state in the map as though it had been verified. 
acceptor.backend.blkIDToState[parentID] = parentState - onAcceptState := state.NewMockDiff(ctrl) acceptor.backend.blkIDToState[blkID] = &blockState{ - onAcceptState: onAcceptState, + onAcceptState: parentState.onCommitState, + onAcceptFunc: parentState.onAcceptFunc, + + inputs: parentState.inputs, + timestamp: parentOnCommitState.GetTimestamp(), + atomicRequests: parentState.atomicRequests, } + batch := database.NewMockBatch(ctrl) + // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -329,12 +345,15 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { s.EXPECT().SetHeight(blk.Height()).Times(1), s.EXPECT().AddStatelessBlock(blk).Times(1), - onAcceptState.EXPECT().Apply(s).Times(1), - s.EXPECT().Commit().Return(nil).Times(1), + parentOnCommitState.EXPECT().Apply(s).Times(1), + s.EXPECT().CommitBatch().Return(batch, nil).Times(1), + sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1), s.EXPECT().Checksum().Return(ids.Empty).Times(1), + s.EXPECT().Abort().Times(1), ) require.NoError(acceptor.ApricotCommitBlock(blk)) + require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } @@ -372,13 +391,21 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { parentOnAbortState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentStatelessBlk := block.NewMockBlock(ctrl) + calledOnAcceptFunc := false + atomicRequests := make(map[ids.ID]*atomic.Requests) parentState := &blockState{ - statelessBlock: parentStatelessBlk, - onAcceptState: parentOnAcceptState, proposalBlockState: proposalBlockState{ onAbortState: parentOnAbortState, onCommitState: parentOnCommitState, }, + statelessBlock: parentStatelessBlk, + + onAcceptState: parentOnAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true + }, + + atomicRequests: atomicRequests, } acceptor.backend.blkIDToState[parentID] = parentState @@ -386,7 +413,7 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -400,18 +427,25 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { err = acceptor.ApricotAbortBlock(blk) require.ErrorIs(err, errMissingBlockState) + parentOnAbortState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + // Set [blk]'s state in the map as though it had been verified. acceptor.backend.blkIDToState[parentID] = parentState - - onAcceptState := state.NewMockDiff(ctrl) acceptor.backend.blkIDToState[blkID] = &blockState{ - onAcceptState: onAcceptState, + onAcceptState: parentState.onAbortState, + onAcceptFunc: parentState.onAcceptFunc, + + inputs: parentState.inputs, + timestamp: parentOnAbortState.GetTimestamp(), + atomicRequests: parentState.atomicRequests, } + batch := database.NewMockBatch(ctrl) + // Set expected calls on dependencies. // Make sure the parent is accepted first. 
gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -421,11 +455,14 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { s.EXPECT().SetHeight(blk.Height()).Times(1), s.EXPECT().AddStatelessBlock(blk).Times(1), - onAcceptState.EXPECT().Apply(s).Times(1), - s.EXPECT().Commit().Return(nil).Times(1), + parentOnAbortState.EXPECT().Apply(s).Times(1), + s.EXPECT().CommitBatch().Return(batch, nil).Times(1), + sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1), s.EXPECT().Checksum().Return(ids.Empty).Times(1), + s.EXPECT().Abort().Times(1), ) require.NoError(acceptor.ApricotAbortBlock(blk)) + require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } diff --git a/vms/platformvm/block/executor/backend.go b/vms/platformvm/block/executor/backend.go index fd0e75d0f664..c4e56545634d 100644 --- a/vms/platformvm/block/executor/backend.go +++ b/vms/platformvm/block/executor/backend.go @@ -1,18 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "errors" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) +var errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") + // Shared fields used by visitors. type backend struct { mempool.Mempool @@ -95,3 +99,28 @@ func (b *backend) getTimestamp(blkID ids.ID) time.Time { // so we just return the chain time. return b.state.GetTimestamp() } + +// verifyUniqueInputs returns nil iff no blocks in the inclusive +// ancestry of [blkID] consume an input in [inputs]. +func (b *backend) verifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + if inputs.Len() == 0 { + return nil + } + + // Check for conflicts in ancestors. + for { + state, ok := b.blkIDToState[blkID] + if !ok { + // The parent state isn't pinned in memory. + // This means the parent must be accepted already. + return nil + } + + if state.inputs.Overlaps(inputs) { + return errConflictingParentTxs + } + + blk := state.statelessBlock + blkID = blk.Parent() + } +} diff --git a/vms/platformvm/block/executor/backend_test.go b/vms/platformvm/block/executor/backend_test.go index ce6744369593..19ad55c36d81 100644 --- a/vms/platformvm/block/executor/backend_test.go +++ b/vms/platformvm/block/executor/backend_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/platformvm/block/executor/block.go b/vms/platformvm/block/executor/block.go index 733825412008..5cd5a02f709c 100644 --- a/vms/platformvm/block/executor/block.go +++ b/vms/platformvm/block/executor/block.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
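A rough illustration of how the new verifyUniqueInputs helper could be used by a block verifier. Only backend.verifyUniqueInputs and errConflictingParentTxs come from this diff; the wrapper below is hypothetical and would have to live in this executor package.

package executor

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/set"
	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
)

// checkBlockInputs is a hypothetical caller: it gathers every UTXO input
// consumed by the block's txs and rejects the block if any of them is also
// consumed by a still-pinned (verified but not yet accepted) ancestor.
func checkBlockInputs(b *backend, parentID ids.ID, blockTxs []*txs.Tx) error {
	inputs := set.Set[ids.ID]{}
	for _, tx := range blockTxs {
		// InputIDs reports the UTXO IDs this transaction consumes.
		inputs.Union(tx.Unsigned.InputIDs())
	}
	// Walk the in-memory ancestry from parentID upward; any overlap with an
	// ancestor's consumed inputs returns errConflictingParentTxs.
	return b.verifyUniqueInputs(parentID, inputs)
}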
package executor import ( "context" - "fmt" "time" "go.uber.org/zap" @@ -83,22 +82,18 @@ func (b *Block) Timestamp() time.Time { } func (b *Block) Options(context.Context) ([2]snowman.Block, error) { - options := options{} + options := options{ + log: b.manager.ctx.Log, + primaryUptimePercentage: b.manager.txExecutorBackend.Config.UptimePercentage, + uptimes: b.manager.txExecutorBackend.Uptimes, + state: b.manager.backend.state, + } if err := b.Block.Visit(&options); err != nil { return [2]snowman.Block{}, err } - commitBlock := b.manager.NewBlock(options.commitBlock) - abortBlock := b.manager.NewBlock(options.abortBlock) - - blkID := b.ID() - blkState, ok := b.manager.blkIDToState[blkID] - if !ok { - return [2]snowman.Block{}, fmt.Errorf("block %s state not found", blkID) - } - - if blkState.initiallyPreferCommit { - return [2]snowman.Block{commitBlock, abortBlock}, nil - } - return [2]snowman.Block{abortBlock, commitBlock}, nil + return [2]snowman.Block{ + b.manager.NewBlock(options.preferredBlock), + b.manager.NewBlock(options.alternateBlock), + }, nil } diff --git a/vms/platformvm/block/executor/block_state.go b/vms/platformvm/block/executor/block_state.go index 9f0cd7e0860d..9d6b377c2644 100644 --- a/vms/platformvm/block/executor/block_state.go +++ b/vms/platformvm/block/executor/block_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -13,25 +13,22 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" ) -type standardBlockState struct { - onAcceptFunc func() - inputs set.Set[ids.ID] -} - type proposalBlockState struct { - initiallyPreferCommit bool - onCommitState state.Diff - onAbortState state.Diff + onDecisionState state.Diff + onCommitState state.Diff + onAbortState state.Diff } // The state of a block. // Note that not all fields will be set for a given block. type blockState struct { - standardBlockState proposalBlockState statelessBlock block.Block - onAcceptState state.Diff + onAcceptState state.Diff + onAcceptFunc func() + + inputs set.Set[ids.ID] timestamp time.Time atomicRequests map[ids.ID]*atomic.Requests } diff --git a/vms/platformvm/block/executor/block_test.go b/vms/platformvm/block/executor/block_test.go index 7153ff52d3cf..c26d71705857 100644 --- a/vms/platformvm/block/executor/block_test.go +++ b/vms/platformvm/block/executor/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -6,6 +6,7 @@ package executor import ( "context" "testing" + "time" "github.com/stretchr/testify/require" @@ -14,9 +15,16 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) func TestStatus(t *testing.T) { @@ -127,126 +135,485 @@ func TestStatus(t *testing.T) { func TestBlockOptions(t *testing.T) { type test struct { name string - blkF func() *Block + blkF func(*gomock.Controller) *Block expectedPreferenceType block.Block - expectedErr error } tests := []test{ { name: "apricot proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &block.ApricotProposalBlock{} - blkID := innerBlk.ID() + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - initiallyPreferCommit: true, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.ApricotProposalBlock{}, + manager: manager, + } + }, + expectedPreferenceType: &block.ApricotCommitBlock{}, + }, + { + name: "banff proposal block; invalid proposal tx", + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; missing tx", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, }, }, }, }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; error fetching staker tx", + blkF: func(ctrl 
*gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrClosed) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.ApricotCommitBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "apricot proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &block.ApricotProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; unexpected staker tx type", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + stakerTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + } + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, }, + Uptimes: uptimes, }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.ApricotAbortBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "banff proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &block.BanffProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; missing primary network validator", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - initiallyPreferCommit: true, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, }, }, }, }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; failed calculating primary network uptime", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = 
ids.GenerateTestNodeID() + subnetID = constants.PrimaryNetworkID + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(0.0, database.ErrNotFound) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "banff proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &block.BanffProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; failed fetching subnet transformation", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, }, + Uptimes: uptimes, }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.BanffAbortBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "non oracle block", - blkF: func() *Block { + name: "banff proposal block; prefers commit", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .2 * reward.PercentDenominator, + }, + } + ) + + 
state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + return &Block{ - Block: &block.BanffStandardBlock{}, - manager: &manager{}, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, } }, - expectedErr: snowman.ErrNotOracle, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; prefers abort", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .6 * reward.PercentDenominator, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffAbortBlock{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) require := require.New(t) - blk := tt.blkF() + blk := tt.blkF(ctrl) options, err := blk.Options(context.Background()) - require.ErrorIs(err, tt.expectedErr) - if tt.expectedErr != nil { - return - } + require.NoError(err) require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) }) } diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 7d5a67566472..1a3d2993328b 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -1,17 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( - "context" - "errors" "fmt" "testing" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains" @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -63,8 +64,6 @@ const ( ) var ( - _ mempool.BlockTimer = (*environment)(nil) - defaultMinStakingDuration = 24 * time.Hour defaultMaxStakingDuration = 365 * 24 * time.Hour defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) @@ -75,15 +74,21 @@ var ( preFundedKeys = secp256k1.TestKeys() avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) genesisBlkID ids.ID testSubnet1 *txs.Tx - errMissing = errors.New("missing") + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type stakerStatus uint type staker struct { @@ -121,10 +126,6 @@ type environment struct { backend *executor.Backend } -func (*environment) ResetBlockTimer() { - // dummy call, do nothing for now -} - func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { res := &environment{ isBootstrapped: &utils.Atomic[bool]{}, @@ -134,7 +135,13 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { res.isBootstrapped.Set(true) res.baseDB = versiondb.New(memdb.New()) - res.ctx = defaultCtx(res.baseDB) + atomicDB := prefixdb.New([]byte{1}, res.baseDB) + m := atomic.NewMemory(atomicDB) + + res.ctx = snowtest.Context(t, snowtest.PChainID) + res.ctx.AVAXAssetID = avaxAssetID + res.ctx.SharedMemory = m.NewSharedMemory(res.ctx.ChainID) + res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) @@ -189,7 +196,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { metrics := metrics.Noop var err error - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, nil) if err != nil { panic(fmt.Errorf("failed to create mempool: %w", err)) } @@ -215,6 +222,31 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { // whatever we need } + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + if res.mockedState != nil { + // state is mocked, nothing to do here + return + } + + require := require.New(t) + + if res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(res.state.Commit()) + } + + if res.state != nil { + require.NoError(res.state.Close()) + } + + require.NoError(res.baseDB.Close()) + }) + return res } @@ -275,7 +307,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -290,35 +321,6 @@ func defaultState( return state } -func defaultCtx(db database.Database) *snow.Context { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 - 
ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) - - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - - return ctx -} - func defaultConfig() *config.Config { return &config.Config{ Chains: chains.TestManager, @@ -370,7 +372,7 @@ func (fvi *fxVMInt) Logger() logging.Logger { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } @@ -400,15 +402,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -450,33 +451,6 @@ func buildGenesisTest(ctx *snow.Context) []byte { return genesisBytes } -func shutdownEnvironment(t *environment) error { - if t.mockedState != nil { - // state is mocked, nothing to do here - return nil - } - - if t.isBootstrapped.Get() { - validatorIDs := t.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := t.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := t.state.Commit(); err != nil { - return err - } - } - - var err error - if t.state != nil { - err = t.state.Close() - } - return utils.Err( - err, - t.baseDB.Close(), - ) -} - func addPendingValidator( env *environment, startTime time.Time, diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index ea3609349b74..ee29684808f8 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -1,29 +1,49 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "errors" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) -var _ Manager = (*manager)(nil) +var ( + _ Manager = (*manager)(nil) + + ErrChainNotSynced = errors.New("chain not synced") +) type Manager interface { state.Versions // Returns the ID of the most recently accepted block. LastAccepted() ids.ID + + SetPreference(blkID ids.ID) (updated bool) + Preferred() ids.ID + GetBlock(blkID ids.ID) (snowman.Block, error) GetStatelessBlock(blkID ids.ID) (block.Block, error) NewBlock(block.Block) snowman.Block + + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. + VerifyTx(tx *txs.Tx) error + + // VerifyUniqueInputs verifies that the inputs are not duplicated in the + // provided blk or any of its ancestors pinned in memory. + VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error } func NewManager( @@ -33,9 +53,10 @@ func NewManager( txExecutorBackend *executor.Backend, validatorManager validators.Manager, ) Manager { + lastAccepted := s.GetLastAccepted() backend := &backend{ Mempool: mempool, - lastAccepted: s.GetLastAccepted(), + lastAccepted: lastAccepted, state: s, ctx: txExecutorBackend.Ctx, blkIDToState: map[ids.ID]*blockState{}, @@ -57,6 +78,8 @@ func NewManager( backend: backend, addTxsToMempool: !txExecutorBackend.Config.PartialSyncPrimaryNetwork, }, + preferred: lastAccepted, + txExecutorBackend: txExecutorBackend, } } @@ -65,6 +88,9 @@ type manager struct { verifier block.Visitor acceptor block.Visitor rejector block.Visitor + + preferred ids.ID + txExecutorBackend *executor.Backend } func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { @@ -85,3 +111,54 @@ func (m *manager) NewBlock(blk block.Block) snowman.Block { Block: blk, } } + +func (m *manager) SetPreference(blkID ids.ID) bool { + updated := m.preferred != blkID + m.preferred = blkID + return updated +} + +func (m *manager) Preferred() ids.ID { + return m.preferred +} + +func (m *manager) VerifyTx(tx *txs.Tx) error { + if !m.txExecutorBackend.Bootstrapped.Get() { + return ErrChainNotSynced + } + + stateDiff, err := state.NewDiff(m.preferred, m) + if err != nil { + return err + } + + nextBlkTime, _, err := executor.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) + if err != nil { + return err + } + + _, err = executor.AdvanceTimeTo(m.txExecutorBackend, stateDiff, nextBlkTime) + if err != nil { + return err + } + + err = tx.Unsigned.Visit(&executor.CaminoStandardTxExecutor{ + StandardTxExecutor: executor.StandardTxExecutor{ + Backend: m.txExecutorBackend, + State: stateDiff, + Tx: tx, + }, + }) + // We ignore [errFutureStakeTime] here because the time will be advanced + // when this transaction is issued. + // + // TODO: Remove this check post-Durango. 
+ if errors.Is(err, executor.ErrFutureStakeTime) { + return nil + } + return err +} + +func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + return m.backend.verifyUniqueInputs(blkID, inputs) +} diff --git a/vms/platformvm/block/executor/manager_test.go b/vms/platformvm/block/executor/manager_test.go index 8ee784c4f9f1..55cf01d7c8ab 100644 --- a/vms/platformvm/block/executor/manager_test.go +++ b/vms/platformvm/block/executor/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -72,3 +72,18 @@ func TestManagerLastAccepted(t *testing.T) { require.Equal(t, lastAcceptedID, manager.LastAccepted()) } + +func TestManagerSetPreference(t *testing.T) { + require := require.New(t) + + initialPreference := ids.GenerateTestID() + manager := &manager{ + preferred: initialPreference, + } + require.False(manager.SetPreference(initialPreference)) + + newPreference := ids.GenerateTestID() + require.True(manager.SetPreference(newPreference)) + require.False(manager.SetPreference(newPreference)) + require.True(manager.SetPreference(initialPreference)) +} diff --git a/vms/platformvm/block/executor/mock_manager.go b/vms/platformvm/block/executor/mock_manager.go index 07f163fad635..5e8222383071 100644 --- a/vms/platformvm/block/executor/mock_manager.go +++ b/vms/platformvm/block/executor/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/block/executor (interfaces: Manager) +// Source: vms/platformvm/block/executor/manager.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/block/executor/manager.go -destination=vms/platformvm/block/executor/mock_manager.go -package=executor -exclude_interfaces= +// // Package executor is a generated GoMock package. package executor @@ -12,8 +14,10 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + set "github.com/ava-labs/avalanchego/utils/set" block "github.com/ava-labs/avalanchego/vms/platformvm/block" state "github.com/ava-labs/avalanchego/vms/platformvm/state" + txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" gomock "go.uber.org/mock/gomock" ) @@ -41,48 +45,48 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // GetBlock mocks base method. -func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { +func (m *MockManager) GetBlock(blkID ids.ID) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) + ret := m.ctrl.Call(m, "GetBlock", blkID) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. -func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), blkID) } // GetState mocks base method. 
-func (m *MockManager) GetState(arg0 ids.ID) (state.Chain, bool) { +func (m *MockManager) GetState(blkID ids.ID) (state.Chain, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) + ret := m.ctrl.Call(m, "GetState", blkID) ret0, _ := ret[0].(state.Chain) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetState indicates an expected call of GetState. -func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetState(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), blkID) } // GetStatelessBlock mocks base method. -func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (block.Block, error) { +func (m *MockManager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) + ret := m.ctrl.Call(m, "GetStatelessBlock", blkID) ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStatelessBlock indicates an expected call of GetStatelessBlock. -func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetStatelessBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), blkID) } // LastAccepted mocks base method. @@ -108,7 +112,63 @@ func (m *MockManager) NewBlock(arg0 block.Block) snowman.Block { } // NewBlock indicates an expected call of NewBlock. -func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) NewBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) } + +// Preferred mocks base method. +func (m *MockManager) Preferred() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Preferred") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Preferred indicates an expected call of Preferred. +func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Preferred", reflect.TypeOf((*MockManager)(nil).Preferred)) +} + +// SetPreference mocks base method. +func (m *MockManager) SetPreference(blkID ids.ID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPreference", blkID) + ret0, _ := ret[0].(bool) + return ret0 +} + +// SetPreference indicates an expected call of SetPreference. +func (mr *MockManagerMockRecorder) SetPreference(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), blkID) +} + +// VerifyTx mocks base method. +func (m *MockManager) VerifyTx(tx *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyTx", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyTx indicates an expected call of VerifyTx. 
+func (mr *MockManagerMockRecorder) VerifyTx(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), tx) +} + +// VerifyUniqueInputs mocks base method. +func (m *MockManager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyUniqueInputs", blkID, inputs) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyUniqueInputs indicates an expected call of VerifyUniqueInputs. +func (mr *MockManagerMockRecorder) VerifyUniqueInputs(blkID, inputs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), blkID, inputs) +} diff --git a/vms/platformvm/block/executor/options.go b/vms/platformvm/block/executor/options.go index 29a1d02922bd..f349caa661e4 100644 --- a/vms/platformvm/block/executor/options.go +++ b/vms/platformvm/block/executor/options.go @@ -1,22 +1,47 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "errors" "fmt" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -var _ block.Visitor = (*verifier)(nil) +var ( + _ block.Visitor = (*options)(nil) + + errUnexpectedProposalTxType = errors.New("unexpected proposal transaction type") + errFailedFetchingStakerTx = errors.New("failed fetching staker transaction") + errUnexpectedStakerTxType = errors.New("unexpected staker transaction type") + errFailedFetchingPrimaryStaker = errors.New("failed fetching primary staker") + errFailedFetchingSubnetTransformation = errors.New("failed fetching subnet transformation") + errFailedCalculatingUptime = errors.New("failed calculating uptime") +) // options supports build new option blocks type options struct { + // inputs populated before calling this struct's methods: + log logging.Logger + primaryUptimePercentage float64 + uptimes uptime.Calculator + state state.Chain + // outputs populated by this struct's methods: - commitBlock block.Block - abortBlock block.Block + preferredBlock block.Block + alternateBlock block.Block } func (*options) BanffAbortBlock(*block.BanffAbortBlock) error { @@ -32,8 +57,7 @@ func (o *options) BanffProposalBlock(b *block.BanffProposalBlock) error { blkID := b.ID() nextHeight := b.Height() + 1 - var err error - o.commitBlock, err = block.NewBanffCommitBlock(timestamp, blkID, nextHeight) + commitBlock, err := block.NewBanffCommitBlock(timestamp, blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create commit block: %w", @@ -41,13 +65,35 @@ func (o *options) BanffProposalBlock(b *block.BanffProposalBlock) error { ) } - o.abortBlock, err = block.NewBanffAbortBlock(timestamp, blkID, nextHeight) + abortBlock, err := block.NewBanffAbortBlock(timestamp, blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create abort block: %w", err, ) } + + 
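+	// Descriptive note on the added code below: for a RewardValidatorTx, the commit option
+	// rewards the staker while the abort option does not. prefersCommit (defined further down)
+	// makes this call by comparing the node's measured uptime against the required uptime threshold.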
prefersCommit, err := o.prefersCommit(b.Tx) + if err != nil { + o.log.Debug("falling back to prefer commit", + zap.Error(err), + ) + // We fall back to commit here to err on the side of over-rewarding + // rather than under-rewarding. + // + // Invariant: We must not return the error here, because the error would + // be treated as fatal. Errors can occur here due to a malicious block + // proposer or even in unusual virtuous cases. + prefersCommit = true + } + + if prefersCommit { + o.preferredBlock = commitBlock + o.alternateBlock = abortBlock + } else { + o.preferredBlock = abortBlock + o.alternateBlock = commitBlock + } return nil } @@ -68,7 +114,7 @@ func (o *options) ApricotProposalBlock(b *block.ApricotProposalBlock) error { nextHeight := b.Height() + 1 var err error - o.commitBlock, err = block.NewApricotCommitBlock(blkID, nextHeight) + o.preferredBlock, err = block.NewApricotCommitBlock(blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create commit block: %w", @@ -76,7 +122,7 @@ ) } - o.abortBlock, err = block.NewApricotAbortBlock(blkID, nextHeight) + o.alternateBlock, err = block.NewApricotAbortBlock(blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create abort block: %w", err, ) } @@ -93,3 +139,58 @@ func (*options) ApricotStandardBlock(*block.ApricotStandardBlock) error { return snowman.ErrNotOracle } func (*options) ApricotAtomicBlock(*block.ApricotAtomicBlock) error { return snowman.ErrNotOracle } + +func (o *options) prefersCommit(tx *txs.Tx) (bool, error) { + var unsignedTx *txs.RewardValidatorTx + switch utx := tx.Unsigned.(type) { + case *txs.RewardValidatorTx: + unsignedTx = utx + case *txs.CaminoRewardValidatorTx: + // CaminoRewardValidatorTx doesn't have any difference + // between commit and abort states, so we always prefer commit. + return true, nil + default: + return false, fmt.Errorf("%w: %T", errUnexpectedProposalTxType, tx.Unsigned) + } + + stakerTx, _, err := o.state.GetTx(unsignedTx.TxID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingStakerTx, err) + } + + staker, ok := stakerTx.Unsigned.(txs.Staker) + if !ok { + return false, fmt.Errorf("%w: %T", errUnexpectedStakerTxType, stakerTx.Unsigned) + } + + nodeID := staker.NodeID() + primaryNetworkValidator, err := o.state.GetCurrentValidator( + constants.PrimaryNetworkID, + nodeID, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingPrimaryStaker, err) + } + + expectedUptimePercentage := o.primaryUptimePercentage + if subnetID := staker.SubnetID(); subnetID != constants.PrimaryNetworkID { + transformSubnet, err := executor.GetTransformSubnetTx(o.state, subnetID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingSubnetTransformation, err) + } + + expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator + } + + // TODO: calculate subnet uptimes + uptime, err := o.uptimes.CalculateUptimePercentFrom( + nodeID, + constants.PrimaryNetworkID, + primaryNetworkValidator.StartTime, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedCalculatingUptime, err) + } + + return uptime >= expectedUptimePercentage, nil +} diff --git a/vms/platformvm/block/executor/options_test.go b/vms/platformvm/block/executor/options_test.go index bf8e6e3e67b6..54bef77919b7 100644 --- a/vms/platformvm/block/executor/options_test.go +++ b/vms/platformvm/block/executor/options_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc.
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 76b1c11c4fe2..badb0404d3e9 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -28,6 +28,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -43,9 +44,6 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() // create apricotParentBlk. It's a standard one for simplicity parentHeight := uint64(2022) @@ -110,14 +108,6 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { }).Times(2) currentStakersIt.EXPECT().Release() onParentAccept.EXPECT().GetCurrentStakerIterator().Return(currentStakersIt, nil) - onParentAccept.EXPECT().GetCurrentValidator(utx.SubnetID(), utx.NodeID()).Return(&state.Staker{ - TxID: addValTx.ID(), - NodeID: utx.NodeID(), - SubnetID: utx.SubnetID(), - StartTime: utx.StartTime(), - NextTime: chainTime, - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(addValTx.ID()).Return(addValTx, status.Committed, nil) onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, utx.NodeID()).Return(uint64(0), nil).AnyTimes() @@ -158,11 +148,9 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.clk.Set(defaultGenesisTime) - env.config.BanffTime = time.Time{} // activate Banff + env.config.BanffTime = time.Time{} // activate Banff + env.config.DurangoTime = mockable.MaxTime // deactivate Durango // create parentBlock. 
It's a standard one for simplicity parentTime := defaultGenesisTime @@ -199,7 +187,6 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { } return nil, database.ErrNotFound }).AnyTimes() - onParentAccept.EXPECT().Config().Return(env.config, nil).AnyTimes() // setup state to validate proposal block transaction nextStakerTime := chainTime.Add(executor.SyncBound).Add(-1 * time.Second) @@ -223,13 +210,6 @@ require.NoError(nextStakerTx.Initialize(txs.Codec)) nextStakerTxID := nextStakerTx.ID() - onParentAccept.EXPECT().GetCurrentValidator(unsignedNextStakerTx.SubnetID(), unsignedNextStakerTx.NodeID()).Return(&state.Staker{ - TxID: nextStakerTxID, - NodeID: unsignedNextStakerTx.NodeID(), - SubnetID: unsignedNextStakerTx.SubnetID(), - StartTime: unsignedNextStakerTx.StartTime(), - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(nextStakerTxID).Return(nextStakerTx, status.Processing, nil) currentStakersIt := state.NewMockStakerIterator(ctrl) @@ -280,6 +260,7 @@ parentID, banffParentBlk.Height(), blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -309,6 +290,7 @@ parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -326,6 +308,7 @@ parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -343,6 +326,7 @@ parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -364,6 +348,7 @@ parentID, banffParentBlk.Height()+1, invalidTx, + []*txs.Tx{}, ) require.NoError(err) @@ -379,6 +364,7 @@ parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -395,6 +381,7 @@ parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -414,57 +401,52 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // Staker5: |--------------------| // Staker0 it's here just to allow to issue a proposal block with the chosen endTime. - staker0RewardAddress := ids.ShortID{2} + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turn depends on every feature of the transaction creating the staker. + // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests.
staker0 := staker{ - nodeID: ids.NodeID(staker0RewardAddress), - rewardAddress: staker0RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf0}), + rewardAddress: ids.ShortID{0xf0}, startTime: defaultGenesisTime, endTime: time.Time{}, // actual endTime depends on specific test } - staker1RewardAddress := ids.GenerateTestShortID() staker1 := staker{ - nodeID: ids.NodeID(staker1RewardAddress), - rewardAddress: staker1RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } - - staker2RewardAddress := ids.ShortID{1} staker2 := staker{ - nodeID: ids.NodeID(staker2RewardAddress), - rewardAddress: staker2RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } - - staker3RewardAddress := ids.GenerateTestShortID() staker3 := staker{ - nodeID: ids.NodeID(staker3RewardAddress), - rewardAddress: staker3RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } - staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: staker3.rewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } - - staker4RewardAddress := ids.GenerateTestShortID() staker4 := staker{ - nodeID: ids.NodeID(staker4RewardAddress), - rewardAddress: staker4RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } - - staker5RewardAddress := ids.GenerateTestShortID() staker5 := staker{ - nodeID: ids.NodeID(staker5RewardAddress), - rewardAddress: staker5RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -563,15 +545,19 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, - // given its txID, staker2 will be - // rewarded and moved out of current stakers set - // staker2.nodeID: current, + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, this test injects + // staker0 into the staker set artificially to advance the time. + // This means that staker2 is not removed by the ProposalBlock + // when advancing the time. 
+ staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, staker5.nodeID: current, @@ -583,10 +569,6 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { t.Run(test.description, func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -658,9 +640,11 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker0, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -686,6 +670,7 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -734,17 +719,13 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -758,9 +739,11 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -772,7 +755,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -816,9 +799,11 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -844,6 +829,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -873,9 +859,6 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { t.Run(fmt.Sprintf("tracked %t", tracked), func(ts *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -884,8 +867,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - 
subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -929,9 +911,11 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -957,6 +941,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -969,7 +954,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { require.NoError(propBlk.Accept(context.Background())) require.NoError(commitBlk.Accept(context.Background())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) - require.Equal(tracked, ok) + require.True(ok) }) } } @@ -977,9 +962,6 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time @@ -1015,9 +997,11 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1043,6 +1027,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1106,9 +1091,11 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1134,6 +1121,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -1156,9 +1144,6 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time @@ -1167,7 +1152,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeIDKey, _ := secp256k1.NewPrivateKey() rewardAddress := nodeIDKey.PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := ids.BuildTestNodeID(rewardAddress[:]) _, err := addPendingValidator( env, @@ -1196,9 +1181,11 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { 
require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1224,6 +1211,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1286,9 +1274,11 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require.NoError(err) // store Staker0 to state + addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1314,6 +1304,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) @@ -1331,3 +1322,142 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } + +func TestAddValidatorProposalBlock(t *testing.T) { + require := require.New(t) + env := newEnvironment(t, nil) + env.config.BanffTime = time.Time{} // activate Banff + env.config.DurangoTime = time.Time{} // activate Durango + + now := env.clk.Time() + + // Create validator tx + var ( + validatorStartTime = now.Add(2 * executor.SyncBound) + validatorEndTime = validatorStartTime.Add(env.config.MinStakeDuration) + nodeID = ids.GenerateTestNodeID() + ) + + addValidatorTx, err := env.txBuilder.NewAddValidatorTx( + env.config.MinValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + preFundedKeys[0].PublicKey().Address(), + 10000, + []*secp256k1.PrivateKey{ + preFundedKeys[0], + preFundedKeys[1], + preFundedKeys[4], + }, + ids.ShortEmpty, + ) + require.NoError(err) + + // Add validator through a [StandardBlock] + preferredID := env.blkManager.Preferred() + preferred, err := env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + statelessBlk, err := block.NewBanffStandardBlock( + now.Add(executor.SyncBound), + preferredID, + preferred.Height()+1, + []*txs.Tx{addValidatorTx}, + ) + require.NoError(err) + blk := env.blkManager.NewBlock(statelessBlk) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.True(env.blkManager.SetPreference(statelessBlk.ID())) + + // Should be current + staker, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.NotNil(staker) + + // Advance time until next staker change time is [validatorEndTime] + for { + nextStakerChangeTime, err := executor.GetNextStakerChangeTime(env.state) + require.NoError(err) + if nextStakerChangeTime.Equal(validatorEndTime) { + break + } + + preferredID = env.blkManager.Preferred() + preferred, err = env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + statelessBlk, err = block.NewBanffStandardBlock( + nextStakerChangeTime, + preferredID, + preferred.Height()+1, + nil, + ) + require.NoError(err) + blk = env.blkManager.NewBlock(statelessBlk) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + 
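+	// Point the manager's preference at the block just accepted so the
+	// next block built in this loop is created on top of it.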
require.True(env.blkManager.SetPreference(statelessBlk.ID())) + } + + env.clk.Set(validatorEndTime) + now = env.clk.Time() + + // Create another validator tx + validatorStartTime = now.Add(2 * executor.SyncBound) + validatorEndTime = validatorStartTime.Add(env.config.MinStakeDuration) + nodeID = ids.GenerateTestNodeID() + + addValidatorTx2, err := env.txBuilder.NewAddValidatorTx( + env.config.MinValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + preFundedKeys[0].PublicKey().Address(), + 10000, + []*secp256k1.PrivateKey{ + preFundedKeys[0], + preFundedKeys[1], + preFundedKeys[4], + }, + ids.ShortEmpty, + ) + require.NoError(err) + + // Add validator through a [ProposalBlock] and reward the last one + preferredID = env.blkManager.Preferred() + preferred, err = env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + rewardValidatorTx, err := env.txBuilder.NewRewardValidatorTx(addValidatorTx.ID()) + require.NoError(err) + + statelessProposalBlk, err := block.NewBanffProposalBlock( + now, + preferredID, + preferred.Height()+1, + rewardValidatorTx, + []*txs.Tx{addValidatorTx2}, + ) + require.NoError(err) + blk = env.blkManager.NewBlock(statelessProposalBlk) + require.NoError(blk.Verify(context.Background())) + + options, err := blk.(snowman.OracleBlock).Options(context.Background()) + require.NoError(err) + commitBlk := options[0] + require.NoError(commitBlk.Verify(context.Background())) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) + + // Should be current + staker, err = env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.NotNil(staker) + + rewardUTXOs, err := env.state.GetRewardUTXOs(addValidatorTx.ID()) + require.NoError(err) + require.NotEmpty(rewardUTXOs) +} diff --git a/vms/platformvm/block/executor/rejector.go b/vms/platformvm/block/executor/rejector.go index daa6939f05cd..b5dde1f6e84c 100644 --- a/vms/platformvm/block/executor/rejector.go +++ b/vms/platformvm/block/executor/rejector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -82,5 +82,7 @@ func (r *rejector) rejectBlock(b block.Block, blockType string) error { } } + r.Mempool.RequestBuildBlock(false /*=emptyBlockPermitted*/) + return nil } diff --git a/vms/platformvm/block/executor/rejector_test.go b/vms/platformvm/block/executor/rejector_test.go index 1e1e5768618d..4391ed3d494c 100644 --- a/vms/platformvm/block/executor/rejector_test.go +++ b/vms/platformvm/block/executor/rejector_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -44,6 +44,7 @@ func TestRejectBlock(t *testing.T) { }, Creds: []verify.Verifiable{}, }, + []*txs.Tx{}, ) }, rejectFunc: func(r *rejector, b block.Block) error { @@ -142,6 +143,8 @@ func TestRejectBlock(t *testing.T) { mempool.EXPECT().Add(tx).Return(nil).Times(1) } + mempool.EXPECT().RequestBuildBlock(false).Times(1) + require.NoError(tt.rejectFunc(rejector, blk)) // Make sure block and its parent are removed from the state map. 
require.NotContains(rejector.blkIDToState, blk.ID()) diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index baa3e2c8077b..4e0d3621585b 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -41,9 +41,6 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() // setup and store parent block // it's a standard block for simplicity @@ -97,9 +94,6 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() now := env.clk.Time() env.clk.Set(now) env.config.BanffTime = time.Time{} // activate Banff @@ -129,7 +123,6 @@ env.blkManager.(*manager).lastAccepted = parentID env.mockedState.EXPECT().GetLastAccepted().Return(parentID).AnyTimes() env.mockedState.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() - onParentAccept.EXPECT().Config().Return(env.config, nil).AnyTimes() nextStakerTime := chainTime.Add(executor.SyncBound).Add(-1 * time.Second) @@ -319,9 +312,6 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time @@ -384,39 +374,45 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { // Staker3sub: |----------------| // Staker4: |------------------------| // Staker5: |--------------------| + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turn depends on every feature of the transaction creating the staker. + // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests.
staker1 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } staker2 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } staker3 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } staker4 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } staker5 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -495,11 +491,17 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, + + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, it is valid to only + // advance the time with a StandardBlock and not remove staker2, + // which is what this test does. 
staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, @@ -512,9 +514,6 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { t.Run(test.description, func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -614,17 +613,13 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -638,9 +633,11 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -652,7 +649,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -712,9 +709,6 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -723,8 +717,7 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -768,7 +761,7 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { require.NoError(block.Verify(context.Background())) require.NoError(block.Accept(context.Background())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) - require.Equal(tracked, ok) + require.True(ok) }) } } @@ -776,9 +769,6 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index 
9a2b542374a1..ca030e54fad5 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -30,13 +30,13 @@ import ( var ( _ block.Visitor = (*verifier)(nil) + ErrConflictingBlockTxs = errors.New("block contains conflicting transactions") + errApricotBlockIssuedAfterFork = errors.New("apricot block issued after fork") errBanffProposalBlockWithMultipleTransactions = errors.New("BanffProposalBlock contains multiple transactions") errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") errIncorrectBlockHeight = errors.New("incorrect block height") errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") - errConflictingBatchTxs = errors.New("block contains conflicting transactions") - errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") ) @@ -61,7 +61,8 @@ func (v *verifier) BanffCommitBlock(b *block.BanffCommitBlock) error { } func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { - if len(b.Transactions) != 0 { + nextChainTime := b.Timestamp() + if !v.txExecutorBackend.Config.IsDurangoActivated(nextChainTime) && len(b.Transactions) != 0 { return errBanffProposalBlockWithMultipleTransactions } @@ -70,33 +71,40 @@ func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { } parentID := b.Parent() - onCommitState, err := state.NewDiff(parentID, v.backend) + onDecisionState, err := state.NewDiff(parentID, v.backend) if err != nil { return err } - onAbortState, err := state.NewDiff(parentID, v.backend) - if err != nil { + + // Advance the time to [nextChainTime]. + if _, err := executor.AdvanceTimeTo(v.txExecutorBackend, onDecisionState, nextChainTime); err != nil { return err } - // Apply the changes, if any, from advancing the chain time. - nextChainTime := b.Timestamp() - changes, err := executor.AdvanceTimeTo( - v.txExecutorBackend, - onCommitState, - nextChainTime, - ) + inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, onDecisionState, b.Parent()) if err != nil { return err } - onCommitState.SetTimestamp(nextChainTime) - changes.Apply(onCommitState) + onCommitState, err := state.NewDiffOn(onDecisionState) + if err != nil { + return err + } - onAbortState.SetTimestamp(nextChainTime) - changes.Apply(onAbortState) + onAbortState, err := state.NewDiffOn(onDecisionState) + if err != nil { + return err + } - return v.proposalBlock(&b.ApricotProposalBlock, onCommitState, onAbortState) + return v.proposalBlock( + &b.ApricotProposalBlock, + onDecisionState, + onCommitState, + onAbortState, + inputs, + atomicRequests, + onAcceptFunc, + ) } func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { @@ -110,12 +118,11 @@ func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { return err } - // Apply the changes, if any, from advancing the chain time. - nextChainTime := b.Timestamp() - changes, err := executor.AdvanceTimeTo( + // Advance the time to [b.Timestamp()]. 
+ changed, err := executor.AdvanceTimeTo( v.txExecutorBackend, onAcceptState, - nextChainTime, + b.Timestamp(), ) if err != nil { return err @@ -123,13 +130,10 @@ func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { // If this block doesn't perform any changes, then it should never have been // issued. - if changes.Len() == 0 && len(b.Transactions) == 0 { + if !changed && len(b.Transactions) == 0 { return errBanffStandardBlockWithoutChanges } - onAcceptState.SetTimestamp(nextChainTime) - changes.Apply(onAcceptState) - return v.standardBlock(&b.ApricotStandardBlock, onAcceptState) } @@ -162,7 +166,7 @@ func (v *verifier) ApricotProposalBlock(b *block.ApricotProposalBlock) error { return err } - return v.proposalBlock(b, onCommitState, onAbortState) + return v.proposalBlock(b, nil, onCommitState, onAbortState, nil, nil, nil) } func (v *verifier) ApricotStandardBlock(b *block.ApricotStandardBlock) error { @@ -213,22 +217,22 @@ func (v *verifier) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { atomicExecutor.OnAccept.AddTx(b.Tx, status.Committed) - if err := v.verifyUniqueInputs(b, atomicExecutor.Inputs); err != nil { + if err := v.verifyUniqueInputs(parentID, atomicExecutor.Inputs); err != nil { return err } + v.Mempool.Remove(b.Tx) + blkID := b.ID() v.blkIDToState[blkID] = &blockState{ - standardBlockState: standardBlockState{ - inputs: atomicExecutor.Inputs, - }, statelessBlock: b, - onAcceptState: atomicExecutor.OnAccept, + + onAcceptState: atomicExecutor.OnAccept, + + inputs: atomicExecutor.Inputs, timestamp: atomicExecutor.OnAccept.GetTimestamp(), atomicRequests: atomicExecutor.AtomicRequests, } - - v.Mempool.Remove([]*txs.Tx{b.Tx}) return nil } @@ -330,7 +334,7 @@ func (v *verifier) commonBlock(b block.Block) error { // abortBlock populates the state of this block if [nil] is returned func (v *verifier) abortBlock(b block.Block) error { parentID := b.Parent() - onAcceptState, ok := v.getOnAbortState(parentID) + onAbortState, ok := v.getOnAbortState(parentID) if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } @@ -338,8 +342,8 @@ func (v *verifier) abortBlock(b block.Block) error { blkID := b.ID() v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, - timestamp: onAcceptState.GetTimestamp(), + onAcceptState: onAbortState, + timestamp: onAbortState.GetTimestamp(), } return nil } @@ -347,7 +351,7 @@ func (v *verifier) abortBlock(b block.Block) error { // commitBlock populates the state of this block if [nil] is returned func (v *verifier) commitBlock(b block.Block) error { parentID := b.Parent() - onAcceptState, ok := v.getOnCommitState(parentID) + onCommitState, ok := v.getOnCommitState(parentID) if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } @@ -355,8 +359,8 @@ func (v *verifier) commitBlock(b block.Block) error { blkID := b.ID() v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, - timestamp: onAcceptState.GetTimestamp(), + onAcceptState: onCommitState, + timestamp: onCommitState.GetTimestamp(), } return nil } @@ -364,8 +368,12 @@ func (v *verifier) commitBlock(b block.Block) error { // proposalBlock populates the state of this block if [nil] is returned func (v *verifier) proposalBlock( b *block.ApricotProposalBlock, + onDecisionState state.Diff, onCommitState state.Diff, onAbortState state.Diff, + inputs set.Set[ids.ID], + atomicRequests map[ids.ID]*atomic.Requests, + onAcceptFunc func(), ) error { txExecutor := 
executor.CaminoProposalTxExecutor{ ProposalTxExecutor: executor.ProposalTxExecutor{ @@ -385,21 +393,27 @@ func (v *verifier) proposalBlock( onCommitState.AddTx(b.Tx, status.Committed) onAbortState.AddTx(b.Tx, status.Aborted) + v.Mempool.Remove(b.Tx) + blkID := b.ID() v.blkIDToState[blkID] = &blockState{ proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, - initiallyPreferCommit: txExecutor.PrefersCommit, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, + statelessBlock: b, + + onAcceptFunc: onAcceptFunc, + + inputs: inputs, // It is safe to use [b.onAbortState] here because the timestamp will // never be modified by an Apricot Abort block and the timestamp will // always be the same as the Banff Proposal Block. - timestamp: onAbortState.GetTimestamp(), + timestamp: onAbortState.GetTimestamp(), + atomicRequests: atomicRequests, } - - v.Mempool.Remove([]*txs.Tx{b.Tx}) return nil } @@ -408,45 +422,69 @@ func (v *verifier) standardBlock( b *block.ApricotStandardBlock, onAcceptState state.Diff, ) error { - blkState := &blockState{ + inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, onAcceptState, b.Parent()) + if err != nil { + return err + } + + v.Mempool.Remove(b.Transactions...) + + blkID := b.ID() + v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, + + onAcceptState: onAcceptState, + onAcceptFunc: onAcceptFunc, + timestamp: onAcceptState.GetTimestamp(), - atomicRequests: make(map[ids.ID]*atomic.Requests), + inputs: inputs, + atomicRequests: atomicRequests, } + return nil +} - // Finally we process the transactions - funcs := make([]func(), 0, len(b.Transactions)) - for _, tx := range b.Transactions { +func (v *verifier) processStandardTxs(txs []*txs.Tx, state state.Diff, parentID ids.ID) ( + set.Set[ids.ID], + map[ids.ID]*atomic.Requests, + func(), + error, +) { + var ( + onAcceptFunc func() + inputs set.Set[ids.ID] + funcs = make([]func(), 0, len(txs)) + atomicRequests = make(map[ids.ID]*atomic.Requests) + ) + for _, tx := range txs { txExecutor := executor.CaminoStandardTxExecutor{ StandardTxExecutor: executor.StandardTxExecutor{ Backend: v.txExecutorBackend, - State: onAcceptState, + State: state, Tx: tx, }, } if err := tx.Unsigned.Visit(&txExecutor); err != nil { txID := tx.ID() v.MarkDropped(txID, err) // cache tx as dropped - return err + return nil, nil, nil, err } // ensure it doesn't overlap with current input batch - if blkState.inputs.Overlaps(txExecutor.Inputs) { - return errConflictingBatchTxs + if inputs.Overlaps(txExecutor.Inputs) { + return nil, nil, nil, ErrConflictingBlockTxs } // Add UTXOs to batch - blkState.inputs.Union(txExecutor.Inputs) + inputs.Union(txExecutor.Inputs) - onAcceptState.AddTx(tx, status.Committed) + state.AddTx(tx, status.Committed) if txExecutor.OnAccept != nil { funcs = append(funcs, txExecutor.OnAccept) } for chainID, txRequests := range txExecutor.AtomicRequests { // Add/merge in the atomic requests represented by [tx] - chainRequests, exists := blkState.atomicRequests[chainID] + chainRequests, exists := atomicRequests[chainID] if !exists { - blkState.atomicRequests[chainID] = txRequests + atomicRequests[chainID] = txRequests continue } @@ -455,48 +493,19 @@ func (v *verifier) standardBlock( } } - if err := v.verifyUniqueInputs(b, blkState.inputs); err != nil { - return err + if err := v.verifyUniqueInputs(parentID, inputs); err != nil { + return nil, nil, nil, err } if numFuncs 
:= len(funcs); numFuncs == 1 { - blkState.onAcceptFunc = funcs[0] + onAcceptFunc = funcs[0] } else if numFuncs > 1 { - blkState.onAcceptFunc = func() { + onAcceptFunc = func() { for _, f := range funcs { f() } } } - blkID := b.ID() - v.blkIDToState[blkID] = blkState - - v.Mempool.Remove(b.Transactions) - return nil -} - -// verifyUniqueInputs verifies that the inputs of the given block are not -// duplicated in any of the parent blocks pinned in memory. -func (v *verifier) verifyUniqueInputs(block block.Block, inputs set.Set[ids.ID]) error { - if inputs.Len() == 0 { - return nil - } - - // Check for conflicts in ancestors. - for { - parentID := block.Parent() - parentState, ok := v.blkIDToState[parentID] - if !ok { - // The parent state isn't pinned in memory. - // This means the parent must be accepted already. - return nil - } - - if parentState.inputs.Overlaps(inputs) { - return errConflictingParentTxs - } - - block = parentState.statelessBlock - } + return inputs, atomicRequests, onAcceptFunc, nil } diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index bc4ecb746005..6136bd8c1a80 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -321,6 +321,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() parentStatelessBlk := block.NewMockBlock(ctrl) + parentOnDecisionState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -329,10 +330,10 @@ func TestVerifierVisitCommitBlock(t *testing.T) { parentID: { statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: parentOnCommitState, - onAbortState: parentOnAbortState, + onDecisionState: parentOnDecisionState, + onCommitState: parentOnCommitState, + onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -391,6 +392,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() parentStatelessBlk := block.NewMockBlock(ctrl) + parentOnDecisionState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -399,10 +401,10 @@ func TestVerifierVisitAbortBlock(t *testing.T) { parentID: { statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: parentOnCommitState, - onAbortState: parentOnAbortState, + onDecisionState: parentOnDecisionState, + onCommitState: parentOnCommitState, + onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -559,9 +561,11 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { // setup parent state parentTime := defaultGenesisTime - s.EXPECT().GetLastAccepted().Return(parentID).Times(2) - s.EXPECT().GetTimestamp().Return(parentTime).Times(2) + s.EXPECT().GetLastAccepted().Return(parentID).Times(3) + s.EXPECT().GetTimestamp().Return(parentTime).Times(3) + onDecisionState, err := state.NewDiff(parentID, backend) + require.NoError(err) 
onCommitState, err := state.NewDiff(parentID, backend) require.NoError(err) onAbortState, err := state.NewDiff(parentID, backend) @@ -570,8 +574,9 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { timestamp: test.parentTime, statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, } @@ -652,9 +657,11 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { // setup parent state parentTime := defaultGenesisTime - s.EXPECT().GetLastAccepted().Return(parentID).Times(2) - s.EXPECT().GetTimestamp().Return(parentTime).Times(2) + s.EXPECT().GetLastAccepted().Return(parentID).Times(3) + s.EXPECT().GetTimestamp().Return(parentTime).Times(3) + onDecisionState, err := state.NewDiff(parentID, backend) + require.NoError(err) onCommitState, err := state.NewDiff(parentID, backend) require.NoError(err) onAbortState, err := state.NewDiff(parentID, backend) @@ -663,8 +670,9 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { timestamp: test.parentTime, statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, } @@ -696,11 +704,9 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { backend := &backend{ blkIDToState: map[ids.ID]*blockState{ grandParentID: { - standardBlockState: standardBlockState{ - inputs: atomicInputs, - }, statelessBlock: grandParentStatelessBlk, onAcceptState: grandParentState, + inputs: atomicInputs, }, parentID: { statelessBlock: parentStatelessBlk, @@ -794,7 +800,6 @@ func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) onCommitState: parentOnCommitState, onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -852,7 +857,6 @@ func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { onCommitState: parentOnCommitState, onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, diff --git a/vms/platformvm/block/mock_block.go b/vms/platformvm/block/mock_block.go index 9cc2541de0d2..7bd281192253 100644 --- a/vms/platformvm/block/mock_block.go +++ b/vms/platformvm/block/mock_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/block (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=block -destination=vms/platformvm/block/mock_block.go github.com/ava-labs/avalanchego/vms/platformvm/block Block +// // Package block is a generated GoMock package. package block @@ -88,7 +90,7 @@ func (m *MockBlock) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. -func (mr *MockBlockMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockBlock)(nil).InitCtx), arg0) } @@ -130,7 +132,7 @@ func (m *MockBlock) Visit(arg0 Visitor) error { } // Visit indicates an expected call of Visit. 
-func (mr *MockBlockMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Visit(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockBlock)(nil).Visit), arg0) } @@ -144,7 +146,7 @@ func (m *MockBlock) initialize(arg0 []byte) error { } // initialize indicates an expected call of initialize. -func (mr *MockBlockMockRecorder) initialize(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) initialize(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "initialize", reflect.TypeOf((*MockBlock)(nil).initialize), arg0) } diff --git a/vms/platformvm/block/parse.go b/vms/platformvm/block/parse.go index 1a97dca2e4fd..e667907947e4 100644 --- a/vms/platformvm/block/parse.go +++ b/vms/platformvm/block/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/platformvm/block/parse_test.go b/vms/platformvm/block/parse_test.go index 906824effd96..d21ae9da9409 100644 --- a/vms/platformvm/block/parse_test.go +++ b/vms/platformvm/block/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -25,12 +25,12 @@ func TestStandardBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - txs, err := testDecisionTxs() + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { // build block - apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, txs) + apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, decisionTxs) require.NoError(err) // parse block @@ -44,10 +44,10 @@ func TestStandardBlocks(t *testing.T) { require.Equal(apricotStandardBlk.Height(), parsed.Height()) require.IsType(&ApricotStandardBlock{}, parsed) - require.Equal(txs, parsed.Txs()) + require.Equal(decisionTxs, parsed.Txs()) // check that banff standard block can be built and parsed - banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, txs) + banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, decisionTxs) require.NoError(err) // parse block @@ -61,7 +61,7 @@ func TestStandardBlocks(t *testing.T) { require.Equal(banffStandardBlk.Height(), parsed.Height()) require.IsType(&BanffStandardBlock{}, parsed) parsedBanffStandardBlk := parsed.(*BanffStandardBlock) - require.Equal(txs, parsedBanffStandardBlk.Txs()) + require.Equal(decisionTxs, parsedBanffStandardBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffStandardBlk.Timestamp(), parsedBanffStandardBlk.Timestamp()) @@ -77,7 +77,9 @@ func TestProposalBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - tx, err := testProposalTx() + proposalTx, err := testProposalTx() + require.NoError(err) + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -85,7 +87,7 @@ func TestProposalBlocks(t *testing.T) { apricotProposalBlk, err := NewApricotProposalBlock( parentID, height, - tx, + proposalTx, ) require.NoError(err) @@ 
-101,14 +103,15 @@ func TestProposalBlocks(t *testing.T) { require.IsType(&ApricotProposalBlock{}, parsed) parsedApricotProposalBlk := parsed.(*ApricotProposalBlock) - require.Equal([]*txs.Tx{tx}, parsedApricotProposalBlk.Txs()) + require.Equal([]*txs.Tx{proposalTx}, parsedApricotProposalBlk.Txs()) // check that banff proposal block can be built and parsed banffProposalBlk, err := NewBanffProposalBlock( blkTimestamp, parentID, height, - tx, + proposalTx, + []*txs.Tx{}, ) require.NoError(err) @@ -119,17 +122,47 @@ func TestProposalBlocks(t *testing.T) { // compare content require.Equal(banffProposalBlk.ID(), parsed.ID()) require.Equal(banffProposalBlk.Bytes(), parsed.Bytes()) - require.Equal(banffProposalBlk.Parent(), banffProposalBlk.Parent()) + require.Equal(banffProposalBlk.Parent(), parsed.Parent()) require.Equal(banffProposalBlk.Height(), parsed.Height()) require.IsType(&BanffProposalBlock{}, parsed) parsedBanffProposalBlk := parsed.(*BanffProposalBlock) - require.Equal([]*txs.Tx{tx}, parsedBanffProposalBlk.Txs()) + require.Equal([]*txs.Tx{proposalTx}, parsedBanffProposalBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffProposalBlk.Timestamp(), parsedBanffProposalBlk.Timestamp()) // backward compatibility check require.Equal(parsedApricotProposalBlk.Txs(), parsedBanffProposalBlk.Txs()) + + // check that banff proposal block with decisionTxs can be built and parsed + banffProposalBlkWithDecisionTxs, err := NewBanffProposalBlock( + blkTimestamp, + parentID, + height, + proposalTx, + decisionTxs, + ) + require.NoError(err) + + // parse block + parsed, err = Parse(cdc, banffProposalBlkWithDecisionTxs.Bytes()) + require.NoError(err) + + // compare content + require.Equal(banffProposalBlkWithDecisionTxs.ID(), parsed.ID()) + require.Equal(banffProposalBlkWithDecisionTxs.Bytes(), parsed.Bytes()) + require.Equal(banffProposalBlkWithDecisionTxs.Parent(), parsed.Parent()) + require.Equal(banffProposalBlkWithDecisionTxs.Height(), parsed.Height()) + require.IsType(&BanffProposalBlock{}, parsed) + parsedBanffProposalBlkWithDecisionTxs := parsed.(*BanffProposalBlock) + + l := len(decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, decisionTxs) + expectedTxs[l] = proposalTx + require.Equal(expectedTxs, parsedBanffProposalBlkWithDecisionTxs.Txs()) + + require.Equal(banffProposalBlkWithDecisionTxs.Timestamp(), parsedBanffProposalBlkWithDecisionTxs.Timestamp()) } } @@ -224,7 +257,7 @@ func TestAtomicBlock(t *testing.T) { require := require.New(t) parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - tx, err := testAtomicTx() + atomicTx, err := testAtomicTx() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -232,7 +265,7 @@ func TestAtomicBlock(t *testing.T) { atomicBlk, err := NewApricotAtomicBlock( parentID, height, - tx, + atomicTx, ) require.NoError(err) @@ -248,7 +281,7 @@ func TestAtomicBlock(t *testing.T) { require.IsType(&ApricotAtomicBlock{}, parsed) parsedAtomicBlk := parsed.(*ApricotAtomicBlock) - require.Equal([]*txs.Tx{tx}, parsedAtomicBlk.Txs()) + require.Equal([]*txs.Tx{atomicTx}, parsedAtomicBlk.Txs()) } } diff --git a/vms/platformvm/block/proposal_block.go b/vms/platformvm/block/proposal_block.go index 05e23b649949..4160db57c4a9 100644 --- a/vms/platformvm/block/proposal_block.go +++ b/vms/platformvm/block/proposal_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package block @@ -18,16 +18,23 @@ var ( ) type BanffProposalBlock struct { - Time uint64 `serialize:"true" json:"time"` - // Transactions is currently unused. This is populated so that introducing - // them in the future will not require a codec change. - // - // TODO: when Transactions is used, we must correctly verify and apply their - // changes. - Transactions []*txs.Tx `serialize:"true" json:"-"` + Time uint64 `serialize:"true" json:"time"` + Transactions []*txs.Tx `serialize:"true" json:"txs"` ApricotProposalBlock `serialize:"true"` } +func (b *BanffProposalBlock) initialize(bytes []byte) error { + if err := b.ApricotProposalBlock.initialize(bytes); err != nil { + return err + } + for _, tx := range b.Transactions { + if err := tx.Initialize(txs.Codec); err != nil { + return fmt.Errorf("failed to initialize tx: %w", err) + } + } + return nil +} + func (b *BanffProposalBlock) InitCtx(ctx *snow.Context) { for _, tx := range b.Transactions { tx.Unsigned.InitCtx(ctx) @@ -39,6 +46,14 @@ func (b *BanffProposalBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } +func (b *BanffProposalBlock) Txs() []*txs.Tx { + l := len(b.Transactions) + txs := make([]*txs.Tx, l+1) + copy(txs, b.Transactions) + txs[l] = b.Tx + return txs +} + func (b *BanffProposalBlock) Visit(v Visitor) error { return v.BanffProposalBlock(b) } @@ -47,19 +62,21 @@ func NewBanffProposalBlock( timestamp time.Time, parentID ids.ID, height uint64, - tx *txs.Tx, + proposalTx *txs.Tx, + decisionTxs []*txs.Tx, ) (*BanffProposalBlock, error) { blk := &BanffProposalBlock{ - Time: uint64(timestamp.Unix()), + Transactions: decisionTxs, + Time: uint64(timestamp.Unix()), ApricotProposalBlock: ApricotProposalBlock{ CommonBlock: CommonBlock{ PrntID: parentID, Hght: height, }, - Tx: tx, + Tx: proposalTx, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotProposalBlock struct { @@ -102,5 +119,5 @@ func NewApricotProposalBlock( }, Tx: tx, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/vms/platformvm/block/proposal_block_test.go b/vms/platformvm/block/proposal_block_test.go index 9c1038c51c98..7fbc44191f41 100644 --- a/vms/platformvm/block/proposal_block_test.go +++ b/vms/platformvm/block/proposal_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -10,53 +10,70 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestNewBanffProposalBlock(t *testing.T) { - require := require.New(t) - timestamp := time.Now().Truncate(time.Second) parentID := ids.GenerateTestID() height := uint64(1337) + proposalTx, err := testProposalTx() + require.NoError(t, err) + decisionTxs, err := testDecisionTxs() + require.NoError(t, err) + + type test struct { + name string + proposalTx *txs.Tx + decisionTxs []*txs.Tx + } - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, + tests := []test{ + { + name: "no decision txs", + proposalTx: proposalTx, + decisionTxs: []*txs.Tx{}, + }, + { + name: "decision txs", + proposalTx: proposalTx, + decisionTxs: decisionTxs, }, - Creds: []verify.Verifiable{}, } - require.NoError(tx.Initialize(txs.Codec)) - blk, err := NewBanffProposalBlock( - timestamp, - parentID, - height, - tx, - ) - require.NoError(err) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - // Make sure the block and tx are initialized - require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) - require.Equal(timestamp, blk.Timestamp()) - require.Equal(parentID, blk.Parent()) - require.Equal(height, blk.Height()) + blk, err := NewBanffProposalBlock( + timestamp, + parentID, + height, + test.proposalTx, + test.decisionTxs, + ) + require.NoError(err) + + require.NotEmpty(blk.Bytes()) + require.Equal(parentID, blk.Parent()) + require.Equal(height, blk.Height()) + require.Equal(timestamp, blk.Timestamp()) + + l := len(test.decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, test.decisionTxs) + expectedTxs[l] = test.proposalTx + + blkTxs := blk.Txs() + require.Equal(expectedTxs, blkTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } + }) + } } func TestNewApricotProposalBlock(t *testing.T) { @@ -64,37 +81,28 @@ func TestNewApricotProposalBlock(t *testing.T) { parentID := ids.GenerateTestID() height := uint64(1337) - - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - }, - Creds: []verify.Verifiable{}, - } - require.NoError(tx.Initialize(txs.Codec)) + proposalTx, err := testProposalTx() + require.NoError(err) blk, err := NewApricotProposalBlock( parentID, height, - tx, + proposalTx, ) require.NoError(err) - // Make sure the block and tx are initialized require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) require.Equal(parentID, blk.Parent()) 
require.Equal(height, blk.Height()) + + expectedTxs := []*txs.Tx{proposalTx} + + blkTxs := blk.Txs() + require.Equal(blkTxs, expectedTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } } diff --git a/vms/platformvm/block/serialization_test.go b/vms/platformvm/block/serialization_test.go index 031e527be25f..8e2002c3636c 100644 --- a/vms/platformvm/block/serialization_test.go +++ b/vms/platformvm/block/serialization_test.go @@ -1,14 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "encoding/json" "fmt" "testing" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -105,11 +109,118 @@ func TestBanffBlockSerialization(t *testing.T) { for _, test := range tests { testName := fmt.Sprintf("%T", test.block) + block := test.block t.Run(testName, func(t *testing.T) { require := require.New(t) - require.NoError(initialize(test.block)) - require.Equal(test.bytes, test.block.Bytes()) + got, err := Codec.Marshal(CodecVersion, &block) + require.NoError(err) + require.Equal(test.bytes, got) }) } } + +func TestBanffProposalBlockJSON(t *testing.T) { + require := require.New(t) + + simpleBanffProposalBlock := &BanffProposalBlock{ + Time: 123456, + ApricotProposalBlock: ApricotProposalBlock{ + CommonBlock: CommonBlock{ + PrntID: ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'}, + Hght: 1337, + BlockID: ids.ID{'b', 'l', 'o', 'c', 'k', 'I', 'D'}, + }, + Tx: &txs.Tx{ + Unsigned: &txs.AdvanceTimeTx{ + Time: 123457, + }, + }, + }, + } + + simpleBanffProposalBlockBytes, err := json.MarshalIndent(simpleBanffProposalBlock, "", "\t") + require.NoError(err) + + require.Equal(`{ + "time": 123456, + "txs": null, + "parentID": "rVcYrvnGXdoJBeYQRm5ZNaCGHeVyqcHHJu8Yd89kJcef6V5Eg", + "height": 1337, + "id": "kM6h4d2UKYEDzQXm7KNqyeBJLjhb42J24m4L4WACB5didf3pk", + "tx": { + "unsignedTx": { + "time": 123457 + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } +}`, string(simpleBanffProposalBlockBytes)) + + complexBanffProposalBlock := simpleBanffProposalBlock + complexBanffProposalBlock.Transactions = []*txs.Tx{ + { + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{}, + Memo: []byte("KilroyWasHere"), + }, + }, + }, + { + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{}, + Memo: []byte("KilroyWasHere2"), + }, + }, + }, + } + + complexBanffProposalBlockBytes, err := json.MarshalIndent(complexBanffProposalBlock, "", "\t") + require.NoError(err) + + require.Equal(`{ + "time": 123456, + "txs": [ + { + "unsignedTx": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [], + "memo": "0x4b696c726f7957617348657265" + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + }, + { + "unsignedTx": { + "networkID": 1, + "blockchainID": 
"11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [], + "memo": "0x4b696c726f795761734865726532" + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } + ], + "parentID": "rVcYrvnGXdoJBeYQRm5ZNaCGHeVyqcHHJu8Yd89kJcef6V5Eg", + "height": 1337, + "id": "kM6h4d2UKYEDzQXm7KNqyeBJLjhb42J24m4L4WACB5didf3pk", + "tx": { + "unsignedTx": { + "time": 123457 + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } +}`, string(complexBanffProposalBlockBytes)) +} diff --git a/vms/platformvm/block/standard_block.go b/vms/platformvm/block/standard_block.go index a088a9eab696..c7d35b12f70d 100644 --- a/vms/platformvm/block/standard_block.go +++ b/vms/platformvm/block/standard_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -46,7 +46,7 @@ func NewBanffStandardBlock( Transactions: txs, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotStandardBlock struct { @@ -58,7 +58,7 @@ func (b *ApricotStandardBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) for _, tx := range b.Transactions { if err := tx.Initialize(txs.Codec); err != nil { - return fmt.Errorf("failed to sign block: %w", err) + return fmt.Errorf("failed to initialize tx: %w", err) } } return nil @@ -93,5 +93,5 @@ func NewApricotStandardBlock( }, Transactions: txs, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/vms/platformvm/block/standard_block_test.go b/vms/platformvm/block/standard_block_test.go index d417a37fb96a..4162aadb05e1 100644 --- a/vms/platformvm/block/standard_block_test.go +++ b/vms/platformvm/block/standard_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/platformvm/block/visitor.go b/vms/platformvm/block/visitor.go index f05dc5d05129..6c27b5386c1a 100644 --- a/vms/platformvm/block/visitor.go +++ b/vms/platformvm/block/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block diff --git a/vms/platformvm/camino_helpers_test.go b/vms/platformvm/camino_helpers_test.go index b4af4304317c..17f48759b2d9 100644 --- a/vms/platformvm/camino_helpers_test.go +++ b/vms/platformvm/camino_helpers_test.go @@ -99,7 +99,9 @@ func newCaminoVM(t *testing.T, genesisConfig api.Camino, phase test.Phase, genes ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(testSubnet1)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), testSubnet1)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) require.NoError(blk.Verify(context.Background())) diff --git a/vms/platformvm/camino_service.go b/vms/platformvm/camino_service.go index a7ce23a05224..92f127756ccb 100644 --- a/vms/platformvm/camino_service.go +++ b/vms/platformvm/camino_service.go @@ -244,7 +244,7 @@ func (s *CaminoService) GetConfiguration(_ *http.Request, _ *struct{}, reply *Ge reply.SupplyCap = json.Uint64(s.vm.RewardConfig.SupplyCap) // Codec information - reply.CodecVersion = json.Uint16(txs.Version) + reply.CodecVersion = json.Uint16(txs.CodecVersion) caminoConfig, err := s.vm.state.CaminoConfig() if err != nil { @@ -267,7 +267,7 @@ type SetAddressStateArgs struct { } // AddAdressState issues an AddAdressStateTx -func (s *CaminoService) SetAddressState(_ *http.Request, args *SetAddressStateArgs, response *api.JSONTxID) error { +func (s *CaminoService) SetAddressState(req *http.Request, args *SetAddressStateArgs, response *api.JSONTxID) error { s.vm.ctx.Log.Debug("Platform: SetAddressState called") s.vm.ctx.Lock.Lock() @@ -303,10 +303,7 @@ func (s *CaminoService) SetAddressState(_ *http.Request, args *SetAddressStateAr response.TxID = tx.ID() - if err = s.vm.Builder.AddUnverifiedTx(tx); err != nil { - return err - } - return nil + return s.vm.Network.IssueTx(req.Context(), tx) } // GetAddressStates retrieves the state applied to an address (see setAddressState) @@ -430,7 +427,7 @@ func (s *CaminoService) Spend(_ *http.Request, args *SpendArgs, response *SpendR return fmt.Errorf("%w: %w", errCreateTransferables, err) } - bytes, err := txs.Codec.Marshal(txs.Version, ins) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, ins) if err != nil { return fmt.Errorf("%w: %w", errSerializeTransferables, err) } @@ -439,7 +436,7 @@ func (s *CaminoService) Spend(_ *http.Request, args *SpendArgs, response *SpendR return fmt.Errorf("%w: %w", errEncodeTransferables, err) } - bytes, err = txs.Codec.Marshal(txs.Version, outs) + bytes, err = txs.Codec.Marshal(txs.CodecVersion, outs) if err != nil { return fmt.Errorf("%w: %w", errSerializeTransferables, err) } @@ -456,7 +453,7 @@ func (s *CaminoService) Spend(_ *http.Request, args *SpendArgs, response *SpendR } } - bytes, err = txs.Codec.Marshal(txs.Version, owners) + bytes, err = txs.Codec.Marshal(txs.CodecVersion, owners) if err != nil { return fmt.Errorf("%w: %w", errSerializeOwners, err) } @@ -477,7 +474,7 @@ type RegisterNodeArgs struct { } // RegisterNode issues an RegisterNodeTx -func (s *CaminoService) RegisterNode(_ *http.Request, args *RegisterNodeArgs, reply *api.JSONTxID) error { +func (s *CaminoService) RegisterNode(req *http.Request, args *RegisterNodeArgs, reply *api.JSONTxID) error { s.vm.ctx.Log.Debug("Platform: RegisterNode called") s.vm.ctx.Lock.Lock() @@ -513,10 +510,7 @@ func (s *CaminoService) RegisterNode(_ *http.Request, args *RegisterNodeArgs, re reply.TxID = tx.ID() - if err = s.vm.Builder.AddUnverifiedTx(tx); err != nil { - return err - } - return nil + return 
s.vm.Network.IssueTx(req.Context(), tx) } type ClaimedAmount struct { @@ -536,7 +530,7 @@ type ClaimArgs struct { } // Claim issues an ClaimTx -func (s *CaminoService) Claim(_ *http.Request, args *ClaimArgs, reply *api.JSONTxID) error { +func (s *CaminoService) Claim(req *http.Request, args *ClaimArgs, reply *api.JSONTxID) error { s.vm.ctx.Log.Debug("Platform: Claim called") s.vm.ctx.Lock.Lock() @@ -592,11 +586,7 @@ func (s *CaminoService) Claim(_ *http.Request, args *ClaimArgs, reply *api.JSONT reply.TxID = tx.ID() - if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - return nil + return s.vm.Network.IssueTx(req.Context(), tx) } type TransferArgs struct { @@ -608,7 +598,7 @@ type TransferArgs struct { } // Transfer issues an BaseTx -func (s *CaminoService) Transfer(_ *http.Request, args *TransferArgs, reply *api.JSONTxID) error { +func (s *CaminoService) Transfer(req *http.Request, args *TransferArgs, reply *api.JSONTxID) error { s.vm.ctx.Log.Debug("Platform: Transfer called") s.vm.ctx.Lock.Lock() @@ -642,11 +632,7 @@ func (s *CaminoService) Transfer(_ *http.Request, args *TransferArgs, reply *api reply.TxID = tx.ID() - if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - return nil + return s.vm.Network.IssueTx(req.Context(), tx) } func (s *CaminoService) GetRegisteredShortIDLink(_ *http.Request, args *api.JSONAddress, response *api.JSONAddress) error { @@ -1077,8 +1063,21 @@ type GetValidatorsAtReply2 struct { Validators map[ids.NodeID]ConsortiumMemberValidator `json:"validators"` } +type GetValidatorsAtResponseWrapper struct { + LockModeBondDeposit bool + avax GetValidatorsAtReply + camino GetValidatorsAtReply2 +} + +func (response GetValidatorsAtResponseWrapper) MarshalJSON() ([]byte, error) { + if !response.LockModeBondDeposit { + return response.avax.MarshalJSON() + } + return stdjson.Marshal(response.camino) +} + // Overrides avax service GetValidatorsAt -func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtReply2) error { +func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtResponseWrapper) error { height := uint64(args.Height) s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), @@ -1088,13 +1087,23 @@ func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtAr ) s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() + caminoConfig, err := s.vm.state.CaminoConfig() + s.vm.ctx.Lock.Unlock() + if err != nil { + return err + } + reply.LockModeBondDeposit = caminoConfig.LockModeBondDeposit + if !caminoConfig.LockModeBondDeposit { + return s.Service.GetValidatorsAt(r, args, &reply.avax) + } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() vdrs, err := s.vm.GetValidatorSet(r.Context(), height, args.SubnetID) if err != nil { return fmt.Errorf("failed to get validator set: %w", err) } - reply.Validators = make(map[ids.NodeID]ConsortiumMemberValidator, len(vdrs)) + reply.camino.Validators = make(map[ids.NodeID]ConsortiumMemberValidator, len(vdrs)) for _, vdr := range vdrs { cMemberAddr, err := s.vm.state.GetShortIDLink(ids.ShortID(vdr.NodeID), state.ShortLinkKeyRegisterNode) if err != nil { @@ -1106,7 +1115,7 @@ func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtAr return fmt.Errorf("failed to format consortium member address: %w", err) } - reply.Validators[vdr.NodeID] = 
ConsortiumMemberValidator{ + reply.camino.Validators[vdr.NodeID] = ConsortiumMemberValidator{ ValidatorWeight: json.Uint64(vdr.Weight), ConsortiumMemberAddress: addrStr, } diff --git a/vms/platformvm/camino_vm_test.go b/vms/platformvm/camino_vm_test.go index 756120268063..5512e77e5b6a 100644 --- a/vms/platformvm/camino_vm_test.go +++ b/vms/platformvm/camino_vm_test.go @@ -179,11 +179,11 @@ func TestRemoveDeferredValidator(t *testing.T) { options, err := blk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) - commit := options[1].(*blockexecutor.Block) + commit := options[0].(*blockexecutor.Block) _, ok := commit.Block.(*block.BanffCommitBlock) require.True(ok) - abort := options[0].(*blockexecutor.Block) + abort := options[1].(*blockexecutor.Block) _, ok = abort.Block.(*block.BanffAbortBlock) require.True(ok) @@ -378,11 +378,11 @@ func TestRemoveReactivatedValidator(t *testing.T) { options, err := blk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) - commit := options[1].(*blockexecutor.Block) + commit := options[0].(*blockexecutor.Block) _, ok := commit.Block.(*block.BanffCommitBlock) require.True(ok) - abort := options[0].(*blockexecutor.Block) + abort := options[1].(*blockexecutor.Block) _, ok = abort.Block.(*block.BanffAbortBlock) require.True(ok) @@ -624,8 +624,8 @@ func TestProposals(t *testing.T) { // Try to vote on proposal, expect to fail vm.clock.Set(baseFeeProposalState.StartTime().Add(-time.Second)) addVoteTx := buildSimpleVoteTx(t, vm, proposerKey, fee, proposalTx.ID(), test.FundedKeys[0], 0) - err = vm.Builder.AddUnverifiedTx(addVoteTx) - require.ErrorIs(err, txexecutor.ErrProposalInactive) + err = issueTx(t, vm, addVoteTx) + require.ErrorIs(err, dac.ErrNotYetActive) vm.clock.Set(baseFeeProposalState.StartTime()) optionWeights := make([]uint32, len(baseFeeProposalState.Options)) @@ -1168,7 +1168,7 @@ func TestExcludeMemberProposals(t *testing.T) { if tt.moreExclude { excludeMemberProposalTx := buildExcludeMemberProposalTx(t, vm, fundsKey, proposalBondAmount, fee, consortiumSecretaryKey, memberToExcludeAddr, proposalStartTime, proposalStartTime.Add(time.Duration(dac.ExcludeMemberProposalMinDuration)*time.Second), true) - err = vm.Builder.AddUnverifiedTx(excludeMemberProposalTx) + err = issueTx(t, vm, excludeMemberProposalTx) require.ErrorIs(err, txexecutor.ErrInvalidProposal) height, err = vm.GetCurrentHeight(context.Background()) require.NoError(err) @@ -1279,7 +1279,7 @@ func TestExcludeMemberProposals(t *testing.T) { func buildAndAcceptBlock(t *testing.T, vm *VM, tx *txs.Tx) block.Block { t.Helper() if tx != nil { - require.NoError(t, vm.Builder.AddUnverifiedTx(tx)) + require.NoError(t, issueTx(t, vm, tx)) } blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(t, err) @@ -1381,7 +1381,7 @@ func buildBaseFeeProposalTx( End: uint64(endTime.Unix()), Options: options, }} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposal) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposal) require.NoError(t, err) proposalTx, err := txs.NewSigned(&txs.AddProposalTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1428,7 +1428,7 @@ func buildAddMemberProposalTx( proposal = &dac.AdminProposal{Proposal: proposal} } wrapper := &txs.ProposalWrapper{Proposal: proposal} - proposalBytes, err := txs.Codec.Marshal(txs.Version, wrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, wrapper) require.NoError(t, err) proposalTx, err := txs.NewSigned(&txs.AddProposalTx{ BaseTx: txs.BaseTx{BaseTx: 
avax.BaseTx{ @@ -1476,7 +1476,7 @@ func buildExcludeMemberProposalTx( proposal = &dac.AdminProposal{Proposal: proposal} } wrapper := &txs.ProposalWrapper{Proposal: proposal} - proposalBytes, err := txs.Codec.Marshal(txs.Version, wrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, wrapper) require.NoError(t, err) proposalTx, err := txs.NewSigned(&txs.AddProposalTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1535,7 +1535,7 @@ func buildSimpleVoteTx( nil, nil, 0, ) require.NoError(t, err) - voteBytes, err := txs.Codec.Marshal(txs.Version, &txs.VoteWrapper{Vote: &dac.SimpleVote{OptionIndex: votedOption}}) + voteBytes, err := txs.Codec.Marshal(txs.CodecVersion, &txs.VoteWrapper{Vote: &dac.SimpleVote{OptionIndex: votedOption}}) require.NoError(t, err) addVoteTx, err := txs.NewSigned(&txs.AddVoteTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1610,3 +1610,10 @@ func buildAndAcceptBaseTx( require.Len(t, blk.Txs(), 1) checkTx(t, vm, blk.ID(), feeTestingTx.ID()) } + +func issueTx(t *testing.T, vm *VM, tx *txs.Tx) error { + t.Helper() + vm.ctx.Lock.Unlock() + defer vm.ctx.Lock.Lock() + return vm.issueTx(context.Background(), tx) +} diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 35b72a9be9ac..4a659d6a766b 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/vms/platformvm/client_permissionless_validator.go b/vms/platformvm/client_permissionless_validator.go index c9baac856073..3974f770658d 100644 --- a/vms/platformvm/client_permissionless_validator.go +++ b/vms/platformvm/client_permissionless_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/vms/platformvm/config/config.go b/vms/platformvm/config/config.go index 302586268c5c..250245db73ec 100644 --- a/vms/platformvm/config/config.go +++ b/vms/platformvm/config/config.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config @@ -115,8 +115,8 @@ type Config struct { // Time of the Cortina network upgrade CortinaTime time.Time - // Time of the D network upgrade - DTime time.Time + // Time of the Durango network upgrade + DurangoTime time.Time // Time of the Athens Phase network upgrade AthensPhaseTime time.Time @@ -155,9 +155,8 @@ func (c *Config) IsCortinaActivated(timestamp time.Time) bool { return !timestamp.Before(c.CortinaTime) } -// TODO: Rename -func (c *Config) IsDActivated(timestamp time.Time) bool { - return !timestamp.Before(c.DTime) +func (c *Config) IsDurangoActivated(timestamp time.Time) bool { + return !timestamp.Before(c.DurangoTime) } func (c *Config) IsAthensPhaseActivated(timestamp time.Time) bool { diff --git a/vms/platformvm/config/execution_config.go b/vms/platformvm/config/execution_config.go index bfdb191f1281..e182758e0c50 100644 --- a/vms/platformvm/config/execution_config.go +++ b/vms/platformvm/config/execution_config.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config import ( "encoding/json" + "time" "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/network" ) var DefaultExecutionConfig = ExecutionConfig{ + Network: network.DefaultConfig, BlockCacheSize: 64 * units.MiB, TxCacheSize: 128 * units.MiB, TransformedSubnetTxCacheSize: 4 * units.MiB, @@ -19,19 +22,22 @@ var DefaultExecutionConfig = ExecutionConfig{ BlockIDCacheSize: 8192, FxOwnerCacheSize: 4 * units.MiB, ChecksumsEnabled: false, + MempoolPruneFrequency: 30 * time.Minute, } // ExecutionConfig provides execution parameters of PlatformVM type ExecutionConfig struct { - BlockCacheSize int `json:"block-cache-size"` - TxCacheSize int `json:"tx-cache-size"` - TransformedSubnetTxCacheSize int `json:"transformed-subnet-tx-cache-size"` - RewardUTXOsCacheSize int `json:"reward-utxos-cache-size"` - ChainCacheSize int `json:"chain-cache-size"` - ChainDBCacheSize int `json:"chain-db-cache-size"` - BlockIDCacheSize int `json:"block-id-cache-size"` - FxOwnerCacheSize int `json:"fx-owner-cache-size"` - ChecksumsEnabled bool `json:"checksums-enabled"` + Network network.Config `json:"network"` + BlockCacheSize int `json:"block-cache-size"` + TxCacheSize int `json:"tx-cache-size"` + TransformedSubnetTxCacheSize int `json:"transformed-subnet-tx-cache-size"` + RewardUTXOsCacheSize int `json:"reward-utxos-cache-size"` + ChainCacheSize int `json:"chain-cache-size"` + ChainDBCacheSize int `json:"chain-db-cache-size"` + BlockIDCacheSize int `json:"block-id-cache-size"` + FxOwnerCacheSize int `json:"fx-owner-cache-size"` + ChecksumsEnabled bool `json:"checksums-enabled"` + MempoolPruneFrequency time.Duration `json:"mempool-prune-frequency"` } // GetExecutionConfig returns an ExecutionConfig diff --git a/vms/platformvm/config/execution_config_test.go b/vms/platformvm/config/execution_config_test.go index 0adbd862bd2d..89fd5cd55b05 100644 --- a/vms/platformvm/config/execution_config_test.go +++ b/vms/platformvm/config/execution_config_test.go @@ -1,12 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config import ( "testing" + "time" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/platformvm/network" ) func TestExecutionConfigUnmarshal(t *testing.T) { @@ -39,6 +42,68 @@ func TestExecutionConfigUnmarshal(t *testing.T) { t.Run("all values extracted from json", func(t *testing.T) { require := require.New(t) b := []byte(`{ + "network": { + "max-validator-set-staleness": 1, + "target-gossip-size": 2, + "pull-gossip-poll-size": 3, + "pull-gossip-frequency": 4, + "pull-gossip-throttling-period": 5, + "pull-gossip-throttling-limit": 6, + "expected-bloom-filter-elements":7, + "expected-bloom-filter-false-positive-probability": 8, + "max-bloom-filter-false-positive-probability": 9, + "legacy-push-gossip-cache-size": 10 + }, + "block-cache-size": 1, + "tx-cache-size": 2, + "transformed-subnet-tx-cache-size": 3, + "reward-utxos-cache-size": 5, + "chain-cache-size": 6, + "chain-db-cache-size": 7, + "block-id-cache-size": 8, + "fx-owner-cache-size": 9, + "checksums-enabled": true, + "mempool-prune-frequency": 60000000000 + }`) + ec, err := GetExecutionConfig(b) + require.NoError(err) + expected := &ExecutionConfig{ + Network: network.Config{ + MaxValidatorSetStaleness: 1, + TargetGossipSize: 2, + PullGossipPollSize: 3, + PullGossipFrequency: 4, + PullGossipThrottlingPeriod: 5, + PullGossipThrottlingLimit: 6, + ExpectedBloomFilterElements: 7, + ExpectedBloomFilterFalsePositiveProbability: 8, + MaxBloomFilterFalsePositiveProbability: 9, + LegacyPushGossipCacheSize: 10, + }, + BlockCacheSize: 1, + TxCacheSize: 2, + TransformedSubnetTxCacheSize: 3, + RewardUTXOsCacheSize: 5, + ChainCacheSize: 6, + ChainDBCacheSize: 7, + BlockIDCacheSize: 8, + FxOwnerCacheSize: 9, + ChecksumsEnabled: true, + MempoolPruneFrequency: time.Minute, + } + require.Equal(expected, ec) + }) + + t.Run("default values applied correctly", func(t *testing.T) { + require := require.New(t) + b := []byte(`{ + "network": { + "max-validator-set-staleness": 1, + "target-gossip-size": 2, + "pull-gossip-poll-size": 3, + "pull-gossip-frequency": 4, + "pull-gossip-throttling-period": 5 + }, "block-cache-size": 1, "tx-cache-size": 2, "transformed-subnet-tx-cache-size": 3, @@ -52,6 +117,18 @@ func TestExecutionConfigUnmarshal(t *testing.T) { ec, err := GetExecutionConfig(b) require.NoError(err) expected := &ExecutionConfig{ + Network: network.Config{ + MaxValidatorSetStaleness: 1, + TargetGossipSize: 2, + PullGossipPollSize: 3, + PullGossipFrequency: 4, + PullGossipThrottlingPeriod: 5, + PullGossipThrottlingLimit: DefaultExecutionConfig.Network.PullGossipThrottlingLimit, + ExpectedBloomFilterElements: DefaultExecutionConfig.Network.ExpectedBloomFilterElements, + ExpectedBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.ExpectedBloomFilterFalsePositiveProbability, + MaxBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.MaxBloomFilterFalsePositiveProbability, + LegacyPushGossipCacheSize: DefaultExecutionConfig.Network.LegacyPushGossipCacheSize, + }, BlockCacheSize: 1, TxCacheSize: 2, TransformedSubnetTxCacheSize: 3, @@ -61,6 +138,7 @@ func TestExecutionConfigUnmarshal(t *testing.T) { BlockIDCacheSize: 8, FxOwnerCacheSize: 9, ChecksumsEnabled: true, + MempoolPruneFrequency: 30 * time.Minute, } require.Equal(expected, ec) }) diff --git a/vms/platformvm/dac/camino_add_member_proposal.go b/vms/platformvm/dac/camino_add_member_proposal.go index 3ce3949941a1..3f89ba1f1230 100644 --- a/vms/platformvm/dac/camino_add_member_proposal.go +++ 
b/vms/platformvm/dac/camino_add_member_proposal.go @@ -101,9 +101,15 @@ func (p *AddMemberProposalState) EndTime() time.Time { return time.Unix(int64(p.End), 0) } -func (p *AddMemberProposalState) IsActiveAt(time time.Time) bool { +func (p *AddMemberProposalState) VerifyActive(time time.Time) error { timestamp := uint64(time.Unix()) - return p.Start <= timestamp && timestamp <= p.End + switch { + case timestamp < p.Start: + return ErrNotYetActive + case timestamp > p.End: + return ErrNotActive // should never happen, cause finished proposals removed from state + } + return nil } func (p *AddMemberProposalState) CanBeFinished() bool { diff --git a/vms/platformvm/dac/camino_base_fee_proposal.go b/vms/platformvm/dac/camino_base_fee_proposal.go index 7d51093b3fb0..bf8c8e10673a 100644 --- a/vms/platformvm/dac/camino_base_fee_proposal.go +++ b/vms/platformvm/dac/camino_base_fee_proposal.go @@ -121,9 +121,15 @@ func (p *BaseFeeProposalState) EndTime() time.Time { return time.Unix(int64(p.End), 0) } -func (p *BaseFeeProposalState) IsActiveAt(time time.Time) bool { +func (p *BaseFeeProposalState) VerifyActive(time time.Time) error { timestamp := uint64(time.Unix()) - return p.Start <= timestamp && timestamp <= p.End + switch { + case timestamp < p.Start: + return ErrNotYetActive + case timestamp > p.End: + return ErrNotActive // should never happen, cause finished proposals removed from state + } + return nil } func (p *BaseFeeProposalState) CanBeFinished() bool { diff --git a/vms/platformvm/dac/camino_codec.go b/vms/platformvm/dac/camino_codec.go index be4325d52af8..42b2a0de8dd2 100644 --- a/vms/platformvm/dac/camino_codec.go +++ b/vms/platformvm/dac/camino_codec.go @@ -5,6 +5,7 @@ package dac import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -17,9 +18,9 @@ const Version = 0 var Codec codec.Manager func init() { - c := linearcodec.NewCaminoDefault() + c := linearcodec.NewCaminoDefault(time.Time{}) Codec = codec.NewDefaultManager() - gc := linearcodec.NewCaminoCustomMaxLength(math.MaxInt32) + gc := linearcodec.NewCaminoCustomMaxLength(time.Time{}, math.MaxInt32) errs := wrappers.Errs{} for _, c := range []linearcodec.CaminoCodec{c, gc} { diff --git a/vms/platformvm/dac/camino_exclude_member_proposal.go b/vms/platformvm/dac/camino_exclude_member_proposal.go index e471e4a0142d..dcd6a0ef6199 100644 --- a/vms/platformvm/dac/camino_exclude_member_proposal.go +++ b/vms/platformvm/dac/camino_exclude_member_proposal.go @@ -110,9 +110,15 @@ func (p *ExcludeMemberProposalState) EndTime() time.Time { return time.Unix(int64(p.End), 0) } -func (p *ExcludeMemberProposalState) IsActiveAt(time time.Time) bool { +func (p *ExcludeMemberProposalState) VerifyActive(time time.Time) error { timestamp := uint64(time.Unix()) - return p.Start <= timestamp && timestamp <= p.End + switch { + case timestamp < p.Start: + return ErrNotYetActive + case timestamp > p.End: + return ErrNotActive // should never happen, cause finished proposals removed from state + } + return nil } func (p *ExcludeMemberProposalState) CanBeFinished() bool { diff --git a/vms/platformvm/dac/camino_fee_distribution_proposal.go b/vms/platformvm/dac/camino_fee_distribution_proposal.go index d50d4c19c12f..2a3cd5f92824 100644 --- a/vms/platformvm/dac/camino_fee_distribution_proposal.go +++ b/vms/platformvm/dac/camino_fee_distribution_proposal.go @@ -134,9 +134,15 @@ func (p *FeeDistributionProposalState) EndTime() time.Time { return time.Unix(int64(p.End), 0) } -func (p 
*FeeDistributionProposalState) IsActiveAt(time time.Time) bool { +func (p *FeeDistributionProposalState) VerifyActive(time time.Time) error { timestamp := uint64(time.Unix()) - return p.Start <= timestamp && timestamp <= p.End + switch { + case timestamp < p.Start: + return ErrNotYetActive + case timestamp > p.End: + return ErrNotActive // should never happen, cause finished proposals removed from state + } + return nil } func (p *FeeDistributionProposalState) CanBeFinished() bool { diff --git a/vms/platformvm/dac/camino_general_proposal.go b/vms/platformvm/dac/camino_general_proposal.go index 8a6c4c2bdb67..7454d94e2a62 100644 --- a/vms/platformvm/dac/camino_general_proposal.go +++ b/vms/platformvm/dac/camino_general_proposal.go @@ -165,9 +165,15 @@ func (p *GeneralProposalState) EndTime() time.Time { return time.Unix(int64(p.End), 0) } -func (p *GeneralProposalState) IsActiveAt(time time.Time) bool { +func (p *GeneralProposalState) VerifyActive(time time.Time) error { timestamp := uint64(time.Unix()) - return p.Start <= timestamp && timestamp <= p.End + switch { + case timestamp < p.Start: + return ErrNotYetActive + case timestamp > p.End: + return ErrNotActive // should never happen, cause finished proposals removed from state + } + return nil } func (p *GeneralProposalState) CanBeFinished() bool { diff --git a/vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go b/vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go index afd37bcc9412..462b5d792010 100644 --- a/vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go +++ b/vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/dac (interfaces: BondTxIDsGetter) +// +// Generated by this command: +// +// mockgen -package=dac -destination=vms/platformvm/dac/camino_mock_bond_tx_ids_getter.go github.com/ava-labs/avalanchego/vms/platformvm/dac BondTxIDsGetter +// // Package dac is a generated GoMock package. package dac @@ -47,7 +49,7 @@ func (m *MockBondTxIDsGetter) AddMemberProposal(arg0 *AddMemberProposalState) ([ } // AddMemberProposal indicates an expected call of AddMemberProposal. -func (mr *MockBondTxIDsGetterMockRecorder) AddMemberProposal(arg0 interface{}) *gomock.Call { +func (mr *MockBondTxIDsGetterMockRecorder) AddMemberProposal(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMemberProposal", reflect.TypeOf((*MockBondTxIDsGetter)(nil).AddMemberProposal), arg0) } @@ -62,7 +64,7 @@ func (m *MockBondTxIDsGetter) BaseFeeProposal(arg0 *BaseFeeProposalState) ([]ids } // BaseFeeProposal indicates an expected call of BaseFeeProposal. -func (mr *MockBondTxIDsGetterMockRecorder) BaseFeeProposal(arg0 interface{}) *gomock.Call { +func (mr *MockBondTxIDsGetterMockRecorder) BaseFeeProposal(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BaseFeeProposal", reflect.TypeOf((*MockBondTxIDsGetter)(nil).BaseFeeProposal), arg0) } @@ -77,7 +79,7 @@ func (m *MockBondTxIDsGetter) ExcludeMemberProposal(arg0 *ExcludeMemberProposalS } // ExcludeMemberProposal indicates an expected call of ExcludeMemberProposal. 
-func (mr *MockBondTxIDsGetterMockRecorder) ExcludeMemberProposal(arg0 interface{}) *gomock.Call { +func (mr *MockBondTxIDsGetterMockRecorder) ExcludeMemberProposal(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExcludeMemberProposal", reflect.TypeOf((*MockBondTxIDsGetter)(nil).ExcludeMemberProposal), arg0) } @@ -92,7 +94,7 @@ func (m *MockBondTxIDsGetter) FeeDistributionProposal(arg0 *FeeDistributionPropo } // FeeDistributionProposal indicates an expected call of FeeDistributionProposal. -func (mr *MockBondTxIDsGetterMockRecorder) FeeDistributionProposal(arg0 interface{}) *gomock.Call { +func (mr *MockBondTxIDsGetterMockRecorder) FeeDistributionProposal(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeeDistributionProposal", reflect.TypeOf((*MockBondTxIDsGetter)(nil).FeeDistributionProposal), arg0) } @@ -107,7 +109,7 @@ func (m *MockBondTxIDsGetter) GeneralProposal(arg0 *GeneralProposalState) ([]ids } // GeneralProposal indicates an expected call of GeneralProposal. -func (mr *MockBondTxIDsGetterMockRecorder) GeneralProposal(arg0 interface{}) *gomock.Call { +func (mr *MockBondTxIDsGetterMockRecorder) GeneralProposal(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GeneralProposal", reflect.TypeOf((*MockBondTxIDsGetter)(nil).GeneralProposal), arg0) } diff --git a/vms/platformvm/dac/camino_proposal.go b/vms/platformvm/dac/camino_proposal.go index d21639573008..c19c5aeff78e 100644 --- a/vms/platformvm/dac/camino_proposal.go +++ b/vms/platformvm/dac/camino_proposal.go @@ -25,6 +25,8 @@ var ( errWrongOptionsCount = errors.New("wrong options count") errEndNotAfterStart = errors.New("proposal end-time is not after start-time") errWrongDuration = errors.New("wrong proposal duration") + ErrNotActive = errors.New("proposal is not active anymore") + ErrNotYetActive = errors.New("proposal is not yet active") ErrWrongVote = errors.New("this proposal can't be voted with this vote") ErrNotAllowedToVoteOnProposal = errors.New("this address has already voted or not allowed to vote on this proposal") @@ -78,7 +80,7 @@ type Proposal interface { type ProposalState interface { EndTime() time.Time - IsActiveAt(time time.Time) bool + VerifyActive(time time.Time) error // Once a proposal has become Finishable, it cannot be undone by adding more votes. Should only return true, when future votes cannot change the outcome of proposal. CanBeFinished() bool IsSuccessful() bool // should be called only for finished proposals diff --git a/vms/platformvm/docs/validators_versioning.md b/vms/platformvm/docs/validators_versioning.md index c4fce00399c5..7db716d12677 100644 --- a/vms/platformvm/docs/validators_versioning.md +++ b/vms/platformvm/docs/validators_versioning.md @@ -18,7 +18,7 @@ GetValidatorSet(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.No Validator data are collected in a struct named `validators.GetValidatorOutput` which holds for each active validator, its `NodeID`, its `Weight` and its `BLS Public Key` if it was registered. -Note that a validator `Weight` is not just its stake; its the aggregate value of the validator's own stake and all of its delegators' stake. A validator's `Weight` gauges how relevant its preference should be in consensus or Warp operations. +Note that a validator `Weight` is not just its stake; it's the aggregate value of the validator's own stake and all of its delegators' stake. 
A validator's `Weight` gauges how relevant its preference should be in consensus or Warp operations. We will see in the next section how the P-chain keeps track of this information over time as the validator set changes. @@ -35,7 +35,7 @@ These diffs are key to rebuilding the validator set at a given past height. In t The validators diffs track changes in a validator's `Weight` and `BLS Public key`. Along with the `NodeID` this is the data exposed by the `GetValidatorSet` method. -Note that `Weight` and `BLS Public key` behave differently throughout the validator lifetime: +Note that `Weight` and `BLS Public key` behave differently throughout the validator's lifetime: 1. `BLS Public key` cannot change through a validator's lifetime. It can only change when a validator is added/re-added and removed. 2. `Weight` can change throughout a validator's lifetime by the creation and removal of its delegators as well as by validator's own creation and removal. diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 5673bebefd97..834c9c8f2450 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/vms/platformvm/fx/fx.go b/vms/platformvm/fx/fx.go index 10dfeac14eae..a607b903005c 100644 --- a/vms/platformvm/fx/fx.go +++ b/vms/platformvm/fx/fx.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fx diff --git a/vms/platformvm/fx/mock_fx.go b/vms/platformvm/fx/mock_fx.go index eb5ebcf53ac4..af06d9f68c76 100644 --- a/vms/platformvm/fx/mock_fx.go +++ b/vms/platformvm/fx/mock_fx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/fx (interfaces: Fx,Owner) +// +// Generated by this command: +// +// mockgen -package=fx -destination=vms/platformvm/fx/mock_fx.go github.com/ava-labs/avalanchego/vms/platformvm/fx Fx,Owner +// // Package fx is a generated GoMock package. package fx @@ -83,22 +85,22 @@ func (mr *MockFxMockRecorder) CollectMultisigAliases(arg0, arg1 interface{}) *go } // CreateOutput mocks base method. -func (m *MockFx) CreateOutput(arg0 uint64, arg1 interface{}) (interface{}, error) { +func (m *MockFx) CreateOutput(arg0 uint64, arg1 any) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOutput", arg0, arg1) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateOutput indicates an expected call of CreateOutput. -func (mr *MockFxMockRecorder) CreateOutput(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) CreateOutput(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOutput", reflect.TypeOf((*MockFx)(nil).CreateOutput), arg0, arg1) } // Initialize mocks base method. 
-func (m *MockFx) Initialize(arg0 interface{}) error { +func (m *MockFx) Initialize(arg0 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0) ret0, _ := ret[0].(error) @@ -106,7 +108,7 @@ func (m *MockFx) Initialize(arg0 interface{}) error { } // Initialize indicates an expected call of Initialize. -func (mr *MockFxMockRecorder) Initialize(arg0 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) Initialize(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockFx)(nil).Initialize), arg0) } @@ -183,7 +185,7 @@ func (mr *MockFxMockRecorder) VerifyMultisigMessage(arg0, arg1, arg2, arg3, arg4 } // VerifyPermission mocks base method. -func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) error { +func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyPermission", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -191,13 +193,13 @@ func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) error { } // VerifyPermission indicates an expected call of VerifyPermission. -func (mr *MockFxMockRecorder) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyPermission(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPermission", reflect.TypeOf((*MockFx)(nil).VerifyPermission), arg0, arg1, arg2, arg3) } // VerifyTransfer mocks base method. -func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) error { +func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyTransfer", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -205,7 +207,7 @@ func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) error { } // VerifyTransfer indicates an expected call of VerifyTransfer. -func (mr *MockFxMockRecorder) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyTransfer(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTransfer", reflect.TypeOf((*MockFx)(nil).VerifyTransfer), arg0, arg1, arg2, arg3) } @@ -257,7 +259,7 @@ func (m *MockOwner) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. -func (mr *MockOwnerMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockOwnerMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockOwner)(nil).InitCtx), arg0) } @@ -275,3 +277,17 @@ func (mr *MockOwnerMockRecorder) Verify() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockOwner)(nil).Verify)) } + +// isState mocks base method. +func (m *MockOwner) isState() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "isState") + ret0, _ := ret[0].(error) + return ret0 +} + +// isState indicates an expected call of isState. 
+func (mr *MockOwnerMockRecorder) isState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isState", reflect.TypeOf((*MockOwner)(nil).isState)) +} diff --git a/vms/platformvm/genesis/camino.go b/vms/platformvm/genesis/camino.go index 16d8172f1942..6e9b444c8f1b 100644 --- a/vms/platformvm/genesis/camino.go +++ b/vms/platformvm/genesis/camino.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/multisig" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" @@ -76,8 +77,8 @@ func (b *Block) Init() error { return nil } -func (b *Block) Less(b1 *Block) bool { - return b.Timestamp < b1.Timestamp +func (b *Block) Compare(b1 *Block) int { + return utils.Compare(b.Timestamp, b1.Timestamp) } func (b *Block) Time() time.Time { @@ -93,7 +94,7 @@ func (b *Block) Txs() []*txs.Tx { // Generate deposit offer id from its bytes hash and set it to offer's ID field func SetDepositOfferID(offer *deposit.Offer) error { - bytes, err := txs.GenesisCodec.Marshal(txs.Version, offer) + bytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, offer) if err != nil { return err } diff --git a/vms/platformvm/genesis/codec.go b/vms/platformvm/genesis/codec.go index 7b68ac58d634..b18c40d60cca 100644 --- a/vms/platformvm/genesis/codec.go +++ b/vms/platformvm/genesis/codec.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis import "github.com/ava-labs/avalanchego/vms/platformvm/block" -const Version = block.Version +const CodecVersion = block.CodecVersion var Codec = block.GenesisCodec diff --git a/vms/platformvm/genesis/genesis.go b/vms/platformvm/genesis/genesis.go index da792acde030..5720c685a720 100644 --- a/vms/platformvm/genesis/genesis.go +++ b/vms/platformvm/genesis/genesis.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/vms/platformvm/health.go b/vms/platformvm/health.go index 4ceed8f84adc..86c80b807b70 100644 --- a/vms/platformvm/health.go +++ b/vms/platformvm/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/vms/platformvm/main_test.go b/vms/platformvm/main_test.go index 88a571cfa5cb..d353d31664fe 100644 --- a/vms/platformvm/main_test.go +++ b/vms/platformvm/main_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/vms/platformvm/metrics/block_metrics.go b/vms/platformvm/metrics/block_metrics.go index 759f07d8e2e1..1afbdcb87959 100644 --- a/vms/platformvm/metrics/block_metrics.go +++ b/vms/platformvm/metrics/block_metrics.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/vms/platformvm/metrics/metrics.go b/vms/platformvm/metrics/metrics.go index 7c0e616dd9b2..98b611a017ed 100644 --- a/vms/platformvm/metrics/metrics.go +++ b/vms/platformvm/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -19,10 +19,6 @@ var _ Metrics = (*metrics)(nil) type Metrics interface { metric.APIInterceptor - // Mark that an option vote that we initially preferred was accepted. - MarkOptionVoteWon() - // Mark that an option vote that we initially preferred was rejected. - MarkOptionVoteLost() // Mark that the given block was accepted. MarkAccepted(block.Block) error // Mark that a validator set was created. @@ -75,17 +71,6 @@ func New( Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", }), - numVotesWon: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_won", - Help: "Total number of votes this node has won", - }), - numVotesLost: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_lost", - Help: "Total number of votes this node has lost", - }), - validatorSetsCached: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "validator_sets_cached", @@ -118,9 +103,6 @@ func New( registerer.Register(m.localStake), registerer.Register(m.totalStake), - registerer.Register(m.numVotesWon), - registerer.Register(m.numVotesLost), - registerer.Register(m.validatorSetsCreated), registerer.Register(m.validatorSetsCached), registerer.Register(m.validatorSetsHeightDiff), @@ -140,22 +122,12 @@ type metrics struct { localStake prometheus.Gauge totalStake prometheus.Gauge - numVotesWon, numVotesLost prometheus.Counter - validatorSetsCached prometheus.Counter validatorSetsCreated prometheus.Counter validatorSetsHeightDiff prometheus.Gauge validatorSetsDuration prometheus.Gauge } -func (m *metrics) MarkOptionVoteWon() { - m.numVotesWon.Inc() -} - -func (m *metrics) MarkOptionVoteLost() { - m.numVotesLost.Inc() -} - func (m *metrics) MarkAccepted(b block.Block) error { return b.Visit(m.blockMetrics) } diff --git a/vms/platformvm/metrics/no_op.go b/vms/platformvm/metrics/no_op.go index 45100b49bbf5..770e30c961a1 100644 --- a/vms/platformvm/metrics/no_op.go +++ b/vms/platformvm/metrics/no_op.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/vms/platformvm/metrics/tx_metrics.go b/vms/platformvm/metrics/tx_metrics.go index 9ed07bce7ec9..f56c84aac176 100644 --- a/vms/platformvm/metrics/tx_metrics.go +++ b/vms/platformvm/metrics/tx_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics diff --git a/vms/platformvm/block/builder/camino_network.go b/vms/platformvm/network/camino_network.go similarity index 54% rename from vms/platformvm/block/builder/camino_network.go rename to vms/platformvm/network/camino_network.go index 2ebef08754c8..be24529d276d 100644 --- a/vms/platformvm/block/builder/camino_network.go +++ b/vms/platformvm/network/camino_network.go @@ -1,51 +1,79 @@ // Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. // See the file LICENSE for licensing terms. -package builder +package network import ( "context" "errors" "fmt" + "sync" "time" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/cache" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - txBuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) var errUnknownCrossChainMessage = errors.New("unknown cross-chain message") +type SystemTxBuilder interface { + NewRewardsImportTx() (*txs.Tx, error) +} + type caminoNetwork struct { - network - txBuilder txBuilder.CaminoBuilder + *network + txBuilder SystemTxBuilder + lock sync.Locker } -func NewCaminoNetwork( - ctx *snow.Context, - blkBuilder *caminoBuilder, +func NewCamino( + log logging.Logger, + nodeID ids.NodeID, + subnetID ids.ID, + vdrs validators.State, + txVerifier TxVerifier, + mempool mempool.Mempool, + partialSyncPrimaryNetwork bool, appSender common.AppSender, - txBuilder txBuilder.CaminoBuilder, -) Network { + registerer prometheus.Registerer, + config Config, + txBuilder SystemTxBuilder, + lock sync.Locker, +) (Network, error) { + avaxNetwork, err := New( + log, + nodeID, + subnetID, + vdrs, + txVerifier, + mempool, + partialSyncPrimaryNetwork, + appSender, + registerer, + config, + ) + if err != nil { + return nil, err + } + return &caminoNetwork{ - network: network{ - ctx: ctx, - blkBuilder: &blkBuilder.builder, - appSender: appSender, - recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, - }, + network: avaxNetwork.(*network), txBuilder: txBuilder, - } + lock: lock, + }, nil } func (n *caminoNetwork) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, _ time.Time, request []byte) error { - n.ctx.Log.Debug("called CrossChainAppRequest message handler", + n.log.Debug("called CrossChainAppRequest message handler", zap.Stringer("chainID", chainID), zap.Uint32("requestID", requestID), zap.Int("messageLen", len(request)), @@ -62,7 +90,7 @@ func (n *caminoNetwork) CrossChainAppRequest(ctx context.Context, chainID ids.ID requestID, []byte(n.caminoRewardMessage()), ); err != nil { - n.ctx.Log.Error("caminoCrossChainAppRequest failed to send response", zap.Error(err)) + n.log.Error("caminoCrossChainAppRequest failed to send response", zap.Error(err)) // we don't want fatal here: response is for logging only, so // its better to not respond properly, than crash the whole node return nil @@ -81,33 +109,33 @@ func (n *caminoNetwork) caminoRewardMessage() string { if !ok { // should never happen err = fmt.Errorf("unexpected tx type: expected *txs.RewardsImportTx, got %T", utx) - n.ctx.Log.Error("caminoCrossChainAppRequest failed to create rewardsImportTx", zap.Error(err)) + 
n.log.Error("caminoCrossChainAppRequest failed to create rewardsImportTx", zap.Error(err)) return fmt.Sprintf("caminoCrossChainAppRequest failed to issue rewardsImportTx: %s", err) } - n.ctx.Lock.Lock() - defer n.ctx.Lock.Unlock() + n.lock.Lock() + defer n.lock.Unlock() - if err := n.blkBuilder.AddUnverifiedTx(tx); err != nil { - n.ctx.Log.Error("caminoCrossChainAppRequest failed to add unverified rewardsImportTx to block builder", zap.Error(err)) - return fmt.Sprintf("caminoCrossChainAppRequest failed to add unverified rewardsImportTx to block builder: %s", err) + if err := n.issueTx(tx); err != nil { + n.log.Error("caminoCrossChainAppRequest failed to issue rewardsImportTx", zap.Error(err)) + return fmt.Sprintf("caminoCrossChainAppRequest failed to issue rewardsImportTx: %s", err) } - amounts := make([]uint64, len(utx.Ins)) + amts := make([]uint64, len(utx.Ins)) for i := range utx.Ins { - amounts[i] = utx.Ins[i].In.Amount() + amts[i] = utx.Ins[i].In.Amount() } - return fmt.Sprintf("caminoCrossChainAppRequest issued rewardsImportTx with utxos with %v nCAM", amounts) + return fmt.Sprintf("caminoCrossChainAppRequest issued rewardsImportTx with utxos with %v nCAM", amts) } func (n *caminoNetwork) newRewardsImportTx() (*txs.Tx, error) { - n.ctx.Lock.Lock() - defer n.ctx.Lock.Unlock() + n.lock.Lock() + defer n.lock.Unlock() tx, err := n.txBuilder.NewRewardsImportTx() if err != nil { - n.ctx.Log.Error("caminoCrossChainAppRequest failed to create rewardsImportTx", zap.Error(err)) + n.log.Error("caminoCrossChainAppRequest failed to create rewardsImportTx", zap.Error(err)) return nil, fmt.Errorf("caminoCrossChainAppRequest failed to create rewardsImportTx: %w", err) } return tx, nil diff --git a/vms/platformvm/network/config.go b/vms/platformvm/network/config.go new file mode 100644 index 000000000000..8536504d8383 --- /dev/null +++ b/vms/platformvm/network/config.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/units" +) + +var DefaultConfig = Config{ + MaxValidatorSetStaleness: time.Minute, + TargetGossipSize: 20 * units.KiB, + PullGossipPollSize: 1, + PullGossipFrequency: 1500 * time.Millisecond, + PullGossipThrottlingPeriod: 10 * time.Second, + PullGossipThrottlingLimit: 2, + ExpectedBloomFilterElements: 8 * 1024, + ExpectedBloomFilterFalsePositiveProbability: .01, + MaxBloomFilterFalsePositiveProbability: .05, + LegacyPushGossipCacheSize: 512, +} + +type Config struct { + // MaxValidatorSetStaleness limits how old of a validator set the network + // will use for peer sampling and rate limiting. + MaxValidatorSetStaleness time.Duration `json:"max-validator-set-staleness"` + // TargetGossipSize is the number of bytes that will be attempted to be + // sent when pushing transactions and when responded to transaction pull + // requests. + TargetGossipSize int `json:"target-gossip-size"` + // PullGossipPollSize is the number of validators to sample when performing + // a round of pull gossip. + PullGossipPollSize int `json:"pull-gossip-poll-size"` + // PullGossipFrequency is how frequently rounds of pull gossip are + // performed. + PullGossipFrequency time.Duration `json:"pull-gossip-frequency"` + // PullGossipThrottlingPeriod is how large of a window the throttler should + // use. 
+ PullGossipThrottlingPeriod time.Duration `json:"pull-gossip-throttling-period"` + // PullGossipThrottlingLimit is the number of pull querys that are allowed + // by a validator in every throttling window. + PullGossipThrottlingLimit int `json:"pull-gossip-throttling-limit"` + // ExpectedBloomFilterElements is the number of elements to expect when + // creating a new bloom filter. The larger this number is, the larger the + // bloom filter will be. + ExpectedBloomFilterElements int `json:"expected-bloom-filter-elements"` + // ExpectedBloomFilterFalsePositiveProbability is the expected probability + // of a false positive after having inserted ExpectedBloomFilterElements + // into a bloom filter. The smaller this number is, the larger the bloom + // filter will be. + ExpectedBloomFilterFalsePositiveProbability float64 `json:"expected-bloom-filter-false-positive-probability"` + // MaxBloomFilterFalsePositiveProbability is used to determine when the + // bloom filter should be refreshed. Once the expected probability of a + // false positive exceeds this value, the bloom filter will be regenerated. + // The smaller this number is, the more frequently that the bloom filter + // will be regenerated. + MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"` + // LegacyPushGossipCacheSize tracks the most recently received transactions + // and ensures to only gossip them once. + // + // Deprecated: The legacy push gossip mechanism is deprecated in favor of + // the p2p SDK's push gossip mechanism. + LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"` +} diff --git a/vms/platformvm/network/gossip.go b/vms/platformvm/network/gossip.go new file mode 100644 index 000000000000..0cca1ab4e3f6 --- /dev/null +++ b/vms/platformvm/network/gossip.go @@ -0,0 +1,145 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/dac" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + _ gossip.Marshaller[*txs.Tx] = (*txMarshaller)(nil) + _ gossip.Gossipable = (*txs.Tx)(nil) +) + +// bloomChurnMultiplier is the number used to multiply the size of the mempool +// to determine how large of a bloom filter to create. 
+const bloomChurnMultiplier = 3 + +// txGossipHandler is the handler called when serving gossip messages +type txGossipHandler struct { + p2p.NoOpHandler + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip( + ctx context.Context, + nodeID ids.NodeID, + gossipBytes []byte, +) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t txGossipHandler) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, +) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +type txMarshaller struct{} + +func (txMarshaller) MarshalGossip(tx *txs.Tx) ([]byte, error) { + return tx.Bytes(), nil +} + +func (txMarshaller) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { + return txs.Parse(txs.Codec, bytes) +} + +func newGossipMempool( + mempool mempool.Mempool, + registerer prometheus.Registerer, + log logging.Logger, + txVerifier TxVerifier, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) (*gossipMempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + return &gossipMempool{ + Mempool: mempool, + log: log, + txVerifier: txVerifier, + bloom: bloom, + }, err +} + +type gossipMempool struct { + mempool.Mempool + log logging.Logger + txVerifier TxVerifier + + lock sync.RWMutex + bloom *gossip.BloomFilter +} + +func (g *gossipMempool) Add(tx *txs.Tx) error { + txID := tx.ID() + if _, ok := g.Mempool.Get(txID); ok { + return fmt.Errorf("tx %s dropped: %w", txID, mempool.ErrDuplicateTx) + } + + if reason := g.Mempool.GetDropReason(txID); reason != nil && !errors.Is(reason, dac.ErrNotYetActive) { + // If the tx is being dropped - just ignore it + // + // TODO: Should we allow re-verification of the transaction even if it + // failed previously? + return reason + } + + if err := g.txVerifier.VerifyTx(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + if err := g.Mempool.Add(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + g.lock.Lock() + defer g.lock.Unlock() + + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, g.Mempool.Len()*bloomChurnMultiplier) + if err != nil { + return err + } + + if reset { + g.log.Debug("resetting bloom filter") + g.Mempool.Iterate(func(tx *txs.Tx) bool { + g.bloom.Add(tx) + return true + }) + } + + g.Mempool.RequestBuildBlock(false) + return nil +} + +func (g *gossipMempool) GetFilter() (bloom []byte, salt []byte) { + g.lock.RLock() + defer g.lock.RUnlock() + + return g.bloom.Marshal() +} diff --git a/vms/platformvm/network/gossip_test.go b/vms/platformvm/network/gossip_test.go new file mode 100644 index 000000000000..a393515716b6 --- /dev/null +++ b/vms/platformvm/network/gossip_test.go @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
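A brief aside on the `gossipMempool` wrapper introduced above: both the legacy push path and the new p2p SDK gossip path funnel through its `Add`, and callers only ever see its returned error. The sketch below is illustrative only and lives in-package (because `gossipMempool` is unexported); it restates the behavior already shown in `gossip.go`, it is not additional code proposed by this change.

```go
// Sketch only: how callers are expected to read gossipMempool.Add results,
// based on the implementation above. In-package because the type is unexported.
package network

import (
	"errors"

	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
	"github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool"
)

func describeAdd(gm *gossipMempool, tx *txs.Tx) string {
	switch err := gm.Add(tx); {
	case err == nil:
		// Verified, added to the mempool and the bloom filter (resetting the
		// filter if it grew too stale), and RequestBuildBlock(false) was called.
		return "accepted"
	case errors.Is(err, mempool.ErrDuplicateTx):
		// Already in the mempool; the tx is not re-verified or re-gossiped.
		return "duplicate"
	default:
		// Previously dropped for a reason other than dac.ErrNotYetActive, or it
		// failed verification here and was marked dropped.
		return "dropped: " + err.Error()
	}
}
```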
+ +package network + +import ( + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var errFoo = errors.New("foo") + +// Add should error if verification errors +func TestGossipMempoolAddVerificationError(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + mempool := mempool.NewMockMempool(ctrl) + txVerifier := testTxVerifier{err: errFoo} + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().MarkDropped(txID, errFoo) + + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, errFoo) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Add should error if adding to the mempool errors +func TestGossipMempoolAddError(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + txVerifier := testTxVerifier{} + mempool := mempool.NewMockMempool(ctrl) + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().Add(tx).Return(errFoo) + mempool.EXPECT().MarkDropped(txID, errFoo).AnyTimes() + + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, errFoo) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Adding a duplicate to the mempool should return an error +func TestMempoolDuplicate(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + testMempool := mempool.NewMockMempool(ctrl) + txVerifier := testTxVerifier{} + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + testMempool.EXPECT().Get(txID).Return(tx, true) + + gossipMempool, err := newGossipMempool( + testMempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, mempool.ErrDuplicateTx) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Adding a tx to the mempool should add it to the bloom filter +func TestGossipAddBloomFilter(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + txVerifier := testTxVerifier{} + mempool := mempool.NewMockMempool(ctrl) + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().Add(tx).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) 
+ + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + require.NoError(gossipMempool.Add(tx)) + require.True(gossipMempool.bloom.Has(tx)) +} diff --git a/vms/platformvm/network/main_test.go b/vms/platformvm/network/main_test.go new file mode 100644 index 000000000000..ed2cfd9ecee7 --- /dev/null +++ b/vms/platformvm/network/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/vms/platformvm/network/network.go b/vms/platformvm/network/network.go new file mode 100644 index 000000000000..39f6ee1dea0e --- /dev/null +++ b/vms/platformvm/network/network.go @@ -0,0 +1,296 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +const TxGossipHandlerID = 0 + +type Network interface { + common.AppHandler + + // Gossip starts gossiping transactions and blocks until it completes. + Gossip(ctx context.Context) + // IssueTx verifies the transaction at the currently preferred state, adds + // it to the mempool, and gossips it to the network. 
+ IssueTx(context.Context, *txs.Tx) error +} + +type network struct { + *p2p.Network + + log logging.Logger + txVerifier TxVerifier + mempool *gossipMempool + partialSyncPrimaryNetwork bool + appSender common.AppSender + + txPushGossiper gossip.Accumulator[*txs.Tx] + txPullGossiper gossip.Gossiper + txGossipFrequency time.Duration + + // gossip related attributes + recentTxsLock sync.Mutex + recentTxs *cache.LRU[ids.ID, struct{}] +} + +func New( + log logging.Logger, + nodeID ids.NodeID, + subnetID ids.ID, + vdrs validators.State, + txVerifier TxVerifier, + mempool mempool.Mempool, + partialSyncPrimaryNetwork bool, + appSender common.AppSender, + registerer prometheus.Registerer, + config Config, +) (Network, error) { + p2pNetwork, err := p2p.NewNetwork(log, appSender, registerer, "p2p") + if err != nil { + return nil, err + } + + marshaller := txMarshaller{} + validators := p2p.NewValidators( + p2pNetwork.Peers, + log, + subnetID, + vdrs, + config.MaxValidatorSetStaleness, + ) + txGossipClient := p2pNetwork.NewClient( + TxGossipHandlerID, + p2p.WithValidatorSampling(validators), + ) + txGossipMetrics, err := gossip.NewMetrics(registerer, "tx") + if err != nil { + return nil, err + } + + txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + txGossipClient, + txGossipMetrics, + config.TargetGossipSize, + ) + + gossipMempool, err := newGossipMempool( + mempool, + registerer, + log, + txVerifier, + config.ExpectedBloomFilterElements, + config.ExpectedBloomFilterFalsePositiveProbability, + config.MaxBloomFilterFalsePositiveProbability, + ) + if err != nil { + return nil, err + } + + var txPullGossiper gossip.Gossiper + txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( + log, + marshaller, + gossipMempool, + txGossipClient, + txGossipMetrics, + config.PullGossipPollSize, + ) + + // Gossip requests are only served if a node is a validator + txPullGossiper = gossip.ValidatorGossiper{ + Gossiper: txPullGossiper, + NodeID: nodeID, + Validators: validators, + } + + handler := gossip.NewHandler[*txs.Tx]( + log, + marshaller, + txPushGossiper, + gossipMempool, + txGossipMetrics, + config.TargetGossipSize, + ) + + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler( + config.PullGossipThrottlingPeriod, + config.PullGossipThrottlingLimit, + ), + log, + ), + validators, + log, + ) + + // We allow pushing txs between all peers, but only serve gossip requests + // from validators + txGossipHandler := txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } + + if err := p2pNetwork.AddHandler(TxGossipHandlerID, txGossipHandler); err != nil { + return nil, err + } + + return &network{ + Network: p2pNetwork, + log: log, + txVerifier: txVerifier, + mempool: gossipMempool, + partialSyncPrimaryNetwork: partialSyncPrimaryNetwork, + appSender: appSender, + txPushGossiper: txPushGossiper, + txPullGossiper: txPullGossiper, + txGossipFrequency: config.PullGossipFrequency, + recentTxs: &cache.LRU[ids.ID, struct{}]{Size: config.LegacyPushGossipCacheSize}, + }, nil +} + +func (n *network) Gossip(ctx context.Context) { + // If the node is running partial sync, we should not perform any pull + // gossip. 
+ if n.partialSyncPrimaryNetwork { + return + } + + gossip.Every(ctx, n.log, n.txPullGossiper, n.txGossipFrequency) +} + +func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { + n.log.Debug("called AppGossip message handler", + zap.Stringer("nodeID", nodeID), + zap.Int("messageLen", len(msgBytes)), + ) + + if n.partialSyncPrimaryNetwork { + n.log.Debug("dropping AppGossip message", + zap.String("reason", "primary network is not being fully synced"), + ) + return nil + } + + msgIntf, err := message.Parse(msgBytes) + if err != nil { + n.log.Debug("forwarding AppGossip to p2p network", + zap.String("reason", "failed to parse message"), + ) + + return n.Network.AppGossip(ctx, nodeID, msgBytes) + } + + msg, ok := msgIntf.(*message.Tx) + if !ok { + n.log.Debug("dropping unexpected message", + zap.Stringer("nodeID", nodeID), + ) + return nil + } + + tx, err := txs.Parse(txs.Codec, msg.Tx) + if err != nil { + n.log.Verbo("received invalid tx", + zap.Stringer("nodeID", nodeID), + zap.Binary("tx", msg.Tx), + zap.Error(err), + ) + return nil + } + txID := tx.ID() + + if err := n.issueTx(tx); err == nil { + n.legacyGossipTx(ctx, txID, msgBytes) + + n.txPushGossiper.Add(tx) + return n.txPushGossiper.Gossip(ctx) + } + return nil +} + +func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.issueTx(tx); err != nil { + return err + } + + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { + return err + } + + txID := tx.ID() + n.legacyGossipTx(ctx, txID, msgBytes) + n.txPushGossiper.Add(tx) + return n.txPushGossiper.Gossip(ctx) +} + +// returns nil if the tx is in the mempool +func (n *network) issueTx(tx *txs.Tx) error { + // If we are partially syncing the Primary Network, we should not be + // maintaining the transaction mempool locally. + if n.partialSyncPrimaryNetwork { + return nil + } + + if err := n.mempool.Add(tx); err != nil { + n.log.Debug("tx failed to be added to the mempool", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + + return err + } + + return nil +} + +func (n *network) legacyGossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { + n.recentTxsLock.Lock() + _, has := n.recentTxs.Get(txID) + n.recentTxs.Put(txID, struct{}{}) + n.recentTxsLock.Unlock() + + // Don't gossip a transaction if it has been recently gossiped. + if has { + return + } + + n.log.Debug("gossiping tx", + zap.Stringer("txID", txID), + ) + + if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { + n.log.Error("failed to gossip tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } +} diff --git a/vms/platformvm/network/network_test.go b/vms/platformvm/network/network_test.go new file mode 100644 index 000000000000..181fbc163cea --- /dev/null +++ b/vms/platformvm/network/network_test.go @@ -0,0 +1,379 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
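To make the shape of the new `network` package concrete, here is a rough wiring sketch. It is not part of this diff: every input (logger, node ID, validator state, verifier, mempool, app sender) is assumed to come from the VM's existing initialization, and only the `New`, `Gossip`, and `IssueTx` signatures shown above are taken as given.

```go
// Sketch only: hypothetical VM-side wiring of the tx gossip network.
package platformvm

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/utils/constants"
	"github.com/ava-labs/avalanchego/utils/logging"
	"github.com/ava-labs/avalanchego/vms/platformvm/network"
	"github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool"
)

func startTxGossip(
	ctx context.Context,
	log logging.Logger,
	nodeID ids.NodeID,
	vdrState validators.State,
	verifier network.TxVerifier,
	mpool mempool.Mempool,
	sender common.AppSender,
) (network.Network, error) {
	net, err := network.New(
		log,
		nodeID,
		constants.PrimaryNetworkID, // subnetID used for validator sampling
		vdrState,
		verifier,
		mpool,
		false, // partialSyncPrimaryNetwork
		sender,
		prometheus.NewRegistry(),
		network.DefaultConfig,
	)
	if err != nil {
		return nil, err
	}
	// Pull gossip polls validators until ctx is cancelled, so run it in the
	// background; locally submitted txs then go through net.IssueTx so they are
	// verified, added to the mempool, and push-gossiped.
	go net.Gossip(ctx)
	return net, nil
}
```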
+ +package network + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var ( + errTest = errors.New("test error") + + testConfig = Config{ + MaxValidatorSetStaleness: time.Second, + TargetGossipSize: 1, + PullGossipPollSize: 1, + PullGossipFrequency: time.Second, + PullGossipThrottlingPeriod: time.Second, + PullGossipThrottlingLimit: 1, + ExpectedBloomFilterElements: 10, + ExpectedBloomFilterFalsePositiveProbability: .1, + MaxBloomFilterFalsePositiveProbability: .5, + LegacyPushGossipCacheSize: 512, + } +) + +var _ TxVerifier = (*testTxVerifier)(nil) + +type testTxVerifier struct { + err error +} + +func (t testTxVerifier) VerifyTx(*txs.Tx) error { + return t.err +} + +func TestNetworkAppGossip(t *testing.T) { + testTx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: 1, + BlockchainID: ids.GenerateTestID(), + Ins: []*avax.TransferableInput{}, + Outs: []*avax.TransferableOutput{}, + }, + }, + } + require.NoError(t, testTx.Initialize(txs.Codec)) + + type test struct { + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + partialSyncPrimaryNetwork bool + appSenderFunc func(*gomock.Controller) common.AppSender + } + + tests := []test{ + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid message bytes", + msgBytesFunc: func() []byte { + return []byte{0x00} + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return nil + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return nil + }, + }, + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid tx bytes", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: []byte{0x00}, + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return mempool.NewMockMempool(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + { + name: "issuance succeeds", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Times(2) + return appSender + }, + }, + { + // Issue 
returns error because tx was dropped. We shouldn't gossip the tx. + name: "issuance fails", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + { + name: "should AppGossip if primary network is not being fully synced", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + // mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + // appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) + return appSender + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + + snowCtx := snowtest.Context(t, ids.Empty) + n, err := New( + logging.NoLog{}, + ids.EmptyNodeID, + ids.Empty, + snowCtx.ValidatorState, + testTxVerifier{}, + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + prometheus.NewRegistry(), + DefaultConfig, + ) + require.NoError(err) + + require.NoError(n.AppGossip(ctx, ids.GenerateTestNodeID(), tt.msgBytesFunc())) + }) + } +} + +func TestNetworkIssueTx(t *testing.T) { + tx := &txs.Tx{} + + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifier testTxVerifier + partialSyncPrimaryNetwork bool + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "mempool has transaction", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(tx, true) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + }, + expectedErr: mempool.ErrDuplicateTx, + }, + { + name: "transaction marked as dropped in mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "transaction invalid", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + txVerifier: testTxVerifier{err: errTest}, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + 
expectedErr: errTest, + }, + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "AppGossip tx but do not add to mempool if primary network is not being fully synced", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + snowCtx := snowtest.Context(t, ids.Empty) + n, err := New( + snowCtx.Log, + snowCtx.NodeID, + snowCtx.SubnetID, + snowCtx.ValidatorState, + tt.txVerifier, + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + + err = n.IssueTx(context.Background(), tx) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func TestNetworkGossipTx(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + appSender := common.NewMockSender(ctrl) + + snowCtx := snowtest.Context(t, ids.Empty) + nIntf, err := New( + snowCtx.Log, + snowCtx.NodeID, + snowCtx.SubnetID, + snowCtx.ValidatorState, + testTxVerifier{}, + mempool.NewMockMempool(ctrl), + false, + appSender, + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + require.IsType(&network{}, nIntf) + n := nIntf.(*network) + + // Case: Tx was recently gossiped + txID := ids.GenerateTestID() + n.recentTxs.Put(txID, struct{}{}) + n.legacyGossipTx(context.Background(), txID, []byte{}) + // Didn't make a call to SendAppGossip + + // Case: Tx was not recently gossiped + msgBytes := []byte{1, 2, 3} + appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) + n.legacyGossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + // Did make a call to SendAppGossip +} diff --git a/vms/platformvm/network/tx_verifier.go b/vms/platformvm/network/tx_verifier.go new file mode 100644 index 000000000000..ee76c8b0056b --- 
/dev/null +++ b/vms/platformvm/network/tx_verifier.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "sync" + + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +var _ TxVerifier = (*LockedTxVerifier)(nil) + +type TxVerifier interface { + // VerifyTx verifies that the transaction should be issued into the mempool. + VerifyTx(tx *txs.Tx) error +} + +type LockedTxVerifier struct { + lock sync.Locker + txVerifier TxVerifier +} + +func (l *LockedTxVerifier) VerifyTx(tx *txs.Tx) error { + l.lock.Lock() + defer l.lock.Unlock() + + return l.txVerifier.VerifyTx(tx) +} + +func NewLockedTxVerifier(lock sync.Locker, txVerifier TxVerifier) *LockedTxVerifier { + return &LockedTxVerifier{ + lock: lock, + txVerifier: txVerifier, + } +} diff --git a/vms/platformvm/reward/calculator.go b/vms/platformvm/reward/calculator.go index 30ba7c3270d7..79a845ef8980 100644 --- a/vms/platformvm/reward/calculator.go +++ b/vms/platformvm/reward/calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/vms/platformvm/reward/calculator_test.go b/vms/platformvm/reward/calculator_test.go index 1462bd2e3664..d2fd17ff9e2b 100644 --- a/vms/platformvm/reward/calculator_test.go +++ b/vms/platformvm/reward/calculator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/vms/platformvm/reward/config.go b/vms/platformvm/reward/config.go index 17a0a0d0e83b..ccabc398f83a 100644 --- a/vms/platformvm/reward/config.go +++ b/vms/platformvm/reward/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index af4214e11054..b7b3526a0de0 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
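`LockedTxVerifier` exists because gossip handlers call `VerifyTx` from network goroutines while verification reads state guarded by the VM lock. A minimal sketch of the intended use follows; `vmLock` and `baseVerifier` are illustrative names, not identifiers from this change.

```go
// Sketch only: wrap a non-thread-safe verifier so every VerifyTx call from the
// gossip goroutines takes the VM lock before touching VM state.
package platformvm

import (
	"sync"

	"github.com/ava-labs/avalanchego/vms/platformvm/network"
)

func wrapVerifier(vmLock sync.Locker, baseVerifier network.TxVerifier) network.TxVerifier {
	return network.NewLockedTxVerifier(vmLock, baseVerifier)
}
```

The wrapped verifier would then be the `txVerifier` argument handed to `network.New` (or `network.NewCamino`).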
package platformvm @@ -508,7 +508,7 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap response.UTXOs[i] = string(bytes) continue } - bytes, err := txs.Codec.Marshal(txs.Version, utxo) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("couldn't serialize UTXO %q: %w", utxo.InputID(), err) } @@ -1205,12 +1205,27 @@ type AddValidatorArgs struct { // AddValidator creates and signs and issues a transaction to add a validator to // the primary network -func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddValidator(req *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addValidator"), ) + tx, changeAddr, err := s.buildAddValidatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + reply.TxID = tx.ID() + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddValidatorTx(args *AddValidatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) @@ -1223,13 +1238,13 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a switch { case args.RewardAddress == "": - return errNoRewardAddress + return nil, ids.ShortEmpty, errNoRewardAddress case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate case args.DelegationFeeRate < 0 || args.DelegationFeeRate > 100: - return errInvalidDelegationRate + return nil, ids.ShortEmpty, errInvalidDelegationRate } // Parse the node ID @@ -1243,13 +1258,13 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a // Parse the node owner address nodeOwnerAddress, err := avax.ParseServiceAddress(s.addrManager, args.NodeOwnerAddress) if err != nil { - return fmt.Errorf("problem while parsing node owner address: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem while parsing node owner address: %w", err) } // Parse the reward address rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if err != nil { - return fmt.Errorf("problem while parsing reward address: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem while parsing reward address: %w", err) } s.vm.ctx.Lock.Lock() @@ -1257,14 +1272,14 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1286,16 +1301,10 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a changeAddr, ) if err != nil { - return fmt.Errorf("couldn't 
create tx: %w", err) + return nil, ids.ShortEmpty, err } - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // AddDelegatorArgs are the arguments to AddDelegator @@ -1308,12 +1317,27 @@ type AddDelegatorArgs struct { // AddDelegator creates and signs and issues a transaction to add a delegator to // the primary network -func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddDelegator(req *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addDelegator"), ) + tx, changeAddr, err := s.buildAddDelegatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + reply.TxID = tx.ID() + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddDelegatorTx(args *AddDelegatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) @@ -1326,11 +1350,11 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a switch { case args.RewardAddress == "": - return errNoRewardAddress + return nil, ids.ShortEmpty, errNoRewardAddress case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate } var nodeID ids.NodeID @@ -1343,7 +1367,7 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a // Parse the reward address rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if err != nil { - return fmt.Errorf("problem parsing 'rewardAddress': %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing 'rewardAddress': %w", err) } s.vm.ctx.Lock.Lock() @@ -1351,14 +1375,14 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1378,16 +1402,10 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a changeAddr, // Change address ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // AddSubnetValidatorArgs are the arguments to AddSubnetValidator @@ -1401,12 +1419,27 @@ type AddSubnetValidatorArgs struct { // AddSubnetValidator creates and signs and issues a transaction to add a // validator to a subnet other than the primary network -func (s 
*Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) AddSubnetValidator(req *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addSubnetValidator"), ) + tx, changeAddr, err := s.buildAddSubnetValidatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddSubnetValidatorTx(args *AddSubnetValidatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) @@ -1419,20 +1452,20 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr switch { case args.SubnetID == "": - return errNoSubnetID + return nil, ids.ShortEmpty, errNoSubnetID case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate } // Parse the subnet ID subnetID, err := ids.FromString(args.SubnetID) if err != nil { - return fmt.Errorf("problem parsing subnetID %q: %w", args.SubnetID, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing subnetID %q: %w", args.SubnetID, err) } if subnetID == constants.PrimaryNetworkID { - return errNamedSubnetCantBePrimary + return nil, ids.ShortEmpty, errNamedSubnetCantBePrimary } s.vm.ctx.Lock.Lock() @@ -1440,14 +1473,14 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1467,16 +1500,10 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr changeAddr, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // CreateSubnetArgs are the arguments to CreateSubnet @@ -1489,30 +1516,45 @@ type CreateSubnetArgs struct { // CreateSubnet creates and signs and issues a transaction to create a new // subnet -func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateSubnet(req *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createSubnet"), ) + tx, changeAddr, err := s.buildCreateSubnetTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + 
response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildCreateSubnetTx(args *CreateSubnetArgs) (*txs.Tx, ids.ShortID, error) { // Parse the control keys controlKeys, err := avax.ParseServiceAddresses(s.addrManager, args.ControlKeys) if err != nil { - return err + return nil, ids.ShortEmpty, err } s.vm.ctx.Lock.Lock() defer s.vm.ctx.Lock.Unlock() keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1524,16 +1566,10 @@ func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response changeAddr, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // ExportAVAXArgs are the arguments to ExportAVAX @@ -1554,14 +1590,29 @@ type ExportAVAXArgs struct { // ExportAVAX exports AVAX from the P-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ExportAVAX(req *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "exportAVAX"), ) + tx, changeAddr, err := s.buildExportAVAX(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildExportAVAX(args *ExportAVAXArgs) (*txs.Tx, ids.ShortID, error) { if args.Amount == 0 { - return errNoAmount + return nil, ids.ShortEmpty, errNoAmount } // Get the chainID and parse the to address @@ -1569,11 +1620,11 @@ func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *ap if err != nil { chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err = ids.ShortFromString(args.To) if err != nil { - return err + return nil, ids.ShortEmpty, err } } @@ -1582,14 +1633,14 @@ func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *ap keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1602,16 +1653,10 @@ func (s *Service) ExportAVAX(_ 
*http.Request, args *ExportAVAXArgs, response *ap changeAddr, // Change address ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // ImportAVAXArgs are the arguments to ImportAVAX @@ -1628,22 +1673,37 @@ type ImportAVAXArgs struct { // ImportAVAX issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. -func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ImportAVAX(req *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "importAVAX"), ) + tx, changeAddr, err := s.buildImportAVAXTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildImportAVAXTx(args *ImportAVAXArgs) (*txs.Tx, ids.ShortID, error) { // Parse the sourceCHain chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } // Parse the to address to, err := avax.ParseServiceAddress(s.addrManager, args.To) if err != nil { // Parse address - return fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) } s.vm.ctx.Lock.Lock() @@ -1651,14 +1711,14 @@ func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *ap keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1669,16 +1729,10 @@ func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *ap changeAddr, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } /* @@ -1706,34 +1760,49 @@ type CreateBlockchainArgs struct { } // CreateBlockchain issues a transaction to create a new blockchain -func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateBlockchain(req *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createBlockchain"), ) + tx, changeAddr, err := s.buildCreateBlockchainTx(args) + if err != nil { + return 
fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildCreateBlockchainTx(args *CreateBlockchainArgs) (*txs.Tx, ids.ShortID, error) { switch { case args.Name == "": - return errMissingName + return nil, ids.ShortEmpty, errMissingName case args.VMID == "": - return errMissingVMID + return nil, ids.ShortEmpty, errMissingVMID } genesisBytes, err := formatting.Decode(args.Encoding, args.GenesisData) if err != nil { - return fmt.Errorf("problem parsing genesis data: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing genesis data: %w", err) } vmID, err := s.vm.Chains.LookupVM(args.VMID) if err != nil { - return fmt.Errorf("no VM with ID '%s' found", args.VMID) + return nil, ids.ShortEmpty, fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { fxID, err := s.vm.Chains.LookupVM(fxIDStr) if err != nil { - return fmt.Errorf("no FX with ID '%s' found", fxIDStr) + return nil, ids.ShortEmpty, fmt.Errorf("no FX with ID '%s' found", fxIDStr) } fxIDs = append(fxIDs, fxID) } @@ -1745,7 +1814,7 @@ func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, } if args.SubnetID == constants.PrimaryNetworkID { - return txs.ErrCantValidatePrimaryNetwork + return nil, ids.ShortEmpty, txs.ErrCantValidatePrimaryNetwork } s.vm.ctx.Lock.Lock() @@ -1753,14 +1822,14 @@ func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, keys, err := s.getKeystoreKeys(&args.UserPass, &args.JSONFromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } changeAddr := keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1775,16 +1844,10 @@ func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, changeAddr, // Change address ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - return utils.Err( - err, - s.vm.Builder.AddUnverifiedTx(tx), - ) + return tx, changeAddr, nil } // GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus @@ -1844,11 +1907,8 @@ func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatus return nil } - preferredBlk, err := s.vm.Preferred() - if err != nil { - return fmt.Errorf("could not retrieve preferred block, err %w", err) - } - preferred, err := s.chainExists(ctx, preferredBlk.ID(), blockchainID) + preferredBlkID := s.vm.manager.Preferred() + preferred, err := s.chainExists(ctx, preferredBlkID, blockchainID) if err != nil { return fmt.Errorf("problem looking up blockchain: %w", err) } @@ -2059,7 +2119,7 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc return nil } -func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { +func (s *Service) IssueTx(req *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { s.vm.ctx.Log.Debug("API called", 
zap.String("service", "platform"), zap.String("method", "issueTx"), @@ -2074,10 +2134,7 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api. return fmt.Errorf("couldn't parse tx: %w", err) } - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { + if err := s.vm.issueTx(req.Context(), tx); err != nil { return fmt.Errorf("couldn't issue tx: %w", err) } @@ -2147,12 +2204,7 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * // The status of this transaction is not in the database - check if the tx // is in the preferred block's db. If so, return that it's processing. - prefBlk, err := s.vm.Preferred() - if err != nil { - return err - } - - preferredID := prefBlk.ID() + preferredID := s.vm.manager.Preferred() onAccept, ok := s.vm.manager.GetState(preferredID) if !ok { return fmt.Errorf("could not retrieve state for block %s", preferredID) @@ -2168,7 +2220,7 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * return err } - if s.vm.Builder.Has(args.TxID) { + if _, ok := s.vm.Builder.Get(args.TxID); ok { // Found the tx in the mempool. Report tx is processing. response.Status = status.Processing return nil @@ -2282,7 +2334,7 @@ func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetSta response.Staked = response.Stakeds[s.vm.ctx.AVAXAssetID] response.Outputs = make([]string, len(stakedOuts)) for i, output := range stakedOuts { - bytes, err := txs.Codec.Marshal(txs.Version, output) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, output) if err != nil { return fmt.Errorf("couldn't serialize output %s: %w", output.ID, err) } @@ -2464,9 +2516,9 @@ func (s *Service) GetRewardUTXOs(_ *http.Request, args *api.GetTxArgs, reply *Ge reply.NumFetched = json.Uint64(len(utxos)) reply.UTXOs = make([]string, len(utxos)) for i, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo) if err != nil { - return fmt.Errorf("failed to encode UTXO to bytes: %w", err) + return fmt.Errorf("couldn't encode UTXO to bytes: %w", err) } utxoStr, err := formatting.Encode(args.Encoding, utxoBytes) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 9db8921b9729..1feb411bd70b 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm @@ -83,7 +83,7 @@ var ( ) func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { - vm, _, mutableSharedMemory := defaultVM(t) + vm, _, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() ks := keystore.New(logging.NoLog{}, memdb.New()) @@ -142,11 +142,6 @@ func TestExportKey(t *testing.T) { service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := ExportKeyReply{} require.NoError(service.ExportKey(nil, &args, &reply)) @@ -161,11 +156,6 @@ func TestImportKey(t *testing.T) { require.NoError(stdjson.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := api.JSONAddress{} require.NoError(service.ImportKey(nil, &args, &reply)) @@ -178,11 +168,6 @@ func TestGetTxStatus(t *testing.T) { service, mutableSharedMemory := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -190,7 +175,7 @@ func TestGetTxStatus(t *testing.T) { m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.db)) sm := m.NewSharedMemory(service.vm.ctx.ChainID) - peerSharedMemory := m.NewSharedMemory(xChainID) + peerSharedMemory := m.NewSharedMemory(service.vm.ctx.XChainID) // #nosec G404 utxo := &avax.UTXO{ @@ -198,7 +183,7 @@ func TestGetTxStatus(t *testing.T) { TxID: ids.GenerateTestID(), OutputIndex: rand.Uint32(), }, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 1234567, OutputOwners: secp256k1fx.OutputOwners{ @@ -208,7 +193,7 @@ func TestGetTxStatus(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() @@ -226,14 +211,16 @@ func TestGetTxStatus(t *testing.T) { }, })) - oldSharedMemory := mutableSharedMemory.SharedMemory mutableSharedMemory.SharedMemory = sm - tx, err := service.vm.txBuilder.NewImportTx(xChainID, ids.ShortEmpty, []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty) + tx, err := service.vm.txBuilder.NewImportTx( + service.vm.ctx.XChainID, + ids.ShortEmpty, + []*secp256k1.PrivateKey{recipientKey}, + ids.ShortEmpty, + ) require.NoError(err) - mutableSharedMemory.SharedMemory = oldSharedMemory - service.vm.ctx.Lock.Unlock() var ( @@ -244,15 +231,9 @@ func TestGetTxStatus(t *testing.T) { require.Equal(status.Unknown, resp.Status) require.Zero(resp.Reason) - service.vm.ctx.Lock.Lock() - // put the chain in existing chain list - err = service.vm.Builder.AddUnverifiedTx(tx) - require.ErrorIs(err, database.ErrNotFound) // Missing shared memory UTXO - - mutableSharedMemory.SharedMemory = sm - - require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) + service.vm.ctx.Lock.Lock() block, err := service.vm.BuildBlock(context.Background()) require.NoError(err) @@ -346,10 +327,9 @@ func TestGetTx(t *testing.T) { err = service.GetTx(nil, arg, &response) require.ErrorIs(err, database.ErrNotFound) // We haven't issued 
the tx yet + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) - blk, err := service.vm.BuildBlock(context.Background()) require.NoError(err) @@ -388,10 +368,6 @@ func TestGetTx(t *testing.T) { require.NoError(err) require.Equal(expectedTxJSON, []byte(response.Tx)) } - - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() }) } } @@ -401,15 +377,10 @@ func TestGetBalance(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis(t) - for _, utxo := range genesis.UTXOs { + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) + for idx, utxo := range genesis.UTXOs { request := GetBalanceRequest{ Addresses: []string{ fmt.Sprintf("P-%s", utxo.Address), @@ -418,9 +389,14 @@ func TestGetBalance(t *testing.T) { reply := GetBalanceResponse{} require.NoError(service.GetBalance(nil, &request, &reply)) - - require.Equal(json.Uint64(defaultBalance), reply.Balance) - require.Equal(json.Uint64(defaultBalance), reply.Unlocked) + balance := defaultBalance + if idx == 0 { + // we use the first key to fund a subnet creation in [defaultGenesis]. + // As such we need to account for the subnet creation fee + balance = defaultBalance - service.vm.Config.GetCreateSubnetTxFee(service.vm.clock.Time()) + } + require.Equal(json.Uint64(balance), reply.Balance) + require.Equal(json.Uint64(balance), reply.Unlocked) require.Equal(json.Uint64(0), reply.LockedStakeable) require.Equal(json.Uint64(0), reply.LockedNotStakeable) } @@ -430,14 +406,9 @@ func TestGetStake(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis(t) + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) addrsStrs := []string{} for i, validator := range genesis.Validators { addr := fmt.Sprintf("P-%s", validator.RewardOwner.Addresses[0]) @@ -503,12 +474,13 @@ func TestGetStake(t *testing.T) { // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 12345 - delegatorNodeID := ids.NodeID(keys[0].PublicKey().Address()) - delegatorEndTime := uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()) + delegatorNodeID := genesisNodeIDs[0] + delegatorStartTime := defaultValidateStartTime + delegatorEndTime := defaultGenesisTime.Add(defaultMinStakingDuration) tx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, - uint64(defaultGenesisTime.Unix()), - delegatorEndTime, + uint64(delegatorStartTime.Unix()), + uint64(delegatorEndTime.Unix()), delegatorNodeID, ids.GenerateTestShortID(), []*secp256k1.PrivateKey{keys[0]}, @@ -516,9 +488,11 @@ func TestGetStake(t *testing.T) { ) require.NoError(err) + addDelTx := tx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + delegatorStartTime, 0, ) require.NoError(err) @@ -603,13 +577,8 @@ func TestGetCurrentValidators(t *testing.T) { require := require.New(t) service, _ := 
defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() - genesis, _ := defaultGenesis(t) + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) // Call getValidators args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} @@ -635,16 +604,16 @@ func TestGetCurrentValidators(t *testing.T) { // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 12345 - validatorNodeID := ids.NodeID(keys[1].PublicKey().Address()) - delegatorStartTime := uint64(defaultValidateStartTime.Unix()) - delegatorEndTime := uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) + validatorNodeID := genesisNodeIDs[1] + delegatorStartTime := defaultValidateStartTime + delegatorEndTime := delegatorStartTime.Add(defaultMinStakingDuration) service.vm.ctx.Lock.Lock() delTx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, - delegatorStartTime, - delegatorEndTime, + uint64(delegatorStartTime.Unix()), + uint64(delegatorEndTime.Unix()), validatorNodeID, ids.GenerateTestShortID(), []*secp256k1.PrivateKey{keys[0]}, @@ -652,9 +621,11 @@ func TestGetCurrentValidators(t *testing.T) { ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + delegatorStartTime, 0, ) require.NoError(err) @@ -696,8 +667,8 @@ func TestGetCurrentValidators(t *testing.T) { require.Len(*innerVdr.Delegators, 1) delegator := (*innerVdr.Delegators)[0] require.Equal(delegator.NodeID, innerVdr.NodeID) - require.Equal(uint64(delegator.StartTime), delegatorStartTime) - require.Equal(uint64(delegator.EndTime), delegatorEndTime) + require.Equal(int64(delegator.StartTime), delegatorStartTime.Unix()) + require.Equal(int64(delegator.EndTime), delegatorEndTime.Unix()) require.Equal(uint64(delegator.Weight), stakeAmount) } require.True(found) @@ -731,11 +702,6 @@ func TestGetCurrentValidators(t *testing.T) { func TestGetTimestamp(t *testing.T) { require := require.New(t) service, _ := defaultService(t) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := GetTimestampReply{} require.NoError(service.GetTimestamp(nil, nil, &reply)) @@ -773,11 +739,6 @@ func TestGetBlock(t *testing.T) { require := require.New(t) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() service.vm.Config.CreateAssetTxFee = 100 * defaultTxFee @@ -793,7 +754,8 @@ func TestGetBlock(t *testing.T) { ) require.NoError(err) - preferred, err := service.vm.Builder.Preferred() + preferredID := service.vm.manager.Preferred() + preferred, err := service.vm.manager.GetBlock(preferredID) require.NoError(err) statelessBlock, err := block.NewBanffStandardBlock( diff --git a/vms/platformvm/signer/empty.go b/vms/platformvm/signer/empty.go index 7b5dec06cbcf..21412ae6d0b1 100644 --- a/vms/platformvm/signer/empty.go +++ b/vms/platformvm/signer/empty.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package signer diff --git a/vms/platformvm/signer/empty_test.go b/vms/platformvm/signer/empty_test.go index e6a6307b9842..9fe949f4677d 100644 --- a/vms/platformvm/signer/empty_test.go +++ b/vms/platformvm/signer/empty_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/vms/platformvm/signer/proof_of_possession.go b/vms/platformvm/signer/proof_of_possession.go index 35ddcb320745..8b32975b4969 100644 --- a/vms/platformvm/signer/proof_of_possession.go +++ b/vms/platformvm/signer/proof_of_possession.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/vms/platformvm/signer/proof_of_possession_test.go b/vms/platformvm/signer/proof_of_possession_test.go index 8214554cebfe..9f4f3feefa3c 100644 --- a/vms/platformvm/signer/proof_of_possession_test.go +++ b/vms/platformvm/signer/proof_of_possession_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/vms/platformvm/signer/signer.go b/vms/platformvm/signer/signer.go index 7269ad199534..31bf212ddca6 100644 --- a/vms/platformvm/signer/signer.go +++ b/vms/platformvm/signer/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/vms/platformvm/stakeable/stakeable_lock.go b/vms/platformvm/stakeable/stakeable_lock.go index 5c09cbfdda8a..58149266175e 100644 --- a/vms/platformvm/stakeable/stakeable_lock.go +++ b/vms/platformvm/stakeable/stakeable_lock.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package stakeable diff --git a/vms/platformvm/stakeable/stakeable_lock_test.go b/vms/platformvm/stakeable/stakeable_lock_test.go index b733aa0244c5..0ea53e9cc426 100644 --- a/vms/platformvm/stakeable/stakeable_lock_test.go +++ b/vms/platformvm/stakeable/stakeable_lock_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package stakeable diff --git a/vms/platformvm/state/camino.go b/vms/platformvm/state/camino.go index cc30238492d4..15fc4c839281 100644 --- a/vms/platformvm/state/camino.go +++ b/vms/platformvm/state/camino.go @@ -24,7 +24,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/multisig" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" @@ -72,7 +71,7 @@ var ( ) type CaminoApply interface { - ApplyCaminoState(State) error + ApplyCaminoState(Chain) error } type CaminoDiff interface { @@ -152,7 +151,6 @@ type Camino interface { LockedUTXOs(set.Set[ids.ID], set.Set[ids.ShortID], locked.State) ([]*avax.UTXO, error) CaminoConfig() (*CaminoConfig, error) - Config() (*config.Config, error) } // For state only @@ -194,8 +192,8 @@ type caminoState struct { genesisSynced bool verifyNodeSignature bool lockModeBondDeposit bool - baseFee uint64 - feeDistribution [dac.FeeDistributionFractionsCount]uint64 + baseFee *uint64 + feeDistribution *[dac.FeeDistributionFractionsCount]uint64 // Deferred Stakers deferredStakers *baseStakers @@ -448,12 +446,12 @@ func (cs *caminoState) syncGenesis(s *state, g *genesis.Genesis) error { } txIDs.Add(tx.ID()) - validatorTx, ok := tx.Unsigned.(txs.ValidatorTx) + validatorTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { - return fmt.Errorf("expected tx type txs.ValidatorTx but got %T", tx.Unsigned) + return fmt.Errorf("expected a scheduled staker but got %T", tx.Unsigned) } - staker, err := NewCurrentStaker(tx.ID(), validatorTx, 0) + staker, err := NewCurrentStaker(tx.ID(), validatorTx, validatorTx.StartTime(), 0) if err != nil { return err } @@ -549,28 +547,20 @@ func (cs *caminoState) Load(s *state) error { cs.lockModeBondDeposit = mode baseFee, err := database.GetUInt64(cs.caminoDB, baseFeeKey) - if err == database.ErrNotFound { - // if baseFee is not in db yet, than its first time when we access it - // and it should be equal to config base fee - config, err := s.Config() - if err != nil { - return err - } - baseFee = config.TxFee - } else if err != nil { + switch { + case err == nil: + cs.baseFee = &baseFee + case err != database.ErrNotFound: return err } - cs.baseFee = baseFee feeDistribution, err := database.GetUInt64Slice(cs.caminoDB, feeDistributionKey) - if err == database.ErrNotFound { - // if fee distribution is not in db yet, than its first time when we access it - // and it should be equal to hardcoded fee distribution - feeDistribution = s.cfg.CaminoConfig.FeeDistribution[:] - } else if err != nil { + switch { + case err == nil: + cs.feeDistribution = (*[3]uint64)(feeDistribution) + case err != database.ErrNotFound: return err } - cs.feeDistribution = *(*[dac.FeeDistributionFractionsCount]uint64)(feeDistribution) // TODO @evlekht change when mod go is >= 1.20 errs := wrappers.Errs{} errs.Add( @@ -592,9 +582,13 @@ func (cs *caminoState) Write() error { database.PutBool(cs.caminoDB, depositBondModeKey, cs.lockModeBondDeposit), ) } + if cs.baseFee != nil { + errs.Add(database.PutUInt64(cs.caminoDB, baseFeeKey, *cs.baseFee)) + } + if cs.feeDistribution != nil { + errs.Add(database.PutUInt64Slice(cs.caminoDB, feeDistributionKey, cs.feeDistribution[:])) + } errs.Add( - database.PutUInt64(cs.caminoDB, baseFeeKey, cs.baseFee), - database.PutUInt64Slice(cs.caminoDB, 
feeDistributionKey, cs.feeDistribution[:]), cs.writeAddressStates(), cs.writeDepositOffers(), cs.writeDeposits(), @@ -627,17 +621,23 @@ func (cs *caminoState) Close() error { } func (cs *caminoState) GetBaseFee() (uint64, error) { - return cs.baseFee, nil + if cs.baseFee == nil { + return 0, database.ErrNotFound + } + return *cs.baseFee, nil } func (cs *caminoState) SetBaseFee(baseFee uint64) { - cs.baseFee = baseFee + cs.baseFee = &baseFee } func (cs *caminoState) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { - return cs.feeDistribution, nil + if cs.feeDistribution == nil { + return [3]uint64{}, database.ErrNotFound + } + return *cs.feeDistribution, nil } func (cs *caminoState) SetFeeDistribution(feeDistribution [dac.FeeDistributionFractionsCount]uint64) { - cs.feeDistribution = feeDistribution + cs.feeDistribution = &feeDistribution } diff --git a/vms/platformvm/state/camino_claimable.go b/vms/platformvm/state/camino_claimable.go index cac2a875aa47..4190b4bab64a 100644 --- a/vms/platformvm/state/camino_claimable.go +++ b/vms/platformvm/state/camino_claimable.go @@ -88,7 +88,7 @@ func (cs *caminoState) writeClaimableAndValidatorRewards() error { return err } } else { - claimableBytes, err := block.GenesisCodec.Marshal(block.Version, claimable) + claimableBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, claimable) if err != nil { return fmt.Errorf("failed to serialize claimable: %w", err) } diff --git a/vms/platformvm/state/camino_claimable_test.go b/vms/platformvm/state/camino_claimable_test.go index 1a5338027379..ecddab09a6e8 100644 --- a/vms/platformvm/state/camino_claimable_test.go +++ b/vms/platformvm/state/camino_claimable_test.go @@ -20,7 +20,7 @@ import ( func TestGetClaimable(t *testing.T) { claimableOwnerID := ids.ID{1} claimable := &Claimable{Owner: &secp256k1fx.OutputOwners{Addrs: []ids.ShortID{}}} - claimableBytes, err := block.GenesisCodec.Marshal(block.Version, claimable) + claimableBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, claimable) require.NoError(t, err) testError := errors.New("test error") @@ -314,7 +314,7 @@ func TestWriteClaimableAndValidatorRewards(t *testing.T) { claimableOwnerID1 := ids.ID{1} claimableOwnerID2 := ids.ID{2} claimable1 := &Claimable{Owner: &secp256k1fx.OutputOwners{}, ValidatorReward: 1, ExpiredDepositReward: 2} - claimableBytes1, err := block.GenesisCodec.Marshal(block.Version, claimable1) + claimableBytes1, err := block.GenesisCodec.Marshal(block.CodecVersion, claimable1) require.NoError(t, err) tests := map[string]struct { diff --git a/vms/platformvm/state/camino_deposit.go b/vms/platformvm/state/camino_deposit.go index 0a0eed194a90..9d06c59741fe 100644 --- a/vms/platformvm/state/camino_deposit.go +++ b/vms/platformvm/state/camino_deposit.go @@ -157,7 +157,7 @@ func (cs *caminoState) writeDeposits() error { return err } } else { - depositBytes, err := block.GenesisCodec.Marshal(block.Version, depositDiff.Deposit) + depositBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositDiff.Deposit) if err != nil { return fmt.Errorf("failed to serialize deposit: %w", err) } diff --git a/vms/platformvm/state/camino_deposit_offer.go b/vms/platformvm/state/camino_deposit_offer.go index a46282527e96..94e1fd26380e 100644 --- a/vms/platformvm/state/camino_deposit_offer.go +++ b/vms/platformvm/state/camino_deposit_offer.go @@ -85,7 +85,7 @@ func (cs *caminoState) writeDepositOffers() error { } delete(cs.depositOffers, offerID) } else { - offerBytes, err := 
block.GenesisCodec.Marshal(block.Version, offer) + offerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, offer) if err != nil { return fmt.Errorf("failed to serialize deposit offer: %w", err) } diff --git a/vms/platformvm/state/camino_deposit_offer_test.go b/vms/platformvm/state/camino_deposit_offer_test.go index d21a71a07169..dee0c41c3965 100644 --- a/vms/platformvm/state/camino_deposit_offer_test.go +++ b/vms/platformvm/state/camino_deposit_offer_test.go @@ -283,13 +283,13 @@ func TestWriteDepositOffers(t *testing.T) { depositOffer0_3 := &deposit.Offer{ID: ids.ID{3}} depositOffer0_4 := &deposit.Offer{ID: ids.ID{4}} depositOffer1_5 := &deposit.Offer{ID: ids.ID{5}, UpgradeVersionID: codec.UpgradeVersion1} - depositOffer2modifiedBytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_2modified) + depositOffer2modifiedBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_2modified) require.NoError(t, err) - depositOffer2Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_2) + depositOffer2Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_2) require.NoError(t, err) - depositOffer3Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_3) + depositOffer3Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_3) require.NoError(t, err) - depositOffer5Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer1_5) + depositOffer5Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer1_5) require.NoError(t, err) testError := errors.New("test error") @@ -404,13 +404,13 @@ func TestLoadDepositOffers(t *testing.T) { depositOffer1_4 := &deposit.Offer{ UpgradeVersionID: codec.UpgradeVersion1, ID: ids.ID{4}, Memo: []byte("4"), } - depositOffer1Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_1) + depositOffer1Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_1) require.NoError(t, err) - depositOffer2Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_2) + depositOffer2Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_2) require.NoError(t, err) - depositOffer3Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer0_3) + depositOffer3Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer0_3) require.NoError(t, err) - depositOffer4Bytes, err := block.GenesisCodec.Marshal(block.Version, depositOffer1_4) + depositOffer4Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, depositOffer1_4) require.NoError(t, err) tests := map[string]struct { diff --git a/vms/platformvm/state/camino_deposit_test.go b/vms/platformvm/state/camino_deposit_test.go index 941a193e8986..5aec81b3754f 100644 --- a/vms/platformvm/state/camino_deposit_test.go +++ b/vms/platformvm/state/camino_deposit_test.go @@ -28,7 +28,7 @@ func TestGetDeposit(t *testing.T) { Addrs: []ids.ShortID{{1}}, }, } - depositBytes, err := block.GenesisCodec.Marshal(block.Version, deposit1) + depositBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, deposit1) require.NoError(t, err) testError := errors.New("test error") @@ -608,9 +608,9 @@ func TestWriteDeposits(t *testing.T) { }, } depositEndtime := deposit2.EndTime() - deposit1Bytes, err := block.GenesisCodec.Marshal(block.Version, deposit1) + deposit1Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, deposit1) require.NoError(t, err) - deposit2Bytes, err := block.GenesisCodec.Marshal(block.Version, deposit2) + 
deposit2Bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, deposit2) require.NoError(t, err) tests := map[string]struct { diff --git a/vms/platformvm/state/camino_diff.go b/vms/platformvm/state/camino_diff.go index 3da77f0fd62e..fc5c89d4052b 100644 --- a/vms/platformvm/state/camino_diff.go +++ b/vms/platformvm/state/camino_diff.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/multisig" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -79,14 +78,6 @@ func (d *diff) LockedUTXOs(txIDs set.Set[ids.ID], addresses set.Set[ids.ShortID] return retUtxos, nil } -func (d *diff) Config() (*config.Config, error) { - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.Config() -} - func (d *diff) CaminoConfig() (*CaminoConfig, error) { parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { @@ -652,7 +643,7 @@ func (d *diff) SetFeeDistribution(feeDistribution [dac.FeeDistributionFractionsC } // Finally apply all changes -func (d *diff) ApplyCaminoState(baseState State) error { +func (d *diff) ApplyCaminoState(baseState Chain) error { if d.caminoDiff.modifiedNotDistributedValidatorReward != nil { baseState.SetNotDistributedValidatorReward(*d.caminoDiff.modifiedNotDistributedValidatorReward) } diff --git a/vms/platformvm/state/camino_diff_test.go b/vms/platformvm/state/camino_diff_test.go index ea470cc166d2..99dd96317828 100644 --- a/vms/platformvm/state/camino_diff_test.go +++ b/vms/platformvm/state/camino_diff_test.go @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/multisig" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -1896,62 +1895,6 @@ func TestDiffLockedUTXOs(t *testing.T) { } } -func TestDiffConfig(t *testing.T) { - parentStateID := ids.GenerateTestID() - testErr := errors.New("test err") - - tests := map[string]struct { - diff func(*gomock.Controller) *diff - expectedDiff func(*diff) *diff - expectedConfig *config.Config - expectedErr error - }{ - "OK": { - diff: func(c *gomock.Controller) *diff { - parentState := NewMockChain(c) - parentState.EXPECT().Config().Return(&config.Config{TxFee: 111}, nil) - return &diff{ - stateVersions: newMockStateVersions(c, parentStateID, parentState), - parentID: parentStateID, - } - }, - expectedDiff: func(actualDiff *diff) *diff { - return &diff{ - stateVersions: actualDiff.stateVersions, - parentID: actualDiff.parentID, - } - }, - expectedConfig: &config.Config{TxFee: 111}, - }, - "Fail: parent errored": { - diff: func(c *gomock.Controller) *diff { - parentState := NewMockChain(c) - parentState.EXPECT().Config().Return(nil, testErr) - return &diff{ - stateVersions: newMockStateVersions(c, parentStateID, parentState), - parentID: parentStateID, - } - }, - expectedDiff: func(actualDiff *diff) *diff { - return &diff{ - stateVersions: actualDiff.stateVersions, - parentID: actualDiff.parentID, - } - }, - 
expectedErr: testErr, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - actualDiff := tt.diff(gomock.NewController(t)) - config, err := actualDiff.Config() - require.ErrorIs(t, err, tt.expectedErr) - require.Equal(t, tt.expectedConfig, config) - require.Equal(t, tt.expectedDiff(actualDiff), actualDiff) - }) - } -} - func TestDiffCaminoConfig(t *testing.T) { parentStateID := ids.GenerateTestID() testErr := errors.New("test err") diff --git a/vms/platformvm/state/camino_helpers_test.go b/vms/platformvm/state/camino_helpers_test.go index a4d007edf670..70f73085bf3c 100644 --- a/vms/platformvm/state/camino_helpers_test.go +++ b/vms/platformvm/state/camino_helpers_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -70,7 +69,6 @@ func newEmptyState(t *testing.T) *state { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - &utils.Atomic[bool]{}, ) require.NoError(t, err) require.NotNil(t, newState) diff --git a/vms/platformvm/state/camino_multisig_alias.go b/vms/platformvm/state/camino_multisig_alias.go index 306290a7b485..9db5601d22b3 100644 --- a/vms/platformvm/state/camino_multisig_alias.go +++ b/vms/platformvm/state/camino_multisig_alias.go @@ -78,7 +78,7 @@ func (cs *caminoState) writeMultisigAliases() error { Owners: alias.Owners, Nonce: alias.Nonce, } - aliasBytes, err := block.GenesisCodec.Marshal(block.Version, multisigAlias) + aliasBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, multisigAlias) if err != nil { return fmt.Errorf("failed to serialize multisig alias: %w", err) } diff --git a/vms/platformvm/state/camino_multisig_alias_test.go b/vms/platformvm/state/camino_multisig_alias_test.go index f5a50ed92772..59bdac116a88 100644 --- a/vms/platformvm/state/camino_multisig_alias_test.go +++ b/vms/platformvm/state/camino_multisig_alias_test.go @@ -26,7 +26,7 @@ func TestGetMultisigAlias(t *testing.T) { Memo: []byte("multisigAlias memo"), }, } - multisigAliasBytes, err := block.GenesisCodec.Marshal(block.Version, &msigAlias{ + multisigAliasBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &msigAlias{ Owners: multisigAlias.Owners, Memo: multisigAlias.Memo, }) @@ -232,7 +232,7 @@ func TestSetMultisigAlias(t *testing.T) { func TestWriteMultisigAliases(t *testing.T) { multisigAlias1 := &multisig.AliasWithNonce{Alias: multisig.Alias{ID: ids.ShortID{1}, Owners: &secp256k1fx.OutputOwners{}}} multisigAlias2 := &multisig.AliasWithNonce{Alias: multisig.Alias{ID: ids.ShortID{2}, Owners: &secp256k1fx.OutputOwners{}}} - multisigAliasBytes1, err := block.GenesisCodec.Marshal(block.Version, &msigAlias{Owners: multisigAlias1.Owners}) + multisigAliasBytes1, err := block.GenesisCodec.Marshal(block.CodecVersion, &msigAlias{Owners: multisigAlias1.Owners}) require.NoError(t, err) testError := errors.New("test error") diff --git a/vms/platformvm/state/camino_proposal.go b/vms/platformvm/state/camino_proposal.go index 42525fbdb6d7..63dd6c6ba8b7 100644 --- a/vms/platformvm/state/camino_proposal.go +++ b/vms/platformvm/state/camino_proposal.go @@ -198,7 +198,7 @@ func (cs *caminoState) writeProposals() error { return err } } else { - proposalBytes, err := dac.Codec.Marshal(block.Version, &proposalStateWrapper{ProposalState: 
proposalDiff.Proposal}) + proposalBytes, err := dac.Codec.Marshal(block.CodecVersion, &proposalStateWrapper{ProposalState: proposalDiff.Proposal}) if err != nil { return fmt.Errorf("failed to serialize deposit: %w", err) } diff --git a/vms/platformvm/state/camino_proposal_test.go b/vms/platformvm/state/camino_proposal_test.go index c06742275673..18f04977bd7f 100644 --- a/vms/platformvm/state/camino_proposal_test.go +++ b/vms/platformvm/state/camino_proposal_test.go @@ -33,7 +33,7 @@ func TestGetProposal(t *testing.T) { TotalAllowedVoters: 5, }, } - proposalBytes, err := dac.Codec.Marshal(block.Version, wrapper) + proposalBytes, err := dac.Codec.Marshal(block.CodecVersion, wrapper) require.NoError(t, err) testError := errors.New("test error") @@ -845,9 +845,9 @@ func TestWriteProposals(t *testing.T) { }} proposalEndtime := proposalWrapper2.EndTime() - proposal1Bytes, err := dac.Codec.Marshal(block.Version, proposalWrapper1) + proposal1Bytes, err := dac.Codec.Marshal(block.CodecVersion, proposalWrapper1) require.NoError(t, err) - proposal2Bytes, err := dac.Codec.Marshal(block.Version, proposalWrapper2) + proposal2Bytes, err := dac.Codec.Marshal(block.CodecVersion, proposalWrapper2) require.NoError(t, err) tests := map[string]struct { diff --git a/vms/platformvm/state/camino_stakers.go b/vms/platformvm/state/camino_stakers.go index 948a0b12cf88..986939609ac3 100644 --- a/vms/platformvm/state/camino_stakers.go +++ b/vms/platformvm/state/camino_stakers.go @@ -5,6 +5,7 @@ package state import ( "fmt" + "time" "github.com/ava-labs/avalanchego/database" @@ -52,7 +53,12 @@ func (cs *caminoState) loadDeferredValidators(s *state) error { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker, err := NewCurrentStaker(txID, stakerTx, 0) + var startTime time.Time + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + startTime = scheduledStakerTx.StartTime() + } + + staker, err := NewCurrentStaker(txID, stakerTx, startTime, 0) if err != nil { return err } diff --git a/vms/platformvm/state/camino_state.go b/vms/platformvm/state/camino_state.go index 22b8ef894127..129b3cc0d2a6 100644 --- a/vms/platformvm/state/camino_state.go +++ b/vms/platformvm/state/camino_state.go @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/multisig" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -42,10 +41,6 @@ func (s *state) LockedUTXOs(txIDs set.Set[ids.ID], addresses set.Set[ids.ShortID return retUtxos, nil } -func (s *state) Config() (*config.Config, error) { - return s.cfg, nil -} - func (s *state) CaminoConfig() (*CaminoConfig, error) { return s.caminoState.CaminoConfig(), nil } diff --git a/vms/platformvm/state/camino_test.go b/vms/platformvm/state/camino_test.go index 24a7e3966cde..0f366f67dc72 100644 --- a/vms/platformvm/state/camino_test.go +++ b/vms/platformvm/state/camino_test.go @@ -163,9 +163,9 @@ func getExpectedSupply( func TestSyncGenesis(t *testing.T) { require := require.New(t) - s, _ := newInitializedState(require) + s := newInitializedState(require) db := memdb.New() - validatorsDB := prefixdb.New(validatorsPrefix, db) + validatorsDB := prefixdb.New(ValidatorsPrefix, db) var ( id = ids.GenerateTestID() @@ -449,6 +449,8 @@ func 
defaultGenesisState(addresses []pvm_genesis.AddressState, deposits []*txs.T } func TestGetBaseFee(t *testing.T) { + baseFee := uint64(123) + tests := map[string]struct { caminoState *caminoState expectedCaminoState *caminoState @@ -456,8 +458,8 @@ func TestGetBaseFee(t *testing.T) { expectedErr error }{ "OK": { - caminoState: &caminoState{baseFee: 123}, - expectedCaminoState: &caminoState{baseFee: 123}, + caminoState: &caminoState{baseFee: &baseFee}, + expectedCaminoState: &caminoState{baseFee: &baseFee}, expectedBaseFee: 123, }, } @@ -472,6 +474,8 @@ func TestGetBaseFee(t *testing.T) { } func TestSetBaseFee(t *testing.T) { + baseFee := uint64(123) + tests := map[string]struct { baseFee uint64 caminoState *caminoState @@ -479,8 +483,8 @@ func TestSetBaseFee(t *testing.T) { }{ "OK": { baseFee: 123, - caminoState: &caminoState{baseFee: 111}, - expectedCaminoState: &caminoState{baseFee: 123}, + caminoState: &caminoState{baseFee: &baseFee}, + expectedCaminoState: &caminoState{baseFee: &baseFee}, }, } for name, tt := range tests { diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 746cb593f42d..11ec54283ec3 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -27,7 +27,8 @@ import ( ) var ( - _ Diff = (*diff)(nil) + _ Diff = (*diff)(nil) + _ Versions = stateGetter{} ErrMissingParentState = errors.New("missing parent state") ) @@ -35,7 +36,7 @@ var ( type Diff interface { Chain - Apply(State) error + Apply(Chain) error CaminoApply } @@ -58,10 +59,8 @@ type diff struct { subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx - cachedSubnets []*txs.Tx - addedChains map[ids.ID][]*txs.Tx - cachedChains map[ids.ID][]*txs.Tx + addedChains map[ids.ID][]*txs.Tx addedRewardUTXOs map[ids.ID][]*avax.UTXO @@ -91,6 +90,20 @@ func NewDiff( }, nil } +type stateGetter struct { + state Chain +} + +func (s stateGetter) GetState(ids.ID) (Chain, bool) { + return s.state, true +} + +func NewDiffOn(parentState Chain) (Diff, error) { + return NewDiff(ids.Empty, stateGetter{ + state: parentState, + }) +} + func (d *diff) GetTimestamp() time.Time { return d.timestamp } @@ -274,41 +287,8 @@ func (d *diff) GetPendingStakerIterator() (StakerIterator, error) { return d.pendingStakerDiffs.GetStakerIterator(parentIterator), nil } -func (d *diff) GetSubnets() ([]*txs.Tx, error) { - if len(d.addedSubnets) == 0 { - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetSubnets() - } - - if len(d.cachedSubnets) != 0 { - return d.cachedSubnets, nil - } - - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - subnets, err := parentState.GetSubnets() - if err != nil { - return nil, err - } - newSubnets := make([]*txs.Tx, len(subnets)+len(d.addedSubnets)) - copy(newSubnets, subnets) - for i, subnet := range d.addedSubnets { - newSubnets[i+len(subnets)] = subnet - } - d.cachedSubnets = newSubnets - return newSubnets, nil -} - func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { d.addedSubnets = 
append(d.addedSubnets, createSubnetTx) - if d.cachedSubnets != nil { - d.cachedSubnets = append(d.cachedSubnets, createSubnetTx) - } } func (d *diff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -354,48 +334,6 @@ func (d *diff) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { } } -func (d *diff) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - addedChains := d.addedChains[subnetID] - if len(addedChains) == 0 { - // No chains have been added to this subnet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetChains(subnetID) - } - - // There have been chains added to the requested subnet - - if d.cachedChains == nil { - // This is the first time we are going to be caching the subnet chains - d.cachedChains = make(map[ids.ID][]*txs.Tx) - } - - cachedChains, cached := d.cachedChains[subnetID] - if cached { - return cachedChains, nil - } - - // This chain wasn't cached yet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - chains, err := parentState.GetChains(subnetID) - if err != nil { - return nil, err - } - - newChains := make([]*txs.Tx, len(chains)+len(addedChains)) - copy(newChains, chains) - for i, chain := range addedChains { - newChains[i+len(chains)] = chain - } - d.cachedChains[subnetID] = newChains - return newChains, nil -} - func (d *diff) AddChain(createChainTx *txs.Tx) { tx := createChainTx.Unsigned.(*txs.CreateChainTx) if d.addedChains == nil { @@ -405,12 +343,6 @@ func (d *diff) AddChain(createChainTx *txs.Tx) { } else { d.addedChains[tx.SubnetID] = append(d.addedChains[tx.SubnetID], createChainTx) } - - cachedChains, cached := d.cachedChains[tx.SubnetID] - if !cached { - return - } - d.cachedChains[tx.SubnetID] = append(cachedChains, createChainTx) } func (d *diff) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { @@ -440,18 +372,6 @@ func (d *diff) AddTx(tx *txs.Tx, status status.Status) { } } -func (d *diff) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := d.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetRewardUTXOs(txID) -} - func (d *diff) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { if d.addedRewardUTXOs == nil { d.addedRewardUTXOs = make(map[ids.ID][]*avax.UTXO) @@ -494,7 +414,7 @@ func (d *diff) DeleteUTXO(utxoID ids.ID) { } } -func (d *diff) Apply(baseState State) error { +func (d *diff) Apply(baseState Chain) error { baseState.SetTimestamp(d.timestamp) for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index c35fb925594b..e8d51039bdba 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -38,7 +38,7 @@ func TestDiffCreation(t *testing.T) { ctrl := gomock.NewController(t) lastAcceptedID := ids.GenerateTestID() - state, _ := newInitializedState(require) + state := newInitializedState(require) versions := NewMockVersions(ctrl) versions.EXPECT().GetState(lastAcceptedID).AnyTimes().Return(state, true) @@ -52,7 +52,7 @@ func TestDiffCurrentSupply(t *testing.T) { ctrl := gomock.NewController(t) lastAcceptedID := ids.GenerateTestID() - state, _ := newInitializedState(require) + state := newInitializedState(require) versions := NewMockVersions(ctrl) versions.EXPECT().GetState(lastAcceptedID).AnyTimes().Return(state, true) @@ -250,15 +250,28 @@ func TestDiffSubnet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + + // Initialize parent with one subnet + parentStateCreateSubnetTx := &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + Owner: fx.NewMockOwner(ctrl), + }, + } + state.AddSubnet(parentStateCreateSubnetTx) + + // Verify parent returns one subnet + subnets, err := state.GetSubnets() + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + }, subnets) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a subnet @@ -267,60 +280,67 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - d.AddSubnet(createSubnetTx) + diff.AddSubnet(createSubnetTx) - // Assert that we get the subnet back - // [state] returns 1 subnet. 
- parentStateCreateSubnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: fx.NewMockOwner(ctrl), - }, - } - state.EXPECT().GetSubnets().Return([]*txs.Tx{parentStateCreateSubnetTx}, nil).Times(1) - gotSubnets, err := d.GetSubnets() + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two subnets + subnets, err = state.GetSubnets() require.NoError(err) - require.Len(gotSubnets, 2) - require.Equal(gotSubnets[0], parentStateCreateSubnetTx) - require.Equal(gotSubnets[1], createSubnetTx) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + createSubnetTx, + }, subnets) } func TestDiffChain(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + subnetID := ids.GenerateTestID() + + // Initialize parent with one chain + parentStateCreateChainTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{ + SubnetID: subnetID, + }, + } + state.AddChain(parentStateCreateChainTx) + + // Verify parent returns one chain + chains, err := state.GetChains(subnetID) + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + }, chains) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a chain - subnetID := ids.GenerateTestID() createChainTx := &txs.Tx{ Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, + SubnetID: subnetID, // note this is the same subnet as [parentStateCreateChainTx] }, } - d.AddChain(createChainTx) + diff.AddChain(createChainTx) - // Assert that we get the chain back - // [state] returns 1 chain. 
- parentStateCreateChainTx := &txs.Tx{ - Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, // note this is the same subnet as [createChainTx] - }, - } - state.EXPECT().GetChains(subnetID).Return([]*txs.Tx{parentStateCreateChainTx}, nil).Times(1) - gotChains, err := d.GetChains(subnetID) + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two chains + chains, err = state.GetChains(subnetID) require.NoError(err) - require.Len(gotChains, 2) - require.Equal(parentStateCreateChainTx, gotChains[0]) - require.Equal(createChainTx, gotChains[1]) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + createChainTx, + }, chains) } func TestDiffTx(t *testing.T) { @@ -377,45 +397,46 @@ func TestDiffRewardUTXO(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + + txID := ids.GenerateTestID() + + // Initialize parent with one reward UTXO + parentRewardUTXO := &avax.UTXO{ + UTXOID: avax.UTXOID{TxID: txID}, + } + state.AddRewardUTXO(txID, parentRewardUTXO) + + // Verify parent returns the reward UTXO + rewardUTXOs, err := state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + }, rewardUTXOs) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a reward UTXO - txID := ids.GenerateTestID() rewardUTXO := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: txID}, } - d.AddRewardUTXO(txID, rewardUTXO) + diff.AddRewardUTXO(txID, rewardUTXO) - { - // Assert that we get the UTXO back - gotRewardUTXOs, err := d.GetRewardUTXOs(txID) - require.NoError(err) - require.Len(gotRewardUTXOs, 1) - require.Equal(rewardUTXO, gotRewardUTXOs[0]) - } + // Apply diff to parent state + require.NoError(diff.Apply(state)) - { - // Assert that we can get a UTXO from the parent state - // [state] returns 1 UTXO. 
- txID2 := ids.GenerateTestID() - parentRewardUTXO := &avax.UTXO{ - UTXOID: avax.UTXOID{TxID: txID2}, - } - state.EXPECT().GetRewardUTXOs(txID2).Return([]*avax.UTXO{parentRewardUTXO}, nil).Times(1) - gotParentRewardUTXOs, err := d.GetRewardUTXOs(txID2) - require.NoError(err) - require.Len(gotParentRewardUTXOs, 1) - require.Equal(parentRewardUTXO, gotParentRewardUTXOs[0]) - } + // Verify parent now returns two reward UTXOs + rewardUTXOs, err = state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + rewardUTXO, + }, rewardUTXOs) } func TestDiffUTXO(t *testing.T) { @@ -496,32 +517,13 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { require.NoError(err) require.Equal(expectedCurrentSupply, actualCurrentSupply) - - expectedSubnets, expectedErr := expected.GetSubnets() - actualSubnets, actualErr := actual.GetSubnets() - require.Equal(expectedErr, actualErr) - if expectedErr == nil { - require.Equal(expectedSubnets, actualSubnets) - - for _, subnet := range expectedSubnets { - subnetID := subnet.ID() - - expectedChains, expectedErr := expected.GetChains(subnetID) - actualChains, actualErr := actual.GetChains(subnetID) - require.Equal(expectedErr, actualErr) - if expectedErr != nil { - continue - } - require.Equal(expectedChains, actualChains) - } - } } func TestDiffSubnetOwner(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -578,3 +580,97 @@ func TestDiffSubnetOwner(t *testing.T) { require.NoError(err) require.Equal(owner2, owner) } + +func TestDiffStacking(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + state := newInitializedState(require) + + states := NewMockVersions(ctrl) + lastAcceptedID := ids.GenerateTestID() + states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() + + var ( + owner1 = fx.NewMockOwner(ctrl) + owner2 = fx.NewMockOwner(ctrl) + owner3 = fx.NewMockOwner(ctrl) + + createSubnetTx = &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{}, + Owner: owner1, + }, + } + + subnetID = createSubnetTx.ID() + ) + + // Create subnet on base state + owner, err := state.GetSubnetOwner(subnetID) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(owner) + + state.AddSubnet(createSubnetTx) + state.SetSubnetOwner(subnetID, owner1) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Create first diff and verify that subnet owner returns correctly + statesDiff, err := NewDiff(lastAcceptedID, states) + require.NoError(err) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Transferring subnet ownership on first diff should be reflected on first diff not state + statesDiff.SetSubnetOwner(subnetID, owner2) + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Create a second diff on first diff and verify that subnet owner returns correctly + stackedDiff, err := NewDiffOn(statesDiff) + require.NoError(err) + owner, err = stackedDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + // Transfer ownership on stacked diff and verify it is only reflected on stacked diff + 
stackedDiff.SetSubnetOwner(subnetID, owner3) + owner, err = stackedDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Applying both diffs successively should work as expected. + require.NoError(stackedDiff.Apply(statesDiff)) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + require.NoError(statesDiff.Apply(state)) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) +} diff --git a/vms/platformvm/state/disk_staker_diff_iterator.go b/vms/platformvm/state/disk_staker_diff_iterator.go index 44ee1ed87180..1c6e88338724 100644 --- a/vms/platformvm/state/disk_staker_diff_iterator.go +++ b/vms/platformvm/state/disk_staker_diff_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -43,7 +43,7 @@ func marshalDiffKey(subnetID ids.ID, height uint64, nodeID ids.NodeID) []byte { key := make([]byte, diffKeyLength) copy(key, subnetID[:]) packIterableHeight(key[ids.IDLen:], height) - copy(key[diffKeyNodeIDOffset:], nodeID[:]) + copy(key[diffKeyNodeIDOffset:], nodeID.Bytes()) return key } diff --git a/vms/platformvm/state/disk_staker_diff_iterator_test.go b/vms/platformvm/state/disk_staker_diff_iterator_test.go index 9439428937b5..af719e7a0beb 100644 --- a/vms/platformvm/state/disk_staker_diff_iterator_test.go +++ b/vms/platformvm/state/disk_staker_diff_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -58,8 +58,8 @@ func TestDiffIteration(t *testing.T) { subnetID0 := ids.GenerateTestID() subnetID1 := ids.GenerateTestID() - nodeID0 := ids.NodeID{0x00} - nodeID1 := ids.NodeID{0x01} + nodeID0 := ids.BuildTestNodeID([]byte{0x00}) + nodeID1 := ids.BuildTestNodeID([]byte{0x01}) subnetID0Height0NodeID0 := marshalDiffKey(subnetID0, 0, nodeID0) subnetID0Height1NodeID0 := marshalDiffKey(subnetID0, 1, nodeID0) diff --git a/vms/platformvm/state/empty_iterator.go b/vms/platformvm/state/empty_iterator.go index 69766c194fce..3ec5f04f82a9 100644 --- a/vms/platformvm/state/empty_iterator.go +++ b/vms/platformvm/state/empty_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/empty_iterator_test.go b/vms/platformvm/state/empty_iterator_test.go index b5bb43d1f640..19cd4f06dc06 100644 --- a/vms/platformvm/state/empty_iterator_test.go +++ b/vms/platformvm/state/empty_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
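The reworked diff tests above (TestDiffSubnet, TestDiffChain, TestDiffRewardUTXO and the new TestDiffStacking) all exercise one pattern: reads on a diff fall through to the parent state, writes stay buffered in the diff, and Apply folds those writes into the layer below, so stacked diffs are applied one level at a time. The following is a minimal, self-contained sketch of that layering; it does not use the platformvm state package, and the base/diff types and their methods are invented purely for illustration.

```go
package main

import "fmt"

// base is a toy stand-in for the bottom-most state: it answers reads
// directly and accepts writes when a diff is applied onto it.
type base map[string]string

func (b base) Get(key string) (string, bool) {
	v, ok := b[key]
	return v, ok
}

func (b base) Set(key, value string) {
	b[key] = value
}

// getter is the read side shared by base and diff, so diffs can stack.
type getter interface {
	Get(key string) (string, bool)
}

// setter is the write side that Apply pushes buffered changes into.
type setter interface {
	Set(key, value string)
}

// diff buffers writes on top of a parent; reads that miss the buffer fall
// through to the parent layer.
type diff struct {
	parent getter
	writes map[string]string
}

func newDiff(parent getter) *diff {
	return &diff{parent: parent, writes: map[string]string{}}
}

func (d *diff) Get(key string) (string, bool) {
	if v, ok := d.writes[key]; ok {
		return v, true
	}
	return d.parent.Get(key)
}

func (d *diff) Set(key, value string) {
	d.writes[key] = value
}

// Apply folds this diff's buffered writes into the layer below it.
func (d *diff) Apply(below setter) {
	for k, v := range d.writes {
		below.Set(k, v)
	}
}

func main() {
	state := base{"owner": "owner1"}

	d1 := newDiff(state)
	d1.Set("owner", "owner2")

	d2 := newDiff(d1)
	d2.Set("owner", "owner3")

	v, _ := d2.Get("owner")
	fmt.Println("stacked diff sees:", v)             // owner3
	fmt.Println("base still holds: ", state["owner"]) // owner1

	// Apply top-down, as TestDiffStacking does: d2 onto d1, then d1 onto state.
	d2.Apply(d1)
	d1.Apply(state)
	fmt.Println("base after apply: ", state["owner"]) // owner3
}
```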
package state diff --git a/vms/platformvm/state/masked_iterator.go b/vms/platformvm/state/masked_iterator.go index 8551a05889f6..9ceee9712b40 100644 --- a/vms/platformvm/state/masked_iterator.go +++ b/vms/platformvm/state/masked_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/masked_iterator_test.go b/vms/platformvm/state/masked_iterator_test.go index 8ba719d3e732..ccc37d6ffb3d 100644 --- a/vms/platformvm/state/masked_iterator_test.go +++ b/vms/platformvm/state/masked_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/merged_iterator.go b/vms/platformvm/state/merged_iterator.go index 6c5bdafe801e..059001b3144f 100644 --- a/vms/platformvm/state/merged_iterator.go +++ b/vms/platformvm/state/merged_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/merged_iterator_test.go b/vms/platformvm/state/merged_iterator_test.go index c85b35941b0f..e6cd52451f73 100644 --- a/vms/platformvm/state/merged_iterator_test.go +++ b/vms/platformvm/state/merged_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/metadata_codec.go b/vms/platformvm/state/metadata_codec.go index 6240bbd879ca..65832ed77460 100644 --- a/vms/platformvm/state/metadata_codec.go +++ b/vms/platformvm/state/metadata_codec.go @@ -1,27 +1,36 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" ) const ( - v0tag = "v0" - v0 = uint16(0) + CodecVersion0Tag = "v0" + CodecVersion0 uint16 = 0 + + CodecVersion1Tag = "v1" + CodecVersion1 uint16 = 1 ) -var metadataCodec codec.Manager +var MetadataCodec codec.Manager func init() { - c := linearcodec.New([]string{v0tag}, math.MaxInt32) - metadataCodec = codec.NewManager(math.MaxInt32) + c0 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag}, math.MaxInt32) + c1 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag, CodecVersion1Tag}, math.MaxInt32) + MetadataCodec = codec.NewManager(math.MaxInt32) - err := metadataCodec.RegisterCodec(v0, c) + err := utils.Err( + MetadataCodec.RegisterCodec(CodecVersion0, c0), + MetadataCodec.RegisterCodec(CodecVersion1, c1), + ) if err != nil { panic(err) } diff --git a/vms/platformvm/state/metadata_delegator.go b/vms/platformvm/state/metadata_delegator.go index 04e7ef6a8795..06099d813cde 100644 --- a/vms/platformvm/state/metadata_delegator.go +++ b/vms/platformvm/state/metadata_delegator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
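The metadata_codec.go change above registers two versions on the exported MetadataCodec: the v0 codec serializes only fields tagged v0, while the v1 codec also serializes fields tagged v1 (such as the new StakerStartTime). The sketch below roughly illustrates that behaviour, assuming it is compiled against the avalanchego module as modified on this branch; exampleMetadata is an invented stand-in for the unexported metadata structs.

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/vms/platformvm/state"
)

// exampleMetadata mimics the tag scheme used by the metadata structs: the
// v0-tagged field exists in both encodings, the v1-tagged field only in
// CodecVersion1.
type exampleMetadata struct {
	PotentialReward uint64 `v0:"true"`
	StakerStartTime uint64 `v1:"true"`
}

func main() {
	in := exampleMetadata{PotentialReward: 123, StakerStartTime: 456}

	// CodecVersion0 writes only the v0-tagged field (after the 2-byte
	// version prefix added by the codec manager).
	v0Bytes, err := state.MetadataCodec.Marshal(state.CodecVersion0, &in)
	if err != nil {
		panic(err)
	}

	// CodecVersion1 writes both the v0- and v1-tagged fields.
	v1Bytes, err := state.MetadataCodec.Marshal(state.CodecVersion1, &in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("v0: % x\n", v0Bytes)
	fmt.Printf("v1: % x\n", v1Bytes)

	// Unmarshal reads the version prefix and fills only the fields that
	// version carries, so StakerStartTime stays zero for a v0 blob.
	var out exampleMetadata
	if _, err := state.MetadataCodec.Unmarshal(v0Bytes, &out); err != nil {
		panic(err)
	}
	fmt.Println("decoded v0:", out.PotentialReward, out.StakerStartTime)
}
```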
// See the file LICENSE for licensing terms. package state @@ -9,17 +9,36 @@ import ( ) type delegatorMetadata struct { - PotentialReward uint64 + PotentialReward uint64 `v1:"true"` + StakerStartTime uint64 `v1:"true"` txID ids.ID } func parseDelegatorMetadata(bytes []byte, metadata *delegatorMetadata) error { var err error - metadata.PotentialReward, err = database.ParseUInt64(bytes) + switch len(bytes) { + case database.Uint64Size: + // only potential reward was stored + metadata.PotentialReward, err = database.ParseUInt64(bytes) + default: + _, err = MetadataCodec.Unmarshal(bytes, metadata) + } return err } -func writeDelegatorMetadata(db database.KeyValueWriter, metadata *delegatorMetadata) error { - return database.PutUInt64(db, metadata.txID[:], metadata.PotentialReward) +func writeDelegatorMetadata(db database.KeyValueWriter, metadata *delegatorMetadata, codecVersion uint16) error { + // The "0" codec is skipped for [delegatorMetadata]. This is to ensure the + // [validatorMetadata] codec version is the same as the [delegatorMetadata] + // codec version. + // + // TODO: Cleanup post-Durango activation. + if codecVersion == 0 { + return database.PutUInt64(db, metadata.txID[:], metadata.PotentialReward) + } + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) + if err != nil { + return err + } + return db.Put(metadata.txID[:], metadataBytes) } diff --git a/vms/platformvm/state/metadata_delegator_test.go b/vms/platformvm/state/metadata_delegator_test.go new file mode 100644 index 000000000000..9c9d6c1c0044 --- /dev/null +++ b/vms/platformvm/state/metadata_delegator_test.go @@ -0,0 +1,141 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +func TestParseDelegatorMetadata(t *testing.T) { + type test struct { + name string + bytes []byte + expected *delegatorMetadata + expectedErr error + } + tests := []test{ + { + name: "potential reward only no codec", + bytes: []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + expected: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 0, + }, + expectedErr: nil, + }, + { + name: "potential reward + staker start time with codec v1", + bytes: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + expected: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expectedErr: nil, + }, + { + name: "invalid codec version", + bytes: []byte{ + // codec version + 0x00, 0x02, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + expected: nil, + expectedErr: codec.ErrUnknownVersion, + }, + { + name: "short byte len", + bytes: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + expected: nil, + expectedErr: wrappers.ErrInsufficientLength, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + var metadata delegatorMetadata + err := 
parseDelegatorMetadata(tt.bytes, &metadata) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(tt.expected, &metadata) + }) + } +} + +func TestWriteDelegatorMetadata(t *testing.T) { + type test struct { + name string + version uint16 + metadata *delegatorMetadata + expected []byte + } + tests := []test{ + { + name: CodecVersion0Tag, + version: CodecVersion0, + metadata: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expected: []byte{ + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + }, + { + name: CodecVersion1Tag, + version: CodecVersion1, + metadata: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expected: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db := memdb.New() + tt.metadata.txID = ids.GenerateTestID() + require.NoError(writeDelegatorMetadata(db, tt.metadata, tt.version)) + bytes, err := db.Get(tt.metadata.txID[:]) + require.NoError(err) + require.Equal(tt.expected, bytes) + }) + } +} diff --git a/vms/platformvm/state/metadata_validator.go b/vms/platformvm/state/metadata_validator.go index 6b839ccad801..0c725368505b 100644 --- a/vms/platformvm/state/metadata_validator.go +++ b/vms/platformvm/state/metadata_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -32,6 +32,7 @@ type validatorMetadata struct { LastUpdated uint64 `v0:"true"` // Unix time in seconds PotentialReward uint64 `v0:"true"` PotentialDelegateeReward uint64 `v0:"true"` + StakerStartTime uint64 ` v1:"true"` txID ids.ID lastUpdated time.Time @@ -58,7 +59,7 @@ func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { // potential reward and uptime was stored but potential delegatee reward // was not tmp := preDelegateeRewardMetadata{} - if _, err := metadataCodec.Unmarshal(bytes, &tmp); err != nil { + if _, err := MetadataCodec.Unmarshal(bytes, &tmp); err != nil { return err } @@ -67,7 +68,7 @@ func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { metadata.PotentialReward = tmp.PotentialReward default: // everything was stored - if _, err := metadataCodec.Unmarshal(bytes, metadata); err != nil { + if _, err := MetadataCodec.Unmarshal(bytes, metadata); err != nil { return err } } @@ -130,6 +131,7 @@ type validatorState interface { WriteValidatorMetadata( dbPrimary database.KeyValueWriter, dbSubnet database.KeyValueWriter, + codecVersion uint16, ) error } @@ -230,13 +232,14 @@ func (m *metadata) DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) { func (m *metadata) WriteValidatorMetadata( dbPrimary database.KeyValueWriter, dbSubnet database.KeyValueWriter, + codecVersion uint16, ) error { for vdrID, updatedSubnets := range m.updatedMetadata { for subnetID := range updatedSubnets { metadata := m.metadata[vdrID][subnetID] metadata.LastUpdated = uint64(metadata.lastUpdated.Unix()) - metadataBytes, err := metadataCodec.Marshal(v0, metadata) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { return err } diff --git a/vms/platformvm/state/metadata_validator_test.go 
b/vms/platformvm/state/metadata_validator_test.go index 68f18e62bd72..3a041a26b2eb 100644 --- a/vms/platformvm/state/metadata_validator_test.go +++ b/vms/platformvm/state/metadata_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -81,8 +81,9 @@ func TestWriteValidatorMetadata(t *testing.T) { primaryDB := memdb.New() subnetDB := memdb.New() + // write empty uptimes - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) // load uptime nodeID := ids.GenerateTestNodeID() @@ -96,7 +97,7 @@ func TestWriteValidatorMetadata(t *testing.T) { state.LoadValidatorMetadata(nodeID, subnetID, testUptimeReward) // write state, should not reflect to DB yet - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) require.False(primaryDB.Has(testUptimeReward.txID[:])) require.False(subnetDB.Has(testUptimeReward.txID[:])) @@ -112,7 +113,7 @@ func TestWriteValidatorMetadata(t *testing.T) { require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) // write uptimes, should reflect to subnet DB - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) require.False(primaryDB.Has(testUptimeReward.txID[:])) require.True(subnetDB.Has(testUptimeReward.txID[:])) } @@ -252,7 +253,7 @@ func TestParseValidatorMetadata(t *testing.T) { name: "invalid codec version", bytes: []byte{ // codec version - 0x00, 0x01, + 0x00, 0x02, // up duration 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, // last updated diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go deleted file mode 100644 index 3f35686b57d0..000000000000 --- a/vms/platformvm/state/mock_chain.go +++ /dev/null @@ -1,1081 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Chain) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - set "github.com/ava-labs/avalanchego/utils/set" - avax "github.com/ava-labs/avalanchego/vms/components/avax" - multisig "github.com/ava-labs/avalanchego/vms/components/multisig" - addrstate "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" - config "github.com/ava-labs/avalanchego/vms/platformvm/config" - dac "github.com/ava-labs/avalanchego/vms/platformvm/dac" - deposit "github.com/ava-labs/avalanchego/vms/platformvm/deposit" - fx "github.com/ava-labs/avalanchego/vms/platformvm/fx" - locked "github.com/ava-labs/avalanchego/vms/platformvm/locked" - status "github.com/ava-labs/avalanchego/vms/platformvm/status" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "go.uber.org/mock/gomock" -) - -// MockChain is a mock of Chain interface. -type MockChain struct { - ctrl *gomock.Controller - recorder *MockChainMockRecorder -} - -// MockChainMockRecorder is the mock recorder for MockChain. -type MockChainMockRecorder struct { - mock *MockChain -} - -// NewMockChain creates a new mock instance. 
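The delegator and validator metadata changes above keep two on-disk layouts side by side: a legacy v0 delegator record is a bare big-endian uint64 potential reward, while a v1 record is codec-encoded with a 2-byte version prefix, and parseDelegatorMetadata dispatches on the byte length. The standalone sketch below reproduces the byte layouts from the test vectors using only encoding/binary; encodeV0, encodeV1 and parse are hypothetical helpers for illustration, not the package's API.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Wire layouts taken from the test vectors above:
//   v0:  8 bytes -> big-endian potential reward only
//   v1: 18 bytes -> 2-byte codec version (0x0001), reward, staker start time
const (
	v0Len = 8
	v1Len = 2 + 8 + 8
)

func encodeV0(reward uint64) []byte {
	b := make([]byte, v0Len)
	binary.BigEndian.PutUint64(b, reward)
	return b
}

func encodeV1(reward, startTime uint64) []byte {
	b := make([]byte, v1Len)
	binary.BigEndian.PutUint16(b, 1) // codec version 1
	binary.BigEndian.PutUint64(b[2:], reward)
	binary.BigEndian.PutUint64(b[10:], startTime)
	return b
}

// parse mirrors the length-based dispatch in parseDelegatorMetadata: an
// 8-byte value is a legacy reward-only record, anything else must carry a
// supported codec version prefix.
func parse(b []byte) (reward, startTime uint64, err error) {
	switch len(b) {
	case v0Len:
		return binary.BigEndian.Uint64(b), 0, nil
	case v1Len:
		if v := binary.BigEndian.Uint16(b); v != 1 {
			return 0, 0, fmt.Errorf("unknown codec version %d", v)
		}
		return binary.BigEndian.Uint64(b[2:]), binary.BigEndian.Uint64(b[10:]), nil
	default:
		return 0, 0, fmt.Errorf("unexpected length %d", len(b))
	}
}

func main() {
	for _, blob := range [][]byte{encodeV0(123), encodeV1(123, 456)} {
		r, s, err := parse(blob)
		fmt.Printf("% x -> reward=%d start=%d err=%v\n", blob, r, s, err)
	}
}
```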
-func NewMockChain(ctrl *gomock.Controller) *MockChain { - mock := &MockChain{ctrl: ctrl} - mock.recorder = &MockChainMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockChain) EXPECT() *MockChainMockRecorder { - return m.recorder -} - -// AddChain mocks base method. -func (m *MockChain) AddChain(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0) -} - -// AddChain indicates an expected call of AddChain. -func (mr *MockChainMockRecorder) AddChain(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockChain)(nil).AddChain), arg0) -} - -// SetDepositOffer mocks base method. -func (m *MockChain) SetDepositOffer(arg0 *deposit.Offer) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetDepositOffer", arg0) -} - -// SetDepositOffer indicates an expected call of SetDepositOffer. -func (mr *MockChainMockRecorder) SetDepositOffer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockChain)(nil).SetDepositOffer), arg0) -} - -// AddRewardUTXO mocks base method. -func (m *MockChain) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) -} - -// AddRewardUTXO indicates an expected call of AddRewardUTXO. -func (mr *MockChainMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockChain)(nil).AddRewardUTXO), arg0, arg1) -} - -// AddSubnet mocks base method. -func (m *MockChain) AddSubnet(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnet", arg0) -} - -// AddSubnet indicates an expected call of AddSubnet. -func (mr *MockChainMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockChain)(nil).AddSubnet), arg0) -} - -// AddSubnetTransformation mocks base method. -func (m *MockChain) AddSubnetTransformation(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnetTransformation", arg0) -} - -// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockChainMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockChain)(nil).AddSubnetTransformation), arg0) -} - -// AddTx mocks base method. -func (m *MockChain) AddTx(arg0 *txs.Tx, arg1 status.Status) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTx", arg0, arg1) -} - -// AddTx indicates an expected call of AddTx. -func (mr *MockChainMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0, arg1) -} - -// AddUTXO mocks base method. -func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUTXO", arg0) -} - -// AddUTXO indicates an expected call of AddUTXO. -func (mr *MockChainMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) -} - -// CaminoConfig mocks base method. 
-func (m *MockChain) CaminoConfig() (*CaminoConfig, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CaminoConfig") - ret0, _ := ret[0].(*CaminoConfig) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CaminoConfig indicates an expected call of CaminoConfig. -func (mr *MockChainMockRecorder) CaminoConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockChain)(nil).CaminoConfig)) -} - -// Config mocks base method. -func (m *MockChain) Config() (*config.Config, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Config") - ret0, _ := ret[0].(*config.Config) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Config indicates an expected call of Config. -func (mr *MockChainMockRecorder) Config() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Config", reflect.TypeOf((*MockChain)(nil).Config)) -} - -// DeleteCurrentDelegator mocks base method. -func (m *MockChain) DeleteCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) -} - -// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockChainMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentDelegator), arg0) -} - -// DeleteCurrentValidator mocks base method. -func (m *MockChain) DeleteCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentValidator", arg0) -} - -// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockChainMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentValidator), arg0) -} - -// DeletePendingDelegator mocks base method. -func (m *MockChain) DeletePendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingDelegator", arg0) -} - -// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. -func (mr *MockChainMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockChain)(nil).DeletePendingDelegator), arg0) -} - -// DeletePendingValidator mocks base method. -func (m *MockChain) DeletePendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingValidator", arg0) -} - -// DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockChainMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockChain)(nil).DeletePendingValidator), arg0) -} - -// DeleteUTXO mocks base method. -func (m *MockChain) DeleteUTXO(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteUTXO", arg0) -} - -// DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) -} - -// GetAddressStates mocks base method. 
-func (m *MockChain) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAddressStates", arg0) - ret0, _ := ret[0].(addrstate.AddressState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAddressStates indicates an expected call of GetAddressStates. -func (mr *MockChainMockRecorder) GetAddressStates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockChain)(nil).GetAddressStates), arg0) -} - -// GetAllDepositOffers mocks base method. -func (m *MockChain) GetAllDepositOffers() ([]*deposit.Offer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllDepositOffers") - ret0, _ := ret[0].([]*deposit.Offer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllDepositOffers indicates an expected call of GetAllDepositOffers. -func (mr *MockChainMockRecorder) GetAllDepositOffers() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockChain)(nil).GetAllDepositOffers)) -} - -// GetChains mocks base method. -func (m *MockChain) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockChainMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockChain)(nil).GetChains), arg0) -} - -// GetClaimable mocks base method. -func (m *MockChain) GetClaimable(arg0 ids.ID) (*Claimable, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClaimable", arg0) - ret0, _ := ret[0].(*Claimable) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetClaimable indicates an expected call of GetClaimable. -func (mr *MockChainMockRecorder) GetClaimable(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockChain)(nil).GetClaimable), arg0) -} - -// GetProposal mocks base method. -func (m *MockChain) GetProposal(arg0 ids.ID) (dac.ProposalState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposal", arg0) - ret0, _ := ret[0].(dac.ProposalState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProposal indicates an expected call of GetProposal. -func (mr *MockChainMockRecorder) GetProposal(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockChain)(nil).GetProposal), arg0) -} - -// GetBaseFee mocks base method. -func (m *MockChain) GetBaseFee() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBaseFee") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBaseFee indicates an expected call of GetBaseFee. -func (mr *MockChainMockRecorder) GetBaseFee() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockChain)(nil).GetBaseFee)) -} - -// GetFeeDistribution mocks base method. 
-func (m *MockChain) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFeeDistribution") - ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFeeDistribution indicates an expected call of GetFeeDistribution. -func (mr *MockChainMockRecorder) GetFeeDistribution() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockChain)(nil).GetFeeDistribution)) -} - -// GetCurrentDelegatorIterator mocks base method. -func (m *MockChain) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. -func (mr *MockChainMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentDelegatorIterator), arg0, arg1) -} - -// GetCurrentStakerIterator mocks base method. -func (m *MockChain) GetCurrentStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. -func (mr *MockChainMockRecorder) GetCurrentStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentStakerIterator)) -} - -// GetCurrentSupply mocks base method. -func (m *MockChain) GetCurrentSupply(arg0 ids.ID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockChainMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockChain)(nil).GetCurrentSupply), arg0) -} - -// GetCurrentValidator mocks base method. -func (m *MockChain) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentValidator indicates an expected call of GetCurrentValidator. -func (mr *MockChainMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockChain)(nil).GetCurrentValidator), arg0, arg1) -} - -// GetDelegateeReward mocks base method. -func (m *MockChain) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDelegateeReward indicates an expected call of GetDelegateeReward. 
-func (mr *MockChainMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), arg0, arg1) -} - -// GetDeposit mocks base method. -func (m *MockChain) GetDeposit(arg0 ids.ID) (*deposit.Deposit, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeposit", arg0) - ret0, _ := ret[0].(*deposit.Deposit) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeposit indicates an expected call of GetDeposit. -func (mr *MockChainMockRecorder) GetDeposit(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockChain)(nil).GetDeposit), arg0) -} - -// GetNextToUnlockDepositTime mocks base method. -func (m *MockChain) GetNextToUnlockDepositTime(arg0 set.Set[ids.ID]) (time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", arg0) - ret0, _ := ret[0].(time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. -func (mr *MockChainMockRecorder) GetNextToUnlockDepositTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockChain)(nil).GetNextToUnlockDepositTime), arg0) -} - -// GetNextToUnlockDepositIDsAndTime mocks base method. -func (m *MockChain) GetNextToUnlockDepositIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. -func (mr *MockChainMockRecorder) GetNextToUnlockDepositIDsAndTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockChain)(nil).GetNextToUnlockDepositIDsAndTime), arg0) -} - -// GetNextProposalExpirationTime mocks base method. -func (m *MockChain) GetNextProposalExpirationTime(arg0 set.Set[ids.ID]) (time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", arg0) - ret0, _ := ret[0].(time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNextProposalExpirationTime indicates an expected call of GetNextProposalExpirationTime. -func (mr *MockChainMockRecorder) GetNextProposalExpirationTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockChain)(nil).GetNextProposalExpirationTime), arg0) -} - -// GetNextToExpireProposalIDsAndTime mocks base method. -func (m *MockChain) GetNextToExpireProposalIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNextToExpireProposalIDsAndTime indicates an expected call of GetNextToExpireProposalIDsAndTime. 
-func (mr *MockChainMockRecorder) GetNextToExpireProposalIDsAndTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockChain)(nil).GetNextToExpireProposalIDsAndTime), arg0) -} - -// GetProposalIDsToFinish mocks base method. -func (m *MockChain) GetProposalIDsToFinish() ([]ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposalIDsToFinish") - ret0, _ := ret[0].([]ids.ID) - ret2, _ := ret[1].(error) - return ret0, ret2 -} - -// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. -func (mr *MockChainMockRecorder) GetProposalIDsToFinish() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockChain)(nil).GetProposalIDsToFinish)) -} - -// GetProposalIterator mocks base method. -func (m *MockChain) GetProposalIterator() (ProposalsIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposalIterator") - ret0, _ := ret[0].(ProposalsIterator) - ret2, _ := ret[1].(error) - return ret0, ret2 -} - -// GetProposalIterator indicates an expected call of GetProposalIterator. -func (mr *MockChainMockRecorder) GetProposalIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockChain)(nil).GetProposalIterator)) -} - -// GetDepositOffer mocks base method. -func (m *MockChain) GetDepositOffer(arg0 ids.ID) (*deposit.Offer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDepositOffer", arg0) - ret0, _ := ret[0].(*deposit.Offer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDepositOffer indicates an expected call of GetDepositOffer. -func (mr *MockChainMockRecorder) GetDepositOffer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockChain)(nil).GetDepositOffer), arg0) -} - -// GetMultisigAlias mocks base method. -func (m *MockChain) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) - ret0, _ := ret[0].(*multisig.AliasWithNonce) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMultisigAlias indicates an expected call of GetMultisigAlias. -func (mr *MockChainMockRecorder) GetMultisigAlias(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockChain)(nil).GetMultisigAlias), arg0) -} - -// GetNotDistributedValidatorReward mocks base method. -func (m *MockChain) GetNotDistributedValidatorReward() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNotDistributedValidatorReward") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNotDistributedValidatorReward indicates an expected call of GetNotDistributedValidatorReward. -func (mr *MockChainMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockChain)(nil).GetNotDistributedValidatorReward)) -} - -// GetPendingDelegatorIterator mocks base method. 
-func (m *MockChain) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockChainMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetPendingDelegatorIterator), arg0, arg1) -} - -// GetPendingStakerIterator mocks base method. -func (m *MockChain) GetPendingStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. -func (mr *MockChainMockRecorder) GetPendingStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockChain)(nil).GetPendingStakerIterator)) -} - -// GetPendingValidator mocks base method. -func (m *MockChain) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingValidator indicates an expected call of GetPendingValidator. -func (mr *MockChainMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), arg0, arg1) -} - -// GetDeferredStakerIterator mocks base method. -func (m *MockChain) GetDeferredStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. -func (mr *MockChainMockRecorder) GetDeferredStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockChain)(nil).GetDeferredStakerIterator)) -} - -// GetDeferredValidator mocks base method. -func (m *MockChain) GetDeferredValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeferredValidator indicates an expected call of GetDeferredValidator. -func (mr *MockChainMockRecorder) GetDeferredValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockChain)(nil).GetDeferredValidator), arg0, arg1) -} - -// DeleteDeferredValidator mocks base method. -func (m *MockChain) DeleteDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteDeferredValidator", arg0) -} - -// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. 
-func (mr *MockChainMockRecorder) DeleteDeferredValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockChain)(nil).DeleteDeferredValidator), arg0) -} - -// PutDeferredValidator mocks base method. -func (m *MockChain) PutDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutDeferredValidator", arg0) -} - -// PutDeferredValidator indicates an expected call of PutDeferredValidator. -func (mr *MockChainMockRecorder) PutDeferredValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockChain)(nil).PutDeferredValidator), arg0) -} - -// GetRewardUTXOs mocks base method. -func (m *MockChain) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockChainMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockChain)(nil).GetRewardUTXOs), arg0) -} - -// GetSubnetOwner mocks base method. -func (m *MockChain) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) - ret0, _ := ret[0].(fx.Owner) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetOwner indicates an expected call of GetSubnetOwner. -func (mr *MockChainMockRecorder) GetSubnetOwner(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockChain)(nil).GetSubnetOwner), arg0) -} - -// GetShortIDLink mocks base method. -func (m *MockChain) GetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey) (ids.ShortID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShortIDLink", arg0, arg1) - ret0, _ := ret[0].(ids.ShortID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShortIDLink indicates an expected call of GetShortIDLink. -func (mr *MockChainMockRecorder) GetShortIDLink(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockChain)(nil).GetShortIDLink), arg0, arg1) -} - -// GetSubnetTransformation mocks base method. -func (m *MockChain) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. -func (mr *MockChainMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockChain)(nil).GetSubnetTransformation), arg0) -} - -// GetSubnets mocks base method. -func (m *MockChain) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. 
-func (mr *MockChainMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockChain)(nil).GetSubnets)) -} - -// GetTimestamp mocks base method. -func (m *MockChain) GetTimestamp() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTimestamp") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// GetTimestamp indicates an expected call of GetTimestamp. -func (mr *MockChainMockRecorder) GetTimestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockChain)(nil).GetTimestamp)) -} - -// GetTx mocks base method. -func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(status.Status) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetTx indicates an expected call of GetTx. -func (mr *MockChainMockRecorder) GetTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) -} - -// GetUTXO mocks base method. -func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXO", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXO indicates an expected call of GetUTXO. -func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) -} - -// LockedUTXOs mocks base method. -func (m *MockChain) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LockedUTXOs", arg0, arg1, arg2) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LockedUTXOs indicates an expected call of LockedUTXOs. -func (mr *MockChainMockRecorder) LockedUTXOs(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockChain)(nil).LockedUTXOs), arg0, arg1, arg2) -} - -// PutCurrentDelegator mocks base method. -func (m *MockChain) PutCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentDelegator", arg0) -} - -// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockChainMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockChain)(nil).PutCurrentDelegator), arg0) -} - -// PutCurrentValidator mocks base method. -func (m *MockChain) PutCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentValidator", arg0) -} - -// PutCurrentValidator indicates an expected call of PutCurrentValidator. -func (mr *MockChainMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockChain)(nil).PutCurrentValidator), arg0) -} - -// PutPendingDelegator mocks base method. 
-func (m *MockChain) PutPendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingDelegator", arg0) -} - -// PutPendingDelegator indicates an expected call of PutPendingDelegator. -func (mr *MockChainMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockChain)(nil).PutPendingDelegator), arg0) -} - -// PutPendingValidator mocks base method. -func (m *MockChain) PutPendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingValidator", arg0) -} - -// PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockChainMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockChain)(nil).PutPendingValidator), arg0) -} - -// SetAddressStates mocks base method. -func (m *MockChain) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetAddressStates", arg0, arg1) -} - -// SetAddressStates indicates an expected call of SetAddressStates. -func (mr *MockChainMockRecorder) SetAddressStates(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockChain)(nil).SetAddressStates), arg0, arg1) -} - -// SetClaimable mocks base method. -func (m *MockChain) SetClaimable(arg0 ids.ID, arg1 *Claimable) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetClaimable", arg0, arg1) -} - -// SetClaimable indicates an expected call of SetClaimable. -func (mr *MockChainMockRecorder) SetClaimable(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockChain)(nil).SetClaimable), arg0, arg1) -} - -// AddProposal mocks base method. -func (m *MockChain) AddProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposal", arg0, arg1) -} - -// AddProposal indicates an expected call of AddProposal. -func (mr *MockChainMockRecorder) AddProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockChain)(nil).AddProposal), arg0, arg1) -} - -// ModifyProposal mocks base method. -func (m *MockChain) ModifyProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyProposal", arg0, arg1) -} - -// ModifyProposal indicates an expected call of ModifyProposal. -func (mr *MockChainMockRecorder) ModifyProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockChain)(nil).ModifyProposal), arg0, arg1) -} - -// RemoveProposal mocks base method. -func (m *MockChain) RemoveProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposal", arg0, arg1) -} - -// RemoveProposal indicates an expected call of RemoveProposal. -func (mr *MockChainMockRecorder) RemoveProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockChain)(nil).RemoveProposal), arg0, arg1) -} - -// AddProposalIDToFinish mocks base method. 
-func (m *MockChain) AddProposalIDToFinish(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposalIDToFinish", arg0) -} - -// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. -func (mr *MockChainMockRecorder) AddProposalIDToFinish(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockChain)(nil).AddProposalIDToFinish), arg0) -} - -// RemoveProposalIDToFinish mocks base method. -func (m *MockChain) RemoveProposalIDToFinish(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) -} - -// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. -func (mr *MockChainMockRecorder) RemoveProposalIDToFinish(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockChain)(nil).RemoveProposalIDToFinish), arg0) -} - -// SetBaseFee mocks base method. -func (m *MockChain) SetBaseFee(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBaseFee", arg0) -} - -// SetBaseFee indicates an expected call of SetBaseFee. -func (mr *MockChainMockRecorder) SetBaseFee(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockChain)(nil).SetBaseFee), arg0) -} - -// SetFeeDistribution mocks base method. -func (m *MockChain) SetFeeDistribution(arg0 [dac.FeeDistributionFractionsCount]uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFeeDistribution", arg0) -} - -// SetFeeDistribution indicates an expected call of SetFeeDistribution. -func (mr *MockChainMockRecorder) SetFeeDistribution(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockChain)(nil).SetFeeDistribution), arg0) -} - -// SetCurrentSupply mocks base method. -func (m *MockChain) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) -} - -// SetCurrentSupply indicates an expected call of SetCurrentSupply. -func (mr *MockChainMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockChain)(nil).SetCurrentSupply), arg0, arg1) -} - -// SetDelegateeReward mocks base method. -func (m *MockChain) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetDelegateeReward indicates an expected call of SetDelegateeReward. -func (mr *MockChainMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockChain)(nil).SetDelegateeReward), arg0, arg1, arg2) -} - -// SetSubnetOwner mocks base method. -func (m *MockChain) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) -} - -// SetSubnetOwner indicates an expected call of SetSubnetOwner. 
-func (mr *MockChainMockRecorder) SetSubnetOwner(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockChain)(nil).SetSubnetOwner), arg0, arg1) -} - -// SetLastRewardImportTimestamp mocks base method. -func (m *MockChain) SetLastRewardImportTimestamp(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLastRewardImportTimestamp", arg0) -} - -// SetLastRewardImportTimestamp indicates an expected call of SetLastRewardImportTimestamp. -func (mr *MockChainMockRecorder) SetLastRewardImportTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastRewardImportTimestamp", reflect.TypeOf((*MockChain)(nil).SetLastRewardImportTimestamp), - arg0) -} - -// SetMultisigAlias mocks base method. -func (m *MockChain) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetMultisigAlias", arg0, arg1) -} - -// SetMultisigAlias indicates an expected call of SetMultisigAlias. -func (mr *MockChainMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockChain)(nil).SetMultisigAlias), arg0, arg1) -} - -// SetNotDistributedValidatorReward mocks base method. -func (m *MockChain) SetNotDistributedValidatorReward(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNotDistributedValidatorReward", arg0) -} - -// SetNotDistributedValidatorReward indicates an expected call of SetNotDistributedValidatorReward. -func (mr *MockChainMockRecorder) SetNotDistributedValidatorReward(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockChain)(nil).SetNotDistributedValidatorReward), arg0) -} - -// SetShortIDLink mocks base method. -func (m *MockChain) SetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey, arg2 *ids.ShortID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetShortIDLink", arg0, arg1, arg2) -} - -// SetShortIDLink indicates an expected call of SetShortIDLink. -func (mr *MockChainMockRecorder) SetShortIDLink(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockChain)(nil).SetShortIDLink), arg0, arg1, arg2) -} - -// SetTimestamp mocks base method. -func (m *MockChain) SetTimestamp(arg0 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTimestamp", arg0) -} - -// SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockChainMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) -} - -// AddDeposit mocks base method. -func (m *MockChain) AddDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddDeposit", arg0, arg1) -} - -// AddDeposit indicates an expected call of AddDeposit. -func (mr *MockChainMockRecorder) AddDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockChain)(nil).AddDeposit), arg0, arg1) -} - -// ModifyDeposit mocks base method. 
-func (m *MockChain) ModifyDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyDeposit", arg0, arg1) -} - -// ModifyDeposit indicates an expected call of ModifyDeposit. -func (mr *MockChainMockRecorder) ModifyDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockChain)(nil).ModifyDeposit), arg0, arg1) -} - -// RemoveDeposit mocks base method. -func (m *MockChain) RemoveDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveDeposit", arg0, arg1) -} - -// RemoveDeposit indicates an expected call of RemoveDeposit. -func (mr *MockChainMockRecorder) RemoveDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockChain)(nil).RemoveDeposit), arg0, arg1) -} diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go deleted file mode 100644 index a33660d98af6..000000000000 --- a/vms/platformvm/state/mock_diff.go +++ /dev/null @@ -1,1096 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Diff) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - set "github.com/ava-labs/avalanchego/utils/set" - avax "github.com/ava-labs/avalanchego/vms/components/avax" - multisig "github.com/ava-labs/avalanchego/vms/components/multisig" - addrstate "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" - config "github.com/ava-labs/avalanchego/vms/platformvm/config" - dac "github.com/ava-labs/avalanchego/vms/platformvm/dac" - deposit "github.com/ava-labs/avalanchego/vms/platformvm/deposit" - fx "github.com/ava-labs/avalanchego/vms/platformvm/fx" - locked "github.com/ava-labs/avalanchego/vms/platformvm/locked" - status "github.com/ava-labs/avalanchego/vms/platformvm/status" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "go.uber.org/mock/gomock" -) - -// MockDiff is a mock of Diff interface. -type MockDiff struct { - ctrl *gomock.Controller - recorder *MockDiffMockRecorder -} - -// MockDiffMockRecorder is the mock recorder for MockDiff. -type MockDiffMockRecorder struct { - mock *MockDiff -} - -// NewMockDiff creates a new mock instance. -func NewMockDiff(ctrl *gomock.Controller) *MockDiff { - mock := &MockDiff{ctrl: ctrl} - mock.recorder = &MockDiffMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDiff) EXPECT() *MockDiffMockRecorder { - return m.recorder -} - -// AddChain mocks base method. -func (m *MockDiff) AddChain(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0) -} - -// AddChain indicates an expected call of AddChain. -func (mr *MockDiffMockRecorder) AddChain(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockDiff)(nil).AddChain), arg0) -} - -// SetDepositOffer mocks base method. -func (m *MockDiff) SetDepositOffer(arg0 *deposit.Offer) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetDepositOffer", arg0) -} - -// SetDepositOffer indicates an expected call of SetDepositOffer. 
-func (mr *MockDiffMockRecorder) SetDepositOffer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockDiff)(nil).SetDepositOffer), arg0) -} - -// AddRewardUTXO mocks base method. -func (m *MockDiff) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) -} - -// AddRewardUTXO indicates an expected call of AddRewardUTXO. -func (mr *MockDiffMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockDiff)(nil).AddRewardUTXO), arg0, arg1) -} - -// AddSubnet mocks base method. -func (m *MockDiff) AddSubnet(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnet", arg0) -} - -// AddSubnet indicates an expected call of AddSubnet. -func (mr *MockDiffMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockDiff)(nil).AddSubnet), arg0) -} - -// AddSubnetTransformation mocks base method. -func (m *MockDiff) AddSubnetTransformation(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnetTransformation", arg0) -} - -// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockDiffMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).AddSubnetTransformation), arg0) -} - -// AddTx mocks base method. -func (m *MockDiff) AddTx(arg0 *txs.Tx, arg1 status.Status) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTx", arg0, arg1) -} - -// AddTx indicates an expected call of AddTx. -func (mr *MockDiffMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0, arg1) -} - -// AddUTXO mocks base method. -func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUTXO", arg0) -} - -// AddUTXO indicates an expected call of AddUTXO. -func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) -} - -// Apply mocks base method. -func (m *MockDiff) Apply(arg0 State) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Apply", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Apply indicates an expected call of Apply. -func (mr *MockDiffMockRecorder) Apply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) -} - -// ApplyCaminoState mocks base method. -func (m *MockDiff) ApplyCaminoState(arg0 State) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyCaminoState", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyCaminoState indicates an expected call of ApplyCaminoState. -func (mr *MockDiffMockRecorder) ApplyCaminoState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyCaminoState", reflect.TypeOf((*MockDiff)(nil).ApplyCaminoState), arg0) -} - -// CaminoConfig mocks base method. 
-func (m *MockDiff) CaminoConfig() (*CaminoConfig, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CaminoConfig") - ret0, _ := ret[0].(*CaminoConfig) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CaminoConfig indicates an expected call of CaminoConfig. -func (mr *MockDiffMockRecorder) CaminoConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockDiff)(nil).CaminoConfig)) -} - -// Config mocks base method. -func (m *MockDiff) Config() (*config.Config, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Config") - ret0, _ := ret[0].(*config.Config) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Config indicates an expected call of Config. -func (mr *MockDiffMockRecorder) Config() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Config", reflect.TypeOf((*MockDiff)(nil).Config)) -} - -// DeleteCurrentDelegator mocks base method. -func (m *MockDiff) DeleteCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) -} - -// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockDiffMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentDelegator), arg0) -} - -// DeleteCurrentValidator mocks base method. -func (m *MockDiff) DeleteCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentValidator", arg0) -} - -// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockDiffMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentValidator), arg0) -} - -// DeletePendingDelegator mocks base method. -func (m *MockDiff) DeletePendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingDelegator", arg0) -} - -// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. -func (mr *MockDiffMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockDiff)(nil).DeletePendingDelegator), arg0) -} - -// DeletePendingValidator mocks base method. -func (m *MockDiff) DeletePendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingValidator", arg0) -} - -// DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockDiffMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockDiff)(nil).DeletePendingValidator), arg0) -} - -// DeleteUTXO mocks base method. -func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteUTXO", arg0) -} - -// DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) -} - -// GetAddressStates mocks base method. 
-func (m *MockDiff) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAddressStates", arg0) - ret0, _ := ret[0].(addrstate.AddressState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAddressStates indicates an expected call of GetAddressStates. -func (mr *MockDiffMockRecorder) GetAddressStates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockDiff)(nil).GetAddressStates), arg0) -} - -// GetAllDepositOffers mocks base method. -func (m *MockDiff) GetAllDepositOffers() ([]*deposit.Offer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllDepositOffers") - ret0, _ := ret[0].([]*deposit.Offer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllDepositOffers indicates an expected call of GetAllDepositOffers. -func (mr *MockDiffMockRecorder) GetAllDepositOffers() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockDiff)(nil).GetAllDepositOffers)) -} - -// GetChains mocks base method. -func (m *MockDiff) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockDiffMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockDiff)(nil).GetChains), arg0) -} - -// GetClaimable mocks base method. -func (m *MockDiff) GetClaimable(arg0 ids.ID) (*Claimable, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClaimable", arg0) - ret0, _ := ret[0].(*Claimable) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetClaimable indicates an expected call of GetClaimable. -func (mr *MockDiffMockRecorder) GetClaimable(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockDiff)(nil).GetClaimable), arg0) -} - -// GetProposal mocks base method. -func (m *MockDiff) GetProposal(arg0 ids.ID) (dac.ProposalState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposal", arg0) - ret0, _ := ret[0].(dac.ProposalState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProposal indicates an expected call of GetProposal. -func (mr *MockDiffMockRecorder) GetProposal(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockDiff)(nil).GetProposal), arg0) -} - -// GetBaseFee mocks base method. -func (m *MockDiff) GetBaseFee() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBaseFee") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBaseFee indicates an expected call of GetBaseFee. -func (mr *MockDiffMockRecorder) GetBaseFee() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockDiff)(nil).GetBaseFee)) -} - -// GetFeeDistribution mocks base method. 
-func (m *MockDiff) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFeeDistribution") - ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFeeDistribution indicates an expected call of GetFeeDistribution. -func (mr *MockDiffMockRecorder) GetFeeDistribution() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockDiff)(nil).GetFeeDistribution)) -} - -// GetCurrentDelegatorIterator mocks base method. -func (m *MockDiff) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. -func (mr *MockDiffMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentDelegatorIterator), arg0, arg1) -} - -// GetCurrentStakerIterator mocks base method. -func (m *MockDiff) GetCurrentStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. -func (mr *MockDiffMockRecorder) GetCurrentStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentStakerIterator)) -} - -// GetCurrentSupply mocks base method. -func (m *MockDiff) GetCurrentSupply(arg0 ids.ID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockDiffMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).GetCurrentSupply), arg0) -} - -// GetCurrentValidator mocks base method. -func (m *MockDiff) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentValidator indicates an expected call of GetCurrentValidator. -func (mr *MockDiffMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockDiff)(nil).GetCurrentValidator), arg0, arg1) -} - -// GetDelegateeReward mocks base method. -func (m *MockDiff) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDelegateeReward indicates an expected call of GetDelegateeReward. 
-func (mr *MockDiffMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), arg0, arg1) -} - -// GetDeposit mocks base method. -func (m *MockDiff) GetDeposit(arg0 ids.ID) (*deposit.Deposit, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeposit", arg0) - ret0, _ := ret[0].(*deposit.Deposit) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeposit indicates an expected call of GetDeposit. -func (mr *MockDiffMockRecorder) GetDeposit(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockDiff)(nil).GetDeposit), arg0) -} - -// GetNextToUnlockDepositTime mocks base method. -func (m *MockDiff) GetNextToUnlockDepositTime(arg0 set.Set[ids.ID]) (time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", arg0) - ret0, _ := ret[0].(time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. -func (mr *MockDiffMockRecorder) GetNextToUnlockDepositTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockDiff)(nil).GetNextToUnlockDepositTime), arg0) -} - -// GetNextToUnlockDepositIDsAndTime mocks base method. -func (m *MockDiff) GetNextToUnlockDepositIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. -func (mr *MockDiffMockRecorder) GetNextToUnlockDepositIDsAndTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockDiff)(nil).GetNextToUnlockDepositIDsAndTime), arg0) -} - -// GetNextProposalExpirationTime mocks base method. -func (m *MockDiff) GetNextProposalExpirationTime(arg0 set.Set[ids.ID]) (time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", arg0) - ret0, _ := ret[0].(time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNextProposalExpirationTime indicates an expected call of GetNextProposalExpirationTime. -func (mr *MockDiffMockRecorder) GetNextProposalExpirationTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockDiff)(nil).GetNextProposalExpirationTime), arg0) -} - -// GetNextToExpireProposalIDsAndTime mocks base method. -func (m *MockDiff) GetNextToExpireProposalIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNextToExpireProposalIDsAndTime indicates an expected call of GetNextToExpireProposalIDsAndTime. 
-func (mr *MockDiffMockRecorder) GetNextToExpireProposalIDsAndTime(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockDiff)(nil).GetNextToExpireProposalIDsAndTime), arg0) -} - -// GetProposalIDsToFinish mocks base method. -func (m *MockDiff) GetProposalIDsToFinish() ([]ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposalIDsToFinish") - ret0, _ := ret[0].([]ids.ID) - ret2, _ := ret[1].(error) - return ret0, ret2 -} - -// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. -func (mr *MockDiffMockRecorder) GetProposalIDsToFinish() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockDiff)(nil).GetProposalIDsToFinish)) -} - -// GetProposalIterator mocks base method. -func (m *MockDiff) GetProposalIterator() (ProposalsIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposalIterator") - ret0, _ := ret[0].(ProposalsIterator) - ret2, _ := ret[1].(error) - return ret0, ret2 -} - -// GetProposalIterator indicates an expected call of GetProposalIterator. -func (mr *MockDiffMockRecorder) GetProposalIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockDiff)(nil).GetProposalIterator)) -} - -// GetDepositOffer mocks base method. -func (m *MockDiff) GetDepositOffer(arg0 ids.ID) (*deposit.Offer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDepositOffer", arg0) - ret0, _ := ret[0].(*deposit.Offer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDepositOffer indicates an expected call of GetDepositOffer. -func (mr *MockDiffMockRecorder) GetDepositOffer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockDiff)(nil).GetDepositOffer), arg0) -} - -// GetMultisigAlias mocks base method. -func (m *MockDiff) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) - ret0, _ := ret[0].(*multisig.AliasWithNonce) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMultisigAlias indicates an expected call of GetMultisigAlias. -func (mr *MockDiffMockRecorder) GetMultisigAlias(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockDiff)(nil).GetMultisigAlias), arg0) -} - -// GetNotDistributedValidatorReward mocks base method. -func (m *MockDiff) GetNotDistributedValidatorReward() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNotDistributedValidatorReward") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNotDistributedValidatorReward indicates an expected call of GetNotDistributedValidatorReward. -func (mr *MockDiffMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockDiff)(nil).GetNotDistributedValidatorReward)) -} - -// GetPendingDelegatorIterator mocks base method. 
-func (m *MockDiff) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockDiffMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingDelegatorIterator), arg0, arg1) -} - -// GetPendingStakerIterator mocks base method. -func (m *MockDiff) GetPendingStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. -func (mr *MockDiffMockRecorder) GetPendingStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingStakerIterator)) -} - -// GetPendingValidator mocks base method. -func (m *MockDiff) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingValidator indicates an expected call of GetPendingValidator. -func (mr *MockDiffMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), arg0, arg1) -} - -// GetDeferredStakerIterator mocks base method. -func (m *MockDiff) GetDeferredStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. -func (mr *MockDiffMockRecorder) GetDeferredStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetDeferredStakerIterator)) -} - -// GetDeferredValidator mocks base method. -func (m *MockDiff) GetDeferredValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDeferredValidator indicates an expected call of GetDeferredValidator. -func (mr *MockDiffMockRecorder) GetDeferredValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockDiff)(nil).GetDeferredValidator), arg0, arg1) -} - -// DeleteDeferredValidator mocks base method. -func (m *MockDiff) DeleteDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteDeferredValidator", arg0) -} - -// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. 
-func (mr *MockDiffMockRecorder) DeleteDeferredValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockDiff)(nil).DeleteDeferredValidator), arg0) -} - -// PutDeferredValidator mocks base method. -func (m *MockDiff) PutDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutDeferredValidator", arg0) -} - -// PutDeferredValidator indicates an expected call of PutDeferredValidator. -func (mr *MockDiffMockRecorder) PutDeferredValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockDiff)(nil).PutDeferredValidator), arg0) -} - -// GetRewardUTXOs mocks base method. -func (m *MockDiff) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockDiffMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockDiff)(nil).GetRewardUTXOs), arg0) -} - -// GetSubnetOwner mocks base method. -func (m *MockDiff) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) - ret0, _ := ret[0].(fx.Owner) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetOwner indicates an expected call of GetSubnetOwner. -func (mr *MockDiffMockRecorder) GetSubnetOwner(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).GetSubnetOwner), arg0) -} - -// GetShortIDLink mocks base method. -func (m *MockDiff) GetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey) (ids.ShortID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShortIDLink", arg0, arg1) - ret0, _ := ret[0].(ids.ShortID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShortIDLink indicates an expected call of GetShortIDLink. -func (mr *MockDiffMockRecorder) GetShortIDLink(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockDiff)(nil).GetShortIDLink), arg0, arg1) -} - -// GetSubnetTransformation mocks base method. -func (m *MockDiff) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. -func (mr *MockDiffMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).GetSubnetTransformation), arg0) -} - -// GetSubnets mocks base method. -func (m *MockDiff) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. 
-func (mr *MockDiffMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockDiff)(nil).GetSubnets)) -} - -// GetTimestamp mocks base method. -func (m *MockDiff) GetTimestamp() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTimestamp") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// GetTimestamp indicates an expected call of GetTimestamp. -func (mr *MockDiffMockRecorder) GetTimestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockDiff)(nil).GetTimestamp)) -} - -// GetTx mocks base method. -func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(status.Status) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetTx indicates an expected call of GetTx. -func (mr *MockDiffMockRecorder) GetTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) -} - -// GetUTXO mocks base method. -func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXO", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXO indicates an expected call of GetUTXO. -func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) -} - -// LockedUTXOs mocks base method. -func (m *MockDiff) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LockedUTXOs", arg0, arg1, arg2) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LockedUTXOs indicates an expected call of LockedUTXOs. -func (mr *MockDiffMockRecorder) LockedUTXOs(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockDiff)(nil).LockedUTXOs), arg0, arg1, arg2) -} - -// PutCurrentDelegator mocks base method. -func (m *MockDiff) PutCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentDelegator", arg0) -} - -// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockDiffMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).PutCurrentDelegator), arg0) -} - -// PutCurrentValidator mocks base method. -func (m *MockDiff) PutCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentValidator", arg0) -} - -// PutCurrentValidator indicates an expected call of PutCurrentValidator. -func (mr *MockDiffMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockDiff)(nil).PutCurrentValidator), arg0) -} - -// PutPendingDelegator mocks base method. 
-func (m *MockDiff) PutPendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingDelegator", arg0) -} - -// PutPendingDelegator indicates an expected call of PutPendingDelegator. -func (mr *MockDiffMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockDiff)(nil).PutPendingDelegator), arg0) -} - -// PutPendingValidator mocks base method. -func (m *MockDiff) PutPendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingValidator", arg0) -} - -// PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockDiffMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockDiff)(nil).PutPendingValidator), arg0) -} - -// SetAddressStates mocks base method. -func (m *MockDiff) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetAddressStates", arg0, arg1) -} - -// SetAddressStates indicates an expected call of SetAddressStates. -func (mr *MockDiffMockRecorder) SetAddressStates(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockDiff)(nil).SetAddressStates), arg0, arg1) -} - -// SetClaimable mocks base method. -func (m *MockDiff) SetClaimable(arg0 ids.ID, arg1 *Claimable) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetClaimable", arg0, arg1) -} - -// SetClaimable indicates an expected call of SetClaimable. -func (mr *MockDiffMockRecorder) SetClaimable(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockDiff)(nil).SetClaimable), arg0, arg1) -} - -// AddProposal mocks base method. -func (m *MockDiff) AddProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposal", arg0, arg1) -} - -// AddProposal indicates an expected call of AddProposal. -func (mr *MockDiffMockRecorder) AddProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockDiff)(nil).AddProposal), arg0, arg1) -} - -// ModifyProposal mocks base method. -func (m *MockDiff) ModifyProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyProposal", arg0, arg1) -} - -// ModifyProposal indicates an expected call of ModifyProposal. -func (mr *MockDiffMockRecorder) ModifyProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockDiff)(nil).ModifyProposal), arg0, arg1) -} - -// RemoveProposal mocks base method. -func (m *MockDiff) RemoveProposal(arg0 ids.ID, arg1 dac.ProposalState) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposal", arg0, arg1) -} - -// RemoveProposal indicates an expected call of RemoveProposal. -func (mr *MockDiffMockRecorder) RemoveProposal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockDiff)(nil).RemoveProposal), arg0, arg1) -} - -// AddProposalIDToFinish mocks base method. 
-func (m *MockDiff) AddProposalIDToFinish(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposalIDToFinish", arg0) -} - -// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. -func (mr *MockDiffMockRecorder) AddProposalIDToFinish(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockDiff)(nil).AddProposalIDToFinish), arg0) -} - -// RemoveProposalIDToFinish mocks base method. -func (m *MockDiff) RemoveProposalIDToFinish(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) -} - -// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. -func (mr *MockDiffMockRecorder) RemoveProposalIDToFinish(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockDiff)(nil).RemoveProposalIDToFinish), arg0) -} - -// SetBaseFee mocks base method. -func (m *MockDiff) SetBaseFee(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBaseFee", arg0) -} - -// SetBaseFee indicates an expected call of SetBaseFee. -func (mr *MockDiffMockRecorder) SetBaseFee(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockDiff)(nil).SetBaseFee), arg0) -} - -// SetFeeDistribution mocks base method. -func (m *MockDiff) SetFeeDistribution(arg0 [dac.FeeDistributionFractionsCount]uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFeeDistribution", arg0) -} - -// SetFeeDistribution indicates an expected call of SetFeeDistribution. -func (mr *MockDiffMockRecorder) SetFeeDistribution(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockDiff)(nil).SetFeeDistribution), arg0) -} - -// SetCurrentSupply mocks base method. -func (m *MockDiff) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) -} - -// SetCurrentSupply indicates an expected call of SetCurrentSupply. -func (mr *MockDiffMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).SetCurrentSupply), arg0, arg1) -} - -// SetDelegateeReward mocks base method. -func (m *MockDiff) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetDelegateeReward indicates an expected call of SetDelegateeReward. -func (mr *MockDiffMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).SetDelegateeReward), arg0, arg1, arg2) -} - -// SetSubnetOwner mocks base method. -func (m *MockDiff) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) -} - -// SetSubnetOwner indicates an expected call of SetSubnetOwner. 
-func (mr *MockDiffMockRecorder) SetSubnetOwner(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).SetSubnetOwner), arg0, arg1) -} - -// SetMultisigAlias mocks base method. -func (m *MockDiff) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetMultisigAlias", arg0, arg1) -} - -// SetMultisigAlias indicates an expected call of SetMultisigAlias. -func (mr *MockDiffMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockDiff)(nil).SetMultisigAlias), arg0, arg1) -} - -// SetNotDistributedValidatorReward mocks base method. -func (m *MockDiff) SetNotDistributedValidatorReward(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNotDistributedValidatorReward", arg0) -} - -// SetNotDistributedValidatorReward indicates an expected call of SetNotDistributedValidatorReward. -func (mr *MockDiffMockRecorder) SetNotDistributedValidatorReward(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockDiff)(nil).SetNotDistributedValidatorReward), arg0) -} - -// SetShortIDLink mocks base method. -func (m *MockDiff) SetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey, arg2 *ids.ShortID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetShortIDLink", arg0, arg1, arg2) -} - -// SetShortIDLink indicates an expected call of SetShortIDLink. -func (mr *MockDiffMockRecorder) SetShortIDLink(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockDiff)(nil).SetShortIDLink), arg0, arg1, arg2) -} - -// SetTimestamp mocks base method. -func (m *MockDiff) SetTimestamp(arg0 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTimestamp", arg0) -} - -// SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockDiffMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) -} - -// AddDeposit mocks base method. -func (m *MockDiff) AddDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddDeposit", arg0, arg1) -} - -// AddDeposit indicates an expected call of AddDeposit. -func (mr *MockDiffMockRecorder) AddDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockDiff)(nil).AddDeposit), arg0, arg1) -} - -// ModifyDeposit mocks base method. -func (m *MockDiff) ModifyDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyDeposit", arg0, arg1) -} - -// ModifyDeposit indicates an expected call of ModifyDeposit. -func (mr *MockDiffMockRecorder) ModifyDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockDiff)(nil).ModifyDeposit), arg0, arg1) -} - -// RemoveDeposit mocks base method. 
-func (m *MockDiff) RemoveDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveDeposit", arg0, arg1) -} - -// RemoveDeposit indicates an expected call of RemoveDeposit. -func (mr *MockDiffMockRecorder) RemoveDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockDiff)(nil).RemoveDeposit), arg0, arg1) -} diff --git a/vms/platformvm/state/mock_proposals_iterator.go b/vms/platformvm/state/mock_proposals_iterator.go index 99120f95922c..ad4e61131311 100644 --- a/vms/platformvm/state/mock_proposals_iterator.go +++ b/vms/platformvm/state/mock_proposals_iterator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: ProposalsIterator) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/platformvm/state/mock_proposals_iterator.go github.com/ava-labs/avalanchego/vms/platformvm/state ProposalsIterator +// // Package state is a generated GoMock package. package state diff --git a/vms/platformvm/state/mock_staker_iterator.go b/vms/platformvm/state/mock_staker_iterator.go index 6ef7e9fb2d51..62ba31d8b1c6 100644 --- a/vms/platformvm/state/mock_staker_iterator.go +++ b/vms/platformvm/state/mock_staker_iterator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: StakerIterator) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/platformvm/state/mock_staker_iterator.go github.com/ava-labs/avalanchego/vms/platformvm/state StakerIterator +// // Package state is a generated GoMock package. package state diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 362bc99443fb..ae2482892b33 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: State) +// Source: vms/platformvm/state/state.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/state/state.go -destination=vms/platformvm/state/mock_state.go -package=state -exclude_interfaces= +// // Package state is a generated GoMock package. package state @@ -22,7 +24,6 @@ import ( multisig "github.com/ava-labs/avalanchego/vms/components/multisig" addrstate "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" block "github.com/ava-labs/avalanchego/vms/platformvm/block" - config "github.com/ava-labs/avalanchego/vms/platformvm/config" dac "github.com/ava-labs/avalanchego/vms/platformvm/dac" deposit "github.com/ava-labs/avalanchego/vms/platformvm/deposit" fx "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -32,195 +33,139 @@ import ( gomock "go.uber.org/mock/gomock" ) -// MockState is a mock of State interface. -type MockState struct { +// MockChain is a mock of Chain interface. 
+type MockChain struct { ctrl *gomock.Controller - recorder *MockStateMockRecorder + recorder *MockChainMockRecorder } -// MockStateMockRecorder is the mock recorder for MockState. -type MockStateMockRecorder struct { - mock *MockState +// MockChainMockRecorder is the mock recorder for MockChain. +type MockChainMockRecorder struct { + mock *MockChain } -// NewMockState creates a new mock instance. -func NewMockState(ctrl *gomock.Controller) *MockState { - mock := &MockState{ctrl: ctrl} - mock.recorder = &MockStateMockRecorder{mock} +// NewMockChain creates a new mock instance. +func NewMockChain(ctrl *gomock.Controller) *MockChain { + mock := &MockChain{ctrl: ctrl} + mock.recorder = &MockChainMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockState) EXPECT() *MockStateMockRecorder { +func (m *MockChain) EXPECT() *MockChainMockRecorder { return m.recorder } -// Abort mocks base method. -func (m *MockState) Abort() { +// AddChain mocks base method. +func (m *MockChain) AddChain(createChainTx *txs.Tx) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Abort") + m.ctrl.Call(m, "AddChain", createChainTx) } -// Abort indicates an expected call of Abort. -func (mr *MockStateMockRecorder) Abort() *gomock.Call { +// AddChain indicates an expected call of AddChain. +func (mr *MockChainMockRecorder) AddChain(createChainTx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Abort", reflect.TypeOf((*MockState)(nil).Abort)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockChain)(nil).AddChain), createChainTx) } -// AddChain mocks base method. -func (m *MockState) AddChain(arg0 *txs.Tx) { +// AddDeposit mocks base method. +func (m *MockChain) AddDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0) + m.ctrl.Call(m, "AddDeposit", depositTxID, deposit) } -// AddChain indicates an expected call of AddChain. -func (mr *MockStateMockRecorder) AddChain(arg0 interface{}) *gomock.Call { +// AddDeposit indicates an expected call of AddDeposit. +func (mr *MockChainMockRecorder) AddDeposit(depositTxID, deposit any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockState)(nil).AddChain), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockChain)(nil).AddDeposit), depositTxID, deposit) } -// SetDepositOffer mocks base method. -func (m *MockState) SetDepositOffer(arg0 *deposit.Offer) { +// AddProposal mocks base method. +func (m *MockChain) AddProposal(proposalID ids.ID, proposal dac.ProposalState) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetDepositOffer", arg0) + m.ctrl.Call(m, "AddProposal", proposalID, proposal) } -// SetDepositOffer indicates an expected call of SetDepositOffer. -func (mr *MockStateMockRecorder) SetDepositOffer(arg0 interface{}) *gomock.Call { +// AddProposal indicates an expected call of AddProposal. +func (mr *MockChainMockRecorder) AddProposal(proposalID, proposal any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockState)(nil).SetDepositOffer), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockChain)(nil).AddProposal), proposalID, proposal) } -// AddRewardUTXO mocks base method. 
-func (m *MockState) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { +// AddProposalIDToFinish mocks base method. +func (m *MockChain) AddProposalIDToFinish(proposalID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) + m.ctrl.Call(m, "AddProposalIDToFinish", proposalID) } -// AddRewardUTXO indicates an expected call of AddRewardUTXO. -func (mr *MockStateMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { +// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. +func (mr *MockChainMockRecorder) AddProposalIDToFinish(proposalID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockState)(nil).AddRewardUTXO), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockChain)(nil).AddProposalIDToFinish), proposalID) } -// AddStatelessBlock mocks base method. -func (m *MockState) AddStatelessBlock(arg0 block.Block) { +// AddRewardUTXO mocks base method. +func (m *MockChain) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddStatelessBlock", arg0) + m.ctrl.Call(m, "AddRewardUTXO", txID, utxo) } -// AddStatelessBlock indicates an expected call of AddStatelessBlock. -func (mr *MockStateMockRecorder) AddStatelessBlock(arg0 interface{}) *gomock.Call { +// AddRewardUTXO indicates an expected call of AddRewardUTXO. +func (mr *MockChainMockRecorder) AddRewardUTXO(txID, utxo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatelessBlock", reflect.TypeOf((*MockState)(nil).AddStatelessBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockChain)(nil).AddRewardUTXO), txID, utxo) } // AddSubnet mocks base method. -func (m *MockState) AddSubnet(arg0 *txs.Tx) { +func (m *MockChain) AddSubnet(createSubnetTx *txs.Tx) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnet", arg0) + m.ctrl.Call(m, "AddSubnet", createSubnetTx) } // AddSubnet indicates an expected call of AddSubnet. -func (mr *MockStateMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddSubnet(createSubnetTx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockState)(nil).AddSubnet), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockChain)(nil).AddSubnet), createSubnetTx) } // AddSubnetTransformation mocks base method. -func (m *MockState) AddSubnetTransformation(arg0 *txs.Tx) { +func (m *MockChain) AddSubnetTransformation(transformSubnetTx *txs.Tx) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnetTransformation", arg0) + m.ctrl.Call(m, "AddSubnetTransformation", transformSubnetTx) } // AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockStateMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddSubnetTransformation(transformSubnetTx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockState)(nil).AddSubnetTransformation), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockChain)(nil).AddSubnetTransformation), transformSubnetTx) } // AddTx mocks base method. 
-func (m *MockState) AddTx(arg0 *txs.Tx, arg1 status.Status) { +func (m *MockChain) AddTx(tx *txs.Tx, status status.Status) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTx", arg0, arg1) + m.ctrl.Call(m, "AddTx", tx, status) } // AddTx indicates an expected call of AddTx. -func (mr *MockStateMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddTx(tx, status any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), tx, status) } // AddUTXO mocks base method. -func (m *MockState) AddUTXO(arg0 *avax.UTXO) { +func (m *MockChain) AddUTXO(utxo *avax.UTXO) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUTXO", arg0) + m.ctrl.Call(m, "AddUTXO", utxo) } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) -} - -// ApplyCurrentValidators mocks base method. -func (m *MockState) ApplyCurrentValidators(arg0 ids.ID, arg1 validators.Manager) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyCurrentValidators", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyCurrentValidators indicates an expected call of ApplyCurrentValidators. -func (mr *MockStateMockRecorder) ApplyCurrentValidators(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyCurrentValidators", reflect.TypeOf((*MockState)(nil).ApplyCurrentValidators), arg0, arg1) -} - -// ApplyValidatorPublicKeyDiffs mocks base method. -func (m *MockState) ApplyValidatorPublicKeyDiffs(arg0 context.Context, arg1 map[ids.NodeID]*validators.GetValidatorOutput, arg2, arg3 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyValidatorPublicKeyDiffs indicates an expected call of ApplyValidatorPublicKeyDiffs. -func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), arg0, arg1, arg2, arg3) -} - -// ApplyValidatorWeightDiffs mocks base method. -func (m *MockState) ApplyValidatorWeightDiffs(arg0 context.Context, arg1 map[ids.NodeID]*validators.GetValidatorOutput, arg2, arg3 uint64, arg4 ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyValidatorWeightDiffs", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyValidatorWeightDiffs indicates an expected call of ApplyValidatorWeightDiffs. -func (mr *MockStateMockRecorder) ApplyValidatorWeightDiffs(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddUTXO(utxo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorWeightDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorWeightDiffs), arg0, arg1, arg2, arg3, arg4) -} - -// Checksum mocks base method. 
-func (m *MockState) Checksum() ids.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Checksum") - ret0, _ := ret[0].(ids.ID) - return ret0 -} - -// Checksum indicates an expected call of Checksum. -func (mr *MockStateMockRecorder) Checksum() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checksum", reflect.TypeOf((*MockState)(nil).Checksum)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), utxo) } // CaminoConfig mocks base method. -func (m *MockState) CaminoConfig() (*CaminoConfig, error) { +func (m *MockChain) CaminoConfig() (*CaminoConfig, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CaminoConfig") ret0, _ := ret[0].(*CaminoConfig) @@ -229,146 +174,85 @@ func (m *MockState) CaminoConfig() (*CaminoConfig, error) { } // CaminoConfig indicates an expected call of CaminoConfig. -func (mr *MockStateMockRecorder) CaminoConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockState)(nil).CaminoConfig)) -} - -// Close mocks base method. -func (m *MockState) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockStateMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockState)(nil).Close)) -} - -// Commit mocks base method. -func (m *MockState) Commit() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Commit") - ret0, _ := ret[0].(error) - return ret0 -} - -// Commit indicates an expected call of Commit. -func (mr *MockStateMockRecorder) Commit() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockState)(nil).Commit)) -} - -// CommitBatch mocks base method. -func (m *MockState) CommitBatch() (database.Batch, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitBatch") - ret0, _ := ret[0].(database.Batch) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CommitBatch indicates an expected call of CommitBatch. -func (mr *MockStateMockRecorder) CommitBatch() *gomock.Call { +func (mr *MockChainMockRecorder) CaminoConfig() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBatch", reflect.TypeOf((*MockState)(nil).CommitBatch)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockChain)(nil).CaminoConfig)) } -// Config mocks base method. -func (m *MockState) Config() (*config.Config, error) { +// DeleteCurrentDelegator mocks base method. +func (m *MockChain) DeleteCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Config") - ret0, _ := ret[0].(*config.Config) - ret1, _ := ret[1].(error) - return ret0, ret1 + m.ctrl.Call(m, "DeleteCurrentDelegator", staker) } -// Config indicates an expected call of Config. -func (mr *MockStateMockRecorder) Config() *gomock.Call { +// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. 
+func (mr *MockChainMockRecorder) DeleteCurrentDelegator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Config", reflect.TypeOf((*MockState)(nil).Config)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentDelegator), staker) } -// DeleteCurrentDelegator mocks base method. -func (m *MockState) DeleteCurrentDelegator(arg0 *Staker) { +// DeleteCurrentValidator mocks base method. +func (m *MockChain) DeleteCurrentValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) + m.ctrl.Call(m, "DeleteCurrentValidator", staker) } -// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockStateMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { +// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. +func (mr *MockChainMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockState)(nil).DeleteCurrentDelegator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentValidator), staker) } -// DeleteCurrentValidator mocks base method. -func (m *MockState) DeleteCurrentValidator(arg0 *Staker) { +// DeleteDeferredValidator mocks base method. +func (m *MockChain) DeleteDeferredValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentValidator", arg0) + m.ctrl.Call(m, "DeleteDeferredValidator", staker) } -// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockStateMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { +// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. +func (mr *MockChainMockRecorder) DeleteDeferredValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockState)(nil).DeleteCurrentValidator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockChain)(nil).DeleteDeferredValidator), staker) } // DeletePendingDelegator mocks base method. -func (m *MockState) DeletePendingDelegator(arg0 *Staker) { +func (m *MockChain) DeletePendingDelegator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingDelegator", arg0) + m.ctrl.Call(m, "DeletePendingDelegator", staker) } // DeletePendingDelegator indicates an expected call of DeletePendingDelegator. -func (mr *MockStateMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) DeletePendingDelegator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockState)(nil).DeletePendingDelegator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockChain)(nil).DeletePendingDelegator), staker) } // DeletePendingValidator mocks base method. 
-func (m *MockState) DeletePendingValidator(arg0 *Staker) { +func (m *MockChain) DeletePendingValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingValidator", arg0) + m.ctrl.Call(m, "DeletePendingValidator", staker) } // DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockStateMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) DeletePendingValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockState)(nil).DeletePendingValidator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockChain)(nil).DeletePendingValidator), staker) } // DeleteUTXO mocks base method. -func (m *MockState) DeleteUTXO(arg0 ids.ID) { +func (m *MockChain) DeleteUTXO(utxoID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteUTXO", arg0) + m.ctrl.Call(m, "DeleteUTXO", utxoID) } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockStateMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), arg0) -} - -// GetBlockIDAtHeight mocks base method. -func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) DeleteUTXO(utxoID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), utxoID) } // GetAddressStates mocks base method. -func (m *MockState) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { +func (m *MockChain) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetAddressStates", arg0) ret0, _ := ret[0].(addrstate.AddressState) @@ -377,13 +261,13 @@ func (m *MockState) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, } // GetAddressStates indicates an expected call of GetAddressStates. -func (mr *MockStateMockRecorder) GetAddressStates(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetAddressStates(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockState)(nil).GetAddressStates), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockChain)(nil).GetAddressStates), arg0) } // GetAllDepositOffers mocks base method. -func (m *MockState) GetAllDepositOffers() ([]*deposit.Offer, error) { +func (m *MockChain) GetAllDepositOffers() ([]*deposit.Offer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetAllDepositOffers") ret0, _ := ret[0].([]*deposit.Offer) @@ -392,226 +276,225 @@ func (m *MockState) GetAllDepositOffers() ([]*deposit.Offer, error) { } // GetAllDepositOffers indicates an expected call of GetAllDepositOffers. 
-func (mr *MockStateMockRecorder) GetAllDepositOffers() *gomock.Call { +func (mr *MockChainMockRecorder) GetAllDepositOffers() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockState)(nil).GetAllDepositOffers)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockChain)(nil).GetAllDepositOffers)) } -// GetChains mocks base method. -func (m *MockState) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { +// GetBaseFee mocks base method. +func (m *MockChain) GetBaseFee() (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) + ret := m.ctrl.Call(m, "GetBaseFee") + ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetChains indicates an expected call of GetChains. -func (mr *MockStateMockRecorder) GetChains(arg0 interface{}) *gomock.Call { +// GetBaseFee indicates an expected call of GetBaseFee. +func (mr *MockChainMockRecorder) GetBaseFee() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockState)(nil).GetChains), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockChain)(nil).GetBaseFee)) } // GetClaimable mocks base method. -func (m *MockState) GetClaimable(arg0 ids.ID) (*Claimable, error) { +func (m *MockChain) GetClaimable(ownerID ids.ID) (*Claimable, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClaimable", arg0) + ret := m.ctrl.Call(m, "GetClaimable", ownerID) ret0, _ := ret[0].(*Claimable) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClaimable indicates an expected call of GetClaimable. -func (mr *MockStateMockRecorder) GetClaimable(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetClaimable(ownerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockState)(nil).GetClaimable), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockChain)(nil).GetClaimable), ownerID) } -// GetProposal mocks base method. -func (m *MockState) GetProposal(arg0 ids.ID) (dac.ProposalState, error) { +// GetCurrentDelegatorIterator mocks base method. +func (m *MockChain) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposal", arg0) - ret0, _ := ret[0].(dac.ProposalState) + ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", subnetID, nodeID) + ret0, _ := ret[0].(StakerIterator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetProposal indicates an expected call of GetProposal. -func (mr *MockStateMockRecorder) GetProposal(arg0 interface{}) *gomock.Call { +// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. +func (mr *MockChainMockRecorder) GetCurrentDelegatorIterator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockState)(nil).GetProposal), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentDelegatorIterator), subnetID, nodeID) } -// GetBaseFee mocks base method. -func (m *MockState) GetBaseFee() (uint64, error) { +// GetCurrentStakerIterator mocks base method. 
+func (m *MockChain) GetCurrentStakerIterator() (StakerIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBaseFee") - ret0, _ := ret[0].(uint64) + ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret0, _ := ret[0].(StakerIterator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBaseFee indicates an expected call of GetBaseFee. -func (mr *MockStateMockRecorder) GetBaseFee() *gomock.Call { +// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. +func (mr *MockChainMockRecorder) GetCurrentStakerIterator() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockState)(nil).GetBaseFee)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentStakerIterator)) } -// GetFeeDistribution mocks base method. -func (m *MockState) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { +// GetCurrentSupply mocks base method. +func (m *MockChain) GetCurrentSupply(subnetID ids.ID) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFeeDistribution") - ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) + ret := m.ctrl.Call(m, "GetCurrentSupply", subnetID) + ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetFeeDistribution indicates an expected call of GetFeeDistribution. -func (mr *MockStateMockRecorder) GetFeeDistribution() *gomock.Call { +// GetCurrentSupply indicates an expected call of GetCurrentSupply. +func (mr *MockChainMockRecorder) GetCurrentSupply(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockState)(nil).GetFeeDistribution)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockChain)(nil).GetCurrentSupply), subnetID) } -// GetCurrentDelegatorIterator mocks base method. -func (m *MockState) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { +// GetCurrentValidator mocks base method. +func (m *MockChain) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) + ret := m.ctrl.Call(m, "GetCurrentValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. -func (mr *MockStateMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { +// GetCurrentValidator indicates an expected call of GetCurrentValidator. +func (mr *MockChainMockRecorder) GetCurrentValidator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetCurrentDelegatorIterator), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockChain)(nil).GetCurrentValidator), subnetID, nodeID) } -// GetCurrentStakerIterator mocks base method. -func (m *MockState) GetCurrentStakerIterator() (StakerIterator, error) { +// GetDeferredStakerIterator mocks base method. 
+func (m *MockChain) GetDeferredStakerIterator() (StakerIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret := m.ctrl.Call(m, "GetDeferredStakerIterator") ret0, _ := ret[0].(StakerIterator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. -func (mr *MockStateMockRecorder) GetCurrentStakerIterator() *gomock.Call { +// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. +func (mr *MockChainMockRecorder) GetDeferredStakerIterator() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockState)(nil).GetCurrentStakerIterator)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockChain)(nil).GetDeferredStakerIterator)) } -// GetCurrentSupply mocks base method. -func (m *MockState) GetCurrentSupply(arg0 ids.ID) (uint64, error) { +// GetDeferredValidator mocks base method. +func (m *MockChain) GetDeferredValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) - ret0, _ := ret[0].(uint64) + ret := m.ctrl.Call(m, "GetDeferredValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockStateMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { +// GetDeferredValidator indicates an expected call of GetDeferredValidator. +func (mr *MockChainMockRecorder) GetDeferredValidator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockState)(nil).GetCurrentSupply), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockChain)(nil).GetDeferredValidator), subnetID, nodeID) } -// GetCurrentValidator mocks base method. -func (m *MockState) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { +// GetDelegateeReward mocks base method. +func (m *MockChain) GetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) + ret := m.ctrl.Call(m, "GetDelegateeReward", subnetID, nodeID) + ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentValidator indicates an expected call of GetCurrentValidator. -func (mr *MockStateMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockChainMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockState)(nil).GetCurrentValidator), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), subnetID, nodeID) } -// GetDelegateeReward mocks base method. -func (m *MockState) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { +// GetDeposit mocks base method. 
+func (m *MockChain) GetDeposit(depositTxID ids.ID) (*deposit.Deposit, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) - ret0, _ := ret[0].(uint64) + ret := m.ctrl.Call(m, "GetDeposit", depositTxID) + ret0, _ := ret[0].(*deposit.Deposit) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDelegateeReward indicates an expected call of GetDelegateeReward. -func (mr *MockStateMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { +// GetDeposit indicates an expected call of GetDeposit. +func (mr *MockChainMockRecorder) GetDeposit(depositTxID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockState)(nil).GetDelegateeReward), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockChain)(nil).GetDeposit), depositTxID) } -// GetDeposit mocks base method. -func (m *MockState) GetDeposit(arg0 ids.ID) (*deposit.Deposit, error) { +// GetDepositOffer mocks base method. +func (m *MockChain) GetDepositOffer(offerID ids.ID) (*deposit.Offer, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeposit", arg0) - ret0, _ := ret[0].(*deposit.Deposit) + ret := m.ctrl.Call(m, "GetDepositOffer", offerID) + ret0, _ := ret[0].(*deposit.Offer) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDeposit indicates an expected call of GetDeposit. -func (mr *MockStateMockRecorder) GetDeposit(arg0 interface{}) *gomock.Call { +// GetDepositOffer indicates an expected call of GetDepositOffer. +func (mr *MockChainMockRecorder) GetDepositOffer(offerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockState)(nil).GetDeposit), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockChain)(nil).GetDepositOffer), offerID) } -// GetNextToUnlockDepositTime mocks base method. -func (m *MockState) GetNextToUnlockDepositTime(arg0 set.Set[ids.ID]) (time.Time, error) { +// GetFeeDistribution mocks base method. +func (m *MockChain) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", arg0) - ret0, _ := ret[0].(time.Time) + ret := m.ctrl.Call(m, "GetFeeDistribution") + ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. -func (mr *MockStateMockRecorder) GetNextToUnlockDepositTime(arg0 interface{}) *gomock.Call { +// GetFeeDistribution indicates an expected call of GetFeeDistribution. +func (mr *MockChainMockRecorder) GetFeeDistribution() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockState)(nil).GetNextToUnlockDepositTime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockChain)(nil).GetFeeDistribution)) } -// GetNextToUnlockDepositIDsAndTime mocks base method. -func (m *MockState) GetNextToUnlockDepositIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { +// GetMultisigAlias mocks base method. 
+func (m *MockChain) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) + ret0, _ := ret[0].(*multisig.AliasWithNonce) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. -func (mr *MockStateMockRecorder) GetNextToUnlockDepositIDsAndTime(arg0 interface{}) *gomock.Call { +// GetMultisigAlias indicates an expected call of GetMultisigAlias. +func (mr *MockChainMockRecorder) GetMultisigAlias(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockState)(nil).GetNextToUnlockDepositIDsAndTime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockChain)(nil).GetMultisigAlias), arg0) } // GetNextProposalExpirationTime mocks base method. -func (m *MockState) GetNextProposalExpirationTime(arg0 set.Set[ids.ID]) (time.Time, error) { +func (m *MockChain) GetNextProposalExpirationTime(removedProposalIDs set.Set[ids.ID]) (time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", arg0) + ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", removedProposalIDs) ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(error) return ret0, ret1 } // GetNextProposalExpirationTime indicates an expected call of GetNextProposalExpirationTime. -func (mr *MockStateMockRecorder) GetNextProposalExpirationTime(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetNextProposalExpirationTime(removedProposalIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockState)(nil).GetNextProposalExpirationTime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockChain)(nil).GetNextProposalExpirationTime), removedProposalIDs) } // GetNextToExpireProposalIDsAndTime mocks base method. -func (m *MockState) GetNextToExpireProposalIDsAndTime(arg0 set.Set[ids.ID]) ([]ids.ID, time.Time, error) { +func (m *MockChain) GetNextToExpireProposalIDsAndTime(removedProposalIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", arg0) + ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", removedProposalIDs) ret0, _ := ret[0].([]ids.ID) ret1, _ := ret[1].(time.Time) ret2, _ := ret[2].(error) @@ -619,87 +502,2345 @@ func (m *MockState) GetNextToExpireProposalIDsAndTime(arg0 set.Set[ids.ID]) ([]i } // GetNextToExpireProposalIDsAndTime indicates an expected call of GetNextToExpireProposalIDsAndTime. 
-func (mr *MockStateMockRecorder) GetNextToExpireProposalIDsAndTime(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetNextToExpireProposalIDsAndTime(removedProposalIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockState)(nil).GetNextToExpireProposalIDsAndTime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockChain)(nil).GetNextToExpireProposalIDsAndTime), removedProposalIDs) } -// GetProposalIDsToFinish mocks base method. -func (m *MockState) GetProposalIDsToFinish() ([]ids.ID, error) { +// GetNextToUnlockDepositIDsAndTime mocks base method. +func (m *MockChain) GetNextToUnlockDepositIDsAndTime(removedDepositIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProposalIDsToFinish") + ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", removedDepositIDs) ret0, _ := ret[0].([]ids.ID) - ret2, _ := ret[1].(error) - return ret0, ret2 + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. -func (mr *MockStateMockRecorder) GetProposalIDsToFinish() *gomock.Call { +// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. +func (mr *MockChainMockRecorder) GetNextToUnlockDepositIDsAndTime(removedDepositIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockState)(nil).GetProposalIDsToFinish)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockChain)(nil).GetNextToUnlockDepositIDsAndTime), removedDepositIDs) } -// GetProposalIterator mocks base method. -func (m *MockState) GetProposalIterator() (ProposalsIterator, error) { +// GetNextToUnlockDepositTime mocks base method. +func (m *MockChain) GetNextToUnlockDepositTime(removedDepositIDs set.Set[ids.ID]) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", removedDepositIDs) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. +func (mr *MockChainMockRecorder) GetNextToUnlockDepositTime(removedDepositIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockChain)(nil).GetNextToUnlockDepositTime), removedDepositIDs) +} + +// GetNotDistributedValidatorReward mocks base method. +func (m *MockChain) GetNotDistributedValidatorReward() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNotDistributedValidatorReward") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNotDistributedValidatorReward indicates an expected call of GetNotDistributedValidatorReward. +func (mr *MockChainMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockChain)(nil).GetNotDistributedValidatorReward)) +} + +// GetPendingDelegatorIterator mocks base method. 
+func (m *MockChain) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", subnetID, nodeID) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. +func (mr *MockChainMockRecorder) GetPendingDelegatorIterator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetPendingDelegatorIterator), subnetID, nodeID) +} + +// GetPendingStakerIterator mocks base method. +func (m *MockChain) GetPendingStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. +func (mr *MockChainMockRecorder) GetPendingStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockChain)(nil).GetPendingStakerIterator)) +} + +// GetPendingValidator mocks base method. +func (m *MockChain) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingValidator indicates an expected call of GetPendingValidator. +func (mr *MockChainMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), subnetID, nodeID) +} + +// GetProposal mocks base method. +func (m *MockChain) GetProposal(proposalID ids.ID) (dac.ProposalState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProposal", proposalID) + ret0, _ := ret[0].(dac.ProposalState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProposal indicates an expected call of GetProposal. +func (mr *MockChainMockRecorder) GetProposal(proposalID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockChain)(nil).GetProposal), proposalID) +} + +// GetProposalIDsToFinish mocks base method. +func (m *MockChain) GetProposalIDsToFinish() ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProposalIDsToFinish") + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. +func (mr *MockChainMockRecorder) GetProposalIDsToFinish() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockChain)(nil).GetProposalIDsToFinish)) +} + +// GetProposalIterator mocks base method. +func (m *MockChain) GetProposalIterator() (ProposalsIterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetProposalIterator") ret0, _ := ret[0].(ProposalsIterator) - ret2, _ := ret[1].(error) - return ret0, ret2 + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetProposalIterator indicates an expected call of GetProposalIterator. 
-func (mr *MockStateMockRecorder) GetProposalIterator() *gomock.Call { +func (mr *MockChainMockRecorder) GetProposalIterator() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockState)(nil).GetProposalIterator)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockChain)(nil).GetProposalIterator)) } -// GetDepositOffer mocks base method. -func (m *MockState) GetDepositOffer(arg0 ids.ID) (*deposit.Offer, error) { +// GetShortIDLink mocks base method. +func (m *MockChain) GetShortIDLink(id ids.ShortID, key ShortLinkKey) (ids.ShortID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDepositOffer", arg0) - ret0, _ := ret[0].(*deposit.Offer) + ret := m.ctrl.Call(m, "GetShortIDLink", id, key) + ret0, _ := ret[0].(ids.ShortID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDepositOffer indicates an expected call of GetDepositOffer. -func (mr *MockStateMockRecorder) GetDepositOffer(arg0 interface{}) *gomock.Call { +// GetShortIDLink indicates an expected call of GetShortIDLink. +func (mr *MockChainMockRecorder) GetShortIDLink(id, key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockState)(nil).GetDepositOffer), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockChain)(nil).GetShortIDLink), id, key) } -// GetLastAccepted mocks base method. -func (m *MockState) GetLastAccepted() ids.ID { +// GetSubnetOwner mocks base method. +func (m *MockChain) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastAccepted") - ret0, _ := ret[0].(ids.ID) + ret := m.ctrl.Call(m, "GetSubnetOwner", subnetID) + ret0, _ := ret[0].(fx.Owner) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOwner indicates an expected call of GetSubnetOwner. +func (mr *MockChainMockRecorder) GetSubnetOwner(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockChain)(nil).GetSubnetOwner), subnetID) +} + +// GetSubnetTransformation mocks base method. +func (m *MockChain) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetTransformation", subnetID) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. +func (mr *MockChainMockRecorder) GetSubnetTransformation(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockChain)(nil).GetSubnetTransformation), subnetID) +} + +// GetTimestamp mocks base method. +func (m *MockChain) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) return ret0 } -// GetLastAccepted indicates an expected call of GetLastAccepted. -func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockChainMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockChain)(nil).GetTimestamp)) +} + +// GetTx mocks base method. 
+func (m *MockChain) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", txID) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(status.Status) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockChainMockRecorder) GetTx(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), txID) +} + +// GetUTXO mocks base method. +func (m *MockChain) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", utxoID) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockChainMockRecorder) GetUTXO(utxoID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), utxoID) +} + +// LockedUTXOs mocks base method. +func (m *MockChain) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockedUTXOs", arg0, arg1, arg2) + ret0, _ := ret[0].([]*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LockedUTXOs indicates an expected call of LockedUTXOs. +func (mr *MockChainMockRecorder) LockedUTXOs(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockChain)(nil).LockedUTXOs), arg0, arg1, arg2) +} + +// ModifyDeposit mocks base method. +func (m *MockChain) ModifyDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ModifyDeposit", depositTxID, deposit) +} + +// ModifyDeposit indicates an expected call of ModifyDeposit. +func (mr *MockChainMockRecorder) ModifyDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockChain)(nil).ModifyDeposit), depositTxID, deposit) +} + +// ModifyProposal mocks base method. +func (m *MockChain) ModifyProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ModifyProposal", proposalID, proposal) +} + +// ModifyProposal indicates an expected call of ModifyProposal. +func (mr *MockChainMockRecorder) ModifyProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockChain)(nil).ModifyProposal), proposalID, proposal) +} + +// PutCurrentDelegator mocks base method. +func (m *MockChain) PutCurrentDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentDelegator", staker) +} + +// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. +func (mr *MockChainMockRecorder) PutCurrentDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockChain)(nil).PutCurrentDelegator), staker) +} + +// PutCurrentValidator mocks base method. +func (m *MockChain) PutCurrentValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentValidator", staker) +} + +// PutCurrentValidator indicates an expected call of PutCurrentValidator. 
+func (mr *MockChainMockRecorder) PutCurrentValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockChain)(nil).PutCurrentValidator), staker) +} + +// PutDeferredValidator mocks base method. +func (m *MockChain) PutDeferredValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutDeferredValidator", staker) +} + +// PutDeferredValidator indicates an expected call of PutDeferredValidator. +func (mr *MockChainMockRecorder) PutDeferredValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockChain)(nil).PutDeferredValidator), staker) +} + +// PutPendingDelegator mocks base method. +func (m *MockChain) PutPendingDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingDelegator", staker) +} + +// PutPendingDelegator indicates an expected call of PutPendingDelegator. +func (mr *MockChainMockRecorder) PutPendingDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockChain)(nil).PutPendingDelegator), staker) +} + +// PutPendingValidator mocks base method. +func (m *MockChain) PutPendingValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingValidator", staker) +} + +// PutPendingValidator indicates an expected call of PutPendingValidator. +func (mr *MockChainMockRecorder) PutPendingValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockChain)(nil).PutPendingValidator), staker) +} + +// RemoveDeposit mocks base method. +func (m *MockChain) RemoveDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveDeposit", depositTxID, deposit) +} + +// RemoveDeposit indicates an expected call of RemoveDeposit. +func (mr *MockChainMockRecorder) RemoveDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockChain)(nil).RemoveDeposit), depositTxID, deposit) +} + +// RemoveProposal mocks base method. +func (m *MockChain) RemoveProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveProposal", proposalID, proposal) +} + +// RemoveProposal indicates an expected call of RemoveProposal. +func (mr *MockChainMockRecorder) RemoveProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockChain)(nil).RemoveProposal), proposalID, proposal) +} + +// RemoveProposalIDToFinish mocks base method. +func (m *MockChain) RemoveProposalIDToFinish(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) +} + +// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. +func (mr *MockChainMockRecorder) RemoveProposalIDToFinish(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockChain)(nil).RemoveProposalIDToFinish), arg0) +} + +// SetAddressStates mocks base method. 
+func (m *MockChain) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAddressStates", arg0, arg1) +} + +// SetAddressStates indicates an expected call of SetAddressStates. +func (mr *MockChainMockRecorder) SetAddressStates(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockChain)(nil).SetAddressStates), arg0, arg1) +} + +// SetBaseFee mocks base method. +func (m *MockChain) SetBaseFee(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBaseFee", arg0) +} + +// SetBaseFee indicates an expected call of SetBaseFee. +func (mr *MockChainMockRecorder) SetBaseFee(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockChain)(nil).SetBaseFee), arg0) +} + +// SetClaimable mocks base method. +func (m *MockChain) SetClaimable(ownerID ids.ID, claimable *Claimable) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetClaimable", ownerID, claimable) +} + +// SetClaimable indicates an expected call of SetClaimable. +func (mr *MockChainMockRecorder) SetClaimable(ownerID, claimable any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockChain)(nil).SetClaimable), ownerID, claimable) +} + +// SetCurrentSupply mocks base method. +func (m *MockChain) SetCurrentSupply(subnetID ids.ID, cs uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentSupply", subnetID, cs) +} + +// SetCurrentSupply indicates an expected call of SetCurrentSupply. +func (mr *MockChainMockRecorder) SetCurrentSupply(subnetID, cs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockChain)(nil).SetCurrentSupply), subnetID, cs) +} + +// SetDelegateeReward mocks base method. +func (m *MockChain) SetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID, amount uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", subnetID, nodeID, amount) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockChainMockRecorder) SetDelegateeReward(subnetID, nodeID, amount any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockChain)(nil).SetDelegateeReward), subnetID, nodeID, amount) +} + +// SetDepositOffer mocks base method. +func (m *MockChain) SetDepositOffer(offer *deposit.Offer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDepositOffer", offer) +} + +// SetDepositOffer indicates an expected call of SetDepositOffer. +func (mr *MockChainMockRecorder) SetDepositOffer(offer any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockChain)(nil).SetDepositOffer), offer) +} + +// SetFeeDistribution mocks base method. +func (m *MockChain) SetFeeDistribution(arg0 [3]uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFeeDistribution", arg0) +} + +// SetFeeDistribution indicates an expected call of SetFeeDistribution. 
+func (mr *MockChainMockRecorder) SetFeeDistribution(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockChain)(nil).SetFeeDistribution), arg0) +} + +// SetMultisigAlias mocks base method. +func (m *MockChain) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetMultisigAlias", arg0, arg1) +} + +// SetMultisigAlias indicates an expected call of SetMultisigAlias. +func (mr *MockChainMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockChain)(nil).SetMultisigAlias), arg0, arg1) +} + +// SetNotDistributedValidatorReward mocks base method. +func (m *MockChain) SetNotDistributedValidatorReward(reward uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNotDistributedValidatorReward", reward) +} + +// SetNotDistributedValidatorReward indicates an expected call of SetNotDistributedValidatorReward. +func (mr *MockChainMockRecorder) SetNotDistributedValidatorReward(reward any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockChain)(nil).SetNotDistributedValidatorReward), reward) +} + +// SetShortIDLink mocks base method. +func (m *MockChain) SetShortIDLink(id ids.ShortID, key ShortLinkKey, link *ids.ShortID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetShortIDLink", id, key, link) +} + +// SetShortIDLink indicates an expected call of SetShortIDLink. +func (mr *MockChainMockRecorder) SetShortIDLink(id, key, link any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockChain)(nil).SetShortIDLink), id, key, link) +} + +// SetSubnetOwner mocks base method. +func (m *MockChain) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSubnetOwner", subnetID, owner) +} + +// SetSubnetOwner indicates an expected call of SetSubnetOwner. +func (mr *MockChainMockRecorder) SetSubnetOwner(subnetID, owner any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockChain)(nil).SetSubnetOwner), subnetID, owner) +} + +// SetTimestamp mocks base method. +func (m *MockChain) SetTimestamp(tm time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", tm) +} + +// SetTimestamp indicates an expected call of SetTimestamp. +func (mr *MockChainMockRecorder) SetTimestamp(tm any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), tm) +} + +// MockState is a mock of State interface. +type MockState struct { + ctrl *gomock.Controller + recorder *MockStateMockRecorder +} + +// MockStateMockRecorder is the mock recorder for MockState. +type MockStateMockRecorder struct { + mock *MockState +} + +// NewMockState creates a new mock instance. +func NewMockState(ctrl *gomock.Controller) *MockState { + mock := &MockState{ctrl: ctrl} + mock.recorder = &MockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockState) EXPECT() *MockStateMockRecorder { + return m.recorder +} + +// Abort mocks base method. 
+func (m *MockState) Abort() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Abort") +} + +// Abort indicates an expected call of Abort. +func (mr *MockStateMockRecorder) Abort() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Abort", reflect.TypeOf((*MockState)(nil).Abort)) +} + +// AddChain mocks base method. +func (m *MockState) AddChain(createChainTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddChain", createChainTx) +} + +// AddChain indicates an expected call of AddChain. +func (mr *MockStateMockRecorder) AddChain(createChainTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockState)(nil).AddChain), createChainTx) +} + +// AddDeposit mocks base method. +func (m *MockState) AddDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddDeposit", depositTxID, deposit) +} + +// AddDeposit indicates an expected call of AddDeposit. +func (mr *MockStateMockRecorder) AddDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockState)(nil).AddDeposit), depositTxID, deposit) +} + +// AddProposal mocks base method. +func (m *MockState) AddProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddProposal", proposalID, proposal) +} + +// AddProposal indicates an expected call of AddProposal. +func (mr *MockStateMockRecorder) AddProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockState)(nil).AddProposal), proposalID, proposal) +} + +// AddProposalIDToFinish mocks base method. +func (m *MockState) AddProposalIDToFinish(proposalID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddProposalIDToFinish", proposalID) +} + +// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. +func (mr *MockStateMockRecorder) AddProposalIDToFinish(proposalID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockState)(nil).AddProposalIDToFinish), proposalID) +} + +// AddRewardUTXO mocks base method. +func (m *MockState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRewardUTXO", txID, utxo) +} + +// AddRewardUTXO indicates an expected call of AddRewardUTXO. +func (mr *MockStateMockRecorder) AddRewardUTXO(txID, utxo any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockState)(nil).AddRewardUTXO), txID, utxo) +} + +// AddStatelessBlock mocks base method. +func (m *MockState) AddStatelessBlock(block block.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddStatelessBlock", block) +} + +// AddStatelessBlock indicates an expected call of AddStatelessBlock. +func (mr *MockStateMockRecorder) AddStatelessBlock(block any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatelessBlock", reflect.TypeOf((*MockState)(nil).AddStatelessBlock), block) +} + +// AddSubnet mocks base method. +func (m *MockState) AddSubnet(createSubnetTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnet", createSubnetTx) +} + +// AddSubnet indicates an expected call of AddSubnet. 
+func (mr *MockStateMockRecorder) AddSubnet(createSubnetTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockState)(nil).AddSubnet), createSubnetTx) +} + +// AddSubnetTransformation mocks base method. +func (m *MockState) AddSubnetTransformation(transformSubnetTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnetTransformation", transformSubnetTx) +} + +// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. +func (mr *MockStateMockRecorder) AddSubnetTransformation(transformSubnetTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockState)(nil).AddSubnetTransformation), transformSubnetTx) +} + +// AddTx mocks base method. +func (m *MockState) AddTx(tx *txs.Tx, status status.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", tx, status) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockStateMockRecorder) AddTx(tx, status any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), tx, status) +} + +// AddUTXO mocks base method. +func (m *MockState) AddUTXO(utxo *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", utxo) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockStateMockRecorder) AddUTXO(utxo any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), utxo) +} + +// ApplyValidatorPublicKeyDiffs mocks base method. +func (m *MockState) ApplyValidatorPublicKeyDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", ctx, validators, startHeight, endHeight) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyValidatorPublicKeyDiffs indicates an expected call of ApplyValidatorPublicKeyDiffs. +func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(ctx, validators, startHeight, endHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), ctx, validators, startHeight, endHeight) +} + +// ApplyValidatorWeightDiffs mocks base method. +func (m *MockState) ApplyValidatorWeightDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64, subnetID ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyValidatorWeightDiffs", ctx, validators, startHeight, endHeight, subnetID) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyValidatorWeightDiffs indicates an expected call of ApplyValidatorWeightDiffs. +func (mr *MockStateMockRecorder) ApplyValidatorWeightDiffs(ctx, validators, startHeight, endHeight, subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorWeightDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorWeightDiffs), ctx, validators, startHeight, endHeight, subnetID) +} + +// CaminoConfig mocks base method. 
+func (m *MockState) CaminoConfig() (*CaminoConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CaminoConfig") + ret0, _ := ret[0].(*CaminoConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CaminoConfig indicates an expected call of CaminoConfig. +func (mr *MockStateMockRecorder) CaminoConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockState)(nil).CaminoConfig)) +} + +// Checksum mocks base method. +func (m *MockState) Checksum() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Checksum") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Checksum indicates an expected call of Checksum. +func (mr *MockStateMockRecorder) Checksum() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checksum", reflect.TypeOf((*MockState)(nil).Checksum)) +} + +// Close mocks base method. +func (m *MockState) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockStateMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockState)(nil).Close)) +} + +// Commit mocks base method. +func (m *MockState) Commit() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit") + ret0, _ := ret[0].(error) + return ret0 +} + +// Commit indicates an expected call of Commit. +func (mr *MockStateMockRecorder) Commit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockState)(nil).Commit)) +} + +// CommitBatch mocks base method. +func (m *MockState) CommitBatch() (database.Batch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBatch") + ret0, _ := ret[0].(database.Batch) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CommitBatch indicates an expected call of CommitBatch. +func (mr *MockStateMockRecorder) CommitBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBatch", reflect.TypeOf((*MockState)(nil).CommitBatch)) +} + +// DeleteCurrentDelegator mocks base method. +func (m *MockState) DeleteCurrentDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentDelegator", staker) +} + +// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. +func (mr *MockStateMockRecorder) DeleteCurrentDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockState)(nil).DeleteCurrentDelegator), staker) +} + +// DeleteCurrentValidator mocks base method. +func (m *MockState) DeleteCurrentValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentValidator", staker) +} + +// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. +func (mr *MockStateMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockState)(nil).DeleteCurrentValidator), staker) +} + +// DeleteDeferredValidator mocks base method. 
+func (m *MockState) DeleteDeferredValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteDeferredValidator", staker) +} + +// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. +func (mr *MockStateMockRecorder) DeleteDeferredValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockState)(nil).DeleteDeferredValidator), staker) +} + +// DeletePendingDelegator mocks base method. +func (m *MockState) DeletePendingDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingDelegator", staker) +} + +// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. +func (mr *MockStateMockRecorder) DeletePendingDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockState)(nil).DeletePendingDelegator), staker) +} + +// DeletePendingValidator mocks base method. +func (m *MockState) DeletePendingValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingValidator", staker) +} + +// DeletePendingValidator indicates an expected call of DeletePendingValidator. +func (mr *MockStateMockRecorder) DeletePendingValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockState)(nil).DeletePendingValidator), staker) +} + +// DeleteUTXO mocks base method. +func (m *MockState) DeleteUTXO(utxoID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", utxoID) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockStateMockRecorder) DeleteUTXO(utxoID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), utxoID) +} + +// GetAddressStates mocks base method. +func (m *MockState) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAddressStates", arg0) + ret0, _ := ret[0].(addrstate.AddressState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAddressStates indicates an expected call of GetAddressStates. +func (mr *MockStateMockRecorder) GetAddressStates(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockState)(nil).GetAddressStates), arg0) +} + +// GetAllDepositOffers mocks base method. +func (m *MockState) GetAllDepositOffers() ([]*deposit.Offer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllDepositOffers") + ret0, _ := ret[0].([]*deposit.Offer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllDepositOffers indicates an expected call of GetAllDepositOffers. +func (mr *MockStateMockRecorder) GetAllDepositOffers() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockState)(nil).GetAllDepositOffers)) +} + +// GetBaseFee mocks base method. +func (m *MockState) GetBaseFee() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBaseFee") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBaseFee indicates an expected call of GetBaseFee. 
+func (mr *MockStateMockRecorder) GetBaseFee() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockState)(nil).GetBaseFee)) +} + +// GetBlockIDAtHeight mocks base method. +func (m *MockState) GetBlockIDAtHeight(height uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", height) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockStateMockRecorder) GetBlockIDAtHeight(height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), height) +} + +// GetChains mocks base method. +func (m *MockState) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChains", subnetID) + ret0, _ := ret[0].([]*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChains indicates an expected call of GetChains. +func (mr *MockStateMockRecorder) GetChains(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockState)(nil).GetChains), subnetID) +} + +// GetClaimable mocks base method. +func (m *MockState) GetClaimable(ownerID ids.ID) (*Claimable, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClaimable", ownerID) + ret0, _ := ret[0].(*Claimable) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClaimable indicates an expected call of GetClaimable. +func (mr *MockStateMockRecorder) GetClaimable(ownerID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockState)(nil).GetClaimable), ownerID) +} + +// GetCurrentDelegatorIterator mocks base method. +func (m *MockState) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", subnetID, nodeID) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. +func (mr *MockStateMockRecorder) GetCurrentDelegatorIterator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetCurrentDelegatorIterator), subnetID, nodeID) +} + +// GetCurrentStakerIterator mocks base method. +func (m *MockState) GetCurrentStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. +func (mr *MockStateMockRecorder) GetCurrentStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockState)(nil).GetCurrentStakerIterator)) +} + +// GetCurrentSupply mocks base method. 
+func (m *MockState) GetCurrentSupply(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSupply", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSupply indicates an expected call of GetCurrentSupply. +func (mr *MockStateMockRecorder) GetCurrentSupply(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockState)(nil).GetCurrentSupply), subnetID) +} + +// GetCurrentValidator mocks base method. +func (m *MockState) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidator indicates an expected call of GetCurrentValidator. +func (mr *MockStateMockRecorder) GetCurrentValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockState)(nil).GetCurrentValidator), subnetID, nodeID) +} + +// GetDeferredStakerIterator mocks base method. +func (m *MockState) GetDeferredStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeferredStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. +func (mr *MockStateMockRecorder) GetDeferredStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockState)(nil).GetDeferredStakerIterator)) +} + +// GetDeferredValidator mocks base method. +func (m *MockState) GetDeferredValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeferredValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeferredValidator indicates an expected call of GetDeferredValidator. +func (mr *MockStateMockRecorder) GetDeferredValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockState)(nil).GetDeferredValidator), subnetID, nodeID) +} + +// GetDelegateeReward mocks base method. +func (m *MockState) GetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", subnetID, nodeID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockStateMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockState)(nil).GetDelegateeReward), subnetID, nodeID) +} + +// GetDeposit mocks base method. +func (m *MockState) GetDeposit(depositTxID ids.ID) (*deposit.Deposit, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeposit", depositTxID) + ret0, _ := ret[0].(*deposit.Deposit) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeposit indicates an expected call of GetDeposit. 
+func (mr *MockStateMockRecorder) GetDeposit(depositTxID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockState)(nil).GetDeposit), depositTxID) +} + +// GetDepositOffer mocks base method. +func (m *MockState) GetDepositOffer(offerID ids.ID) (*deposit.Offer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDepositOffer", offerID) + ret0, _ := ret[0].(*deposit.Offer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDepositOffer indicates an expected call of GetDepositOffer. +func (mr *MockStateMockRecorder) GetDepositOffer(offerID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockState)(nil).GetDepositOffer), offerID) +} + +// GetFeeDistribution mocks base method. +func (m *MockState) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFeeDistribution") + ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFeeDistribution indicates an expected call of GetFeeDistribution. +func (mr *MockStateMockRecorder) GetFeeDistribution() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockState)(nil).GetFeeDistribution)) +} + +// GetLastAccepted mocks base method. +func (m *MockState) GetLastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// GetLastAccepted indicates an expected call of GetLastAccepted. +func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) +} + +// GetMultisigAlias mocks base method. +func (m *MockState) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) + ret0, _ := ret[0].(*multisig.AliasWithNonce) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultisigAlias indicates an expected call of GetMultisigAlias. +func (mr *MockStateMockRecorder) GetMultisigAlias(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockState)(nil).GetMultisigAlias), arg0) +} + +// GetNextProposalExpirationTime mocks base method. +func (m *MockState) GetNextProposalExpirationTime(removedProposalIDs set.Set[ids.ID]) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", removedProposalIDs) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNextProposalExpirationTime indicates an expected call of GetNextProposalExpirationTime. +func (mr *MockStateMockRecorder) GetNextProposalExpirationTime(removedProposalIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockState)(nil).GetNextProposalExpirationTime), removedProposalIDs) +} + +// GetNextToExpireProposalIDsAndTime mocks base method. 
+func (m *MockState) GetNextToExpireProposalIDsAndTime(removedProposalIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", removedProposalIDs) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetNextToExpireProposalIDsAndTime indicates an expected call of GetNextToExpireProposalIDsAndTime. +func (mr *MockStateMockRecorder) GetNextToExpireProposalIDsAndTime(removedProposalIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockState)(nil).GetNextToExpireProposalIDsAndTime), removedProposalIDs) +} + +// GetNextToUnlockDepositIDsAndTime mocks base method. +func (m *MockState) GetNextToUnlockDepositIDsAndTime(removedDepositIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", removedDepositIDs) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. +func (mr *MockStateMockRecorder) GetNextToUnlockDepositIDsAndTime(removedDepositIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockState)(nil).GetNextToUnlockDepositIDsAndTime), removedDepositIDs) +} + +// GetNextToUnlockDepositTime mocks base method. +func (m *MockState) GetNextToUnlockDepositTime(removedDepositIDs set.Set[ids.ID]) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", removedDepositIDs) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. +func (mr *MockStateMockRecorder) GetNextToUnlockDepositTime(removedDepositIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockState)(nil).GetNextToUnlockDepositTime), removedDepositIDs) +} + +// GetNotDistributedValidatorReward mocks base method. +func (m *MockState) GetNotDistributedValidatorReward() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNotDistributedValidatorReward") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNotDistributedValidatorReward indicates an expected call of GetNotDistributedValidatorReward. +func (mr *MockStateMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockState)(nil).GetNotDistributedValidatorReward)) +} + +// GetPendingDelegatorIterator mocks base method. +func (m *MockState) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", subnetID, nodeID) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. 
+func (mr *MockStateMockRecorder) GetPendingDelegatorIterator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetPendingDelegatorIterator), subnetID, nodeID) +} + +// GetPendingStakerIterator mocks base method. +func (m *MockState) GetPendingStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. +func (mr *MockStateMockRecorder) GetPendingStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockState)(nil).GetPendingStakerIterator)) +} + +// GetPendingValidator mocks base method. +func (m *MockState) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingValidator indicates an expected call of GetPendingValidator. +func (mr *MockStateMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockState)(nil).GetPendingValidator), subnetID, nodeID) +} + +// GetProposal mocks base method. +func (m *MockState) GetProposal(proposalID ids.ID) (dac.ProposalState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProposal", proposalID) + ret0, _ := ret[0].(dac.ProposalState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProposal indicates an expected call of GetProposal. +func (mr *MockStateMockRecorder) GetProposal(proposalID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockState)(nil).GetProposal), proposalID) +} + +// GetProposalIDsToFinish mocks base method. +func (m *MockState) GetProposalIDsToFinish() ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProposalIDsToFinish") + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. +func (mr *MockStateMockRecorder) GetProposalIDsToFinish() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockState)(nil).GetProposalIDsToFinish)) +} + +// GetProposalIterator mocks base method. +func (m *MockState) GetProposalIterator() (ProposalsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProposalIterator") + ret0, _ := ret[0].(ProposalsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProposalIterator indicates an expected call of GetProposalIterator. +func (mr *MockStateMockRecorder) GetProposalIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockState)(nil).GetProposalIterator)) +} + +// GetRewardUTXOs mocks base method. 
+func (m *MockState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRewardUTXOs", txID) + ret0, _ := ret[0].([]*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. +func (mr *MockStateMockRecorder) GetRewardUTXOs(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), txID) +} + +// GetShortIDLink mocks base method. +func (m *MockState) GetShortIDLink(id ids.ShortID, key ShortLinkKey) (ids.ShortID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetShortIDLink", id, key) + ret0, _ := ret[0].(ids.ShortID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetShortIDLink indicates an expected call of GetShortIDLink. +func (mr *MockStateMockRecorder) GetShortIDLink(id, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockState)(nil).GetShortIDLink), id, key) +} + +// GetStartTime mocks base method. +func (m *MockState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStartTime", nodeID, subnetID) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStartTime indicates an expected call of GetStartTime. +func (mr *MockStateMockRecorder) GetStartTime(nodeID, subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), nodeID, subnetID) +} + +// GetStatelessBlock mocks base method. +func (m *MockState) GetStatelessBlock(blockID ids.ID) (block.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatelessBlock", blockID) + ret0, _ := ret[0].(block.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStatelessBlock indicates an expected call of GetStatelessBlock. +func (mr *MockStateMockRecorder) GetStatelessBlock(blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), blockID) +} + +// GetSubnetOwner mocks base method. +func (m *MockState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOwner", subnetID) + ret0, _ := ret[0].(fx.Owner) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOwner indicates an expected call of GetSubnetOwner. +func (mr *MockStateMockRecorder) GetSubnetOwner(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockState)(nil).GetSubnetOwner), subnetID) +} + +// GetSubnetTransformation mocks base method. +func (m *MockState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetTransformation", subnetID) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. 
+func (mr *MockStateMockRecorder) GetSubnetTransformation(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockState)(nil).GetSubnetTransformation), subnetID) +} + +// GetSubnets mocks base method. +func (m *MockState) GetSubnets() ([]*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnets") + ret0, _ := ret[0].([]*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnets indicates an expected call of GetSubnets. +func (mr *MockStateMockRecorder) GetSubnets() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockState)(nil).GetSubnets)) +} + +// GetTimestamp mocks base method. +func (m *MockState) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockStateMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockState)(nil).GetTimestamp)) +} + +// GetTx mocks base method. +func (m *MockState) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", txID) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(status.Status) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockStateMockRecorder) GetTx(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), txID) +} + +// GetUTXO mocks base method. +func (m *MockState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", utxoID) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockStateMockRecorder) GetUTXO(utxoID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), utxoID) +} + +// GetUptime mocks base method. +func (m *MockState) GetUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUptime", nodeID, subnetID) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetUptime indicates an expected call of GetUptime. +func (mr *MockStateMockRecorder) GetUptime(nodeID, subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), nodeID, subnetID) +} + +// LockedUTXOs mocks base method. +func (m *MockState) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockedUTXOs", arg0, arg1, arg2) + ret0, _ := ret[0].([]*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LockedUTXOs indicates an expected call of LockedUTXOs. 
+func (mr *MockStateMockRecorder) LockedUTXOs(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockState)(nil).LockedUTXOs), arg0, arg1, arg2) +} + +// ModifyDeposit mocks base method. +func (m *MockState) ModifyDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ModifyDeposit", depositTxID, deposit) +} + +// ModifyDeposit indicates an expected call of ModifyDeposit. +func (mr *MockStateMockRecorder) ModifyDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockState)(nil).ModifyDeposit), depositTxID, deposit) +} + +// ModifyProposal mocks base method. +func (m *MockState) ModifyProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ModifyProposal", proposalID, proposal) +} + +// ModifyProposal indicates an expected call of ModifyProposal. +func (mr *MockStateMockRecorder) ModifyProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockState)(nil).ModifyProposal), proposalID, proposal) +} + +// PruneAndIndex mocks base method. +func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// PruneAndIndex indicates an expected call of PruneAndIndex. +func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1) +} + +// PutCurrentDelegator mocks base method. +func (m *MockState) PutCurrentDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentDelegator", staker) +} + +// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. +func (mr *MockStateMockRecorder) PutCurrentDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockState)(nil).PutCurrentDelegator), staker) +} + +// PutCurrentValidator mocks base method. +func (m *MockState) PutCurrentValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentValidator", staker) +} + +// PutCurrentValidator indicates an expected call of PutCurrentValidator. +func (mr *MockStateMockRecorder) PutCurrentValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockState)(nil).PutCurrentValidator), staker) +} + +// PutDeferredValidator mocks base method. +func (m *MockState) PutDeferredValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutDeferredValidator", staker) +} + +// PutDeferredValidator indicates an expected call of PutDeferredValidator. +func (mr *MockStateMockRecorder) PutDeferredValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockState)(nil).PutDeferredValidator), staker) +} + +// PutPendingDelegator mocks base method. 
+func (m *MockState) PutPendingDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingDelegator", staker) +} + +// PutPendingDelegator indicates an expected call of PutPendingDelegator. +func (mr *MockStateMockRecorder) PutPendingDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockState)(nil).PutPendingDelegator), staker) +} + +// PutPendingValidator mocks base method. +func (m *MockState) PutPendingValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingValidator", staker) +} + +// PutPendingValidator indicates an expected call of PutPendingValidator. +func (mr *MockStateMockRecorder) PutPendingValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockState)(nil).PutPendingValidator), staker) +} + +// RemoveDeposit mocks base method. +func (m *MockState) RemoveDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveDeposit", depositTxID, deposit) +} + +// RemoveDeposit indicates an expected call of RemoveDeposit. +func (mr *MockStateMockRecorder) RemoveDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockState)(nil).RemoveDeposit), depositTxID, deposit) +} + +// RemoveProposal mocks base method. +func (m *MockState) RemoveProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveProposal", proposalID, proposal) +} + +// RemoveProposal indicates an expected call of RemoveProposal. +func (mr *MockStateMockRecorder) RemoveProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockState)(nil).RemoveProposal), proposalID, proposal) +} + +// RemoveProposalIDToFinish mocks base method. +func (m *MockState) RemoveProposalIDToFinish(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) +} + +// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. +func (mr *MockStateMockRecorder) RemoveProposalIDToFinish(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockState)(nil).RemoveProposalIDToFinish), arg0) +} + +// SetAddressStates mocks base method. +func (m *MockState) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAddressStates", arg0, arg1) +} + +// SetAddressStates indicates an expected call of SetAddressStates. +func (mr *MockStateMockRecorder) SetAddressStates(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockState)(nil).SetAddressStates), arg0, arg1) +} + +// SetBaseFee mocks base method. +func (m *MockState) SetBaseFee(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBaseFee", arg0) +} + +// SetBaseFee indicates an expected call of SetBaseFee. +func (mr *MockStateMockRecorder) SetBaseFee(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockState)(nil).SetBaseFee), arg0) +} + +// SetClaimable mocks base method. 
+func (m *MockState) SetClaimable(ownerID ids.ID, claimable *Claimable) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetClaimable", ownerID, claimable) +} + +// SetClaimable indicates an expected call of SetClaimable. +func (mr *MockStateMockRecorder) SetClaimable(ownerID, claimable any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockState)(nil).SetClaimable), ownerID, claimable) +} + +// SetCurrentSupply mocks base method. +func (m *MockState) SetCurrentSupply(subnetID ids.ID, cs uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentSupply", subnetID, cs) +} + +// SetCurrentSupply indicates an expected call of SetCurrentSupply. +func (mr *MockStateMockRecorder) SetCurrentSupply(subnetID, cs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockState)(nil).SetCurrentSupply), subnetID, cs) +} + +// SetDelegateeReward mocks base method. +func (m *MockState) SetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID, amount uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", subnetID, nodeID, amount) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockStateMockRecorder) SetDelegateeReward(subnetID, nodeID, amount any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockState)(nil).SetDelegateeReward), subnetID, nodeID, amount) +} + +// SetDepositOffer mocks base method. +func (m *MockState) SetDepositOffer(offer *deposit.Offer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDepositOffer", offer) +} + +// SetDepositOffer indicates an expected call of SetDepositOffer. +func (mr *MockStateMockRecorder) SetDepositOffer(offer any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockState)(nil).SetDepositOffer), offer) +} + +// SetFeeDistribution mocks base method. +func (m *MockState) SetFeeDistribution(arg0 [3]uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFeeDistribution", arg0) +} + +// SetFeeDistribution indicates an expected call of SetFeeDistribution. +func (mr *MockStateMockRecorder) SetFeeDistribution(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockState)(nil).SetFeeDistribution), arg0) +} + +// SetHeight mocks base method. +func (m *MockState) SetHeight(height uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetHeight", height) +} + +// SetHeight indicates an expected call of SetHeight. +func (mr *MockStateMockRecorder) SetHeight(height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeight", reflect.TypeOf((*MockState)(nil).SetHeight), height) +} + +// SetLastAccepted mocks base method. +func (m *MockState) SetLastAccepted(blkID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLastAccepted", blkID) +} + +// SetLastAccepted indicates an expected call of SetLastAccepted. +func (mr *MockStateMockRecorder) SetLastAccepted(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), blkID) +} + +// SetMultisigAlias mocks base method. 
+func (m *MockState) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetMultisigAlias", arg0, arg1) +} + +// SetMultisigAlias indicates an expected call of SetMultisigAlias. +func (mr *MockStateMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockState)(nil).SetMultisigAlias), arg0, arg1) +} + +// SetNotDistributedValidatorReward mocks base method. +func (m *MockState) SetNotDistributedValidatorReward(reward uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNotDistributedValidatorReward", reward) +} + +// SetNotDistributedValidatorReward indicates an expected call of SetNotDistributedValidatorReward. +func (mr *MockStateMockRecorder) SetNotDistributedValidatorReward(reward any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockState)(nil).SetNotDistributedValidatorReward), reward) +} + +// SetShortIDLink mocks base method. +func (m *MockState) SetShortIDLink(id ids.ShortID, key ShortLinkKey, link *ids.ShortID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetShortIDLink", id, key, link) +} + +// SetShortIDLink indicates an expected call of SetShortIDLink. +func (mr *MockStateMockRecorder) SetShortIDLink(id, key, link any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockState)(nil).SetShortIDLink), id, key, link) +} + +// SetSubnetOwner mocks base method. +func (m *MockState) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSubnetOwner", subnetID, owner) +} + +// SetSubnetOwner indicates an expected call of SetSubnetOwner. +func (mr *MockStateMockRecorder) SetSubnetOwner(subnetID, owner any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockState)(nil).SetSubnetOwner), subnetID, owner) +} + +// SetTimestamp mocks base method. +func (m *MockState) SetTimestamp(tm time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", tm) +} + +// SetTimestamp indicates an expected call of SetTimestamp. +func (mr *MockStateMockRecorder) SetTimestamp(tm any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), tm) +} + +// SetUptime mocks base method. +func (m *MockState) SetUptime(nodeID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetUptime", nodeID, subnetID, upDuration, lastUpdated) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetUptime indicates an expected call of SetUptime. +func (mr *MockStateMockRecorder) SetUptime(nodeID, subnetID, upDuration, lastUpdated any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), nodeID, subnetID, upDuration, lastUpdated) +} + +// ShouldPrune mocks base method. +func (m *MockState) ShouldPrune() (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldPrune") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ShouldPrune indicates an expected call of ShouldPrune. 
+func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune)) +} + +// UTXOIDs mocks base method. +func (m *MockState) UTXOIDs(addr []byte, previous ids.ID, limit int) ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UTXOIDs", addr, previous, limit) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UTXOIDs indicates an expected call of UTXOIDs. +func (mr *MockStateMockRecorder) UTXOIDs(addr, previous, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), addr, previous, limit) +} + +// MockDiff is a mock of Diff interface. +type MockDiff struct { + ctrl *gomock.Controller + recorder *MockDiffMockRecorder +} + +// MockDiffMockRecorder is the mock recorder for MockDiff. +type MockDiffMockRecorder struct { + mock *MockDiff +} + +// NewMockDiff creates a new mock instance. +func NewMockDiff(ctrl *gomock.Controller) *MockDiff { + mock := &MockDiff{ctrl: ctrl} + mock.recorder = &MockDiffMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDiff) EXPECT() *MockDiffMockRecorder { + return m.recorder +} + +// AddChain mocks base method. +func (m *MockDiff) AddChain(createChainTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddChain", createChainTx) +} + +// AddChain indicates an expected call of AddChain. +func (mr *MockDiffMockRecorder) AddChain(createChainTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockDiff)(nil).AddChain), createChainTx) +} + +// AddDeposit mocks base method. +func (m *MockDiff) AddDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddDeposit", depositTxID, deposit) +} + +// AddDeposit indicates an expected call of AddDeposit. +func (mr *MockDiffMockRecorder) AddDeposit(depositTxID, deposit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockDiff)(nil).AddDeposit), depositTxID, deposit) +} + +// AddProposal mocks base method. +func (m *MockDiff) AddProposal(proposalID ids.ID, proposal dac.ProposalState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddProposal", proposalID, proposal) +} + +// AddProposal indicates an expected call of AddProposal. +func (mr *MockDiffMockRecorder) AddProposal(proposalID, proposal any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockDiff)(nil).AddProposal), proposalID, proposal) +} + +// AddProposalIDToFinish mocks base method. +func (m *MockDiff) AddProposalIDToFinish(proposalID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddProposalIDToFinish", proposalID) +} + +// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. +func (mr *MockDiffMockRecorder) AddProposalIDToFinish(proposalID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockDiff)(nil).AddProposalIDToFinish), proposalID) +} + +// AddRewardUTXO mocks base method. 
+func (m *MockDiff) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRewardUTXO", txID, utxo) +} + +// AddRewardUTXO indicates an expected call of AddRewardUTXO. +func (mr *MockDiffMockRecorder) AddRewardUTXO(txID, utxo any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockDiff)(nil).AddRewardUTXO), txID, utxo) +} + +// AddSubnet mocks base method. +func (m *MockDiff) AddSubnet(createSubnetTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnet", createSubnetTx) +} + +// AddSubnet indicates an expected call of AddSubnet. +func (mr *MockDiffMockRecorder) AddSubnet(createSubnetTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockDiff)(nil).AddSubnet), createSubnetTx) +} + +// AddSubnetTransformation mocks base method. +func (m *MockDiff) AddSubnetTransformation(transformSubnetTx *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnetTransformation", transformSubnetTx) +} + +// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. +func (mr *MockDiffMockRecorder) AddSubnetTransformation(transformSubnetTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).AddSubnetTransformation), transformSubnetTx) +} + +// AddTx mocks base method. +func (m *MockDiff) AddTx(tx *txs.Tx, status status.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", tx, status) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockDiffMockRecorder) AddTx(tx, status any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), tx, status) +} + +// AddUTXO mocks base method. +func (m *MockDiff) AddUTXO(utxo *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", utxo) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockDiffMockRecorder) AddUTXO(utxo any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), utxo) +} + +// Apply mocks base method. +func (m *MockDiff) Apply(arg0 Chain) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Apply", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Apply indicates an expected call of Apply. +func (mr *MockDiffMockRecorder) Apply(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) +} + +// ApplyCaminoState mocks base method. +func (m *MockDiff) ApplyCaminoState(arg0 Chain) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyCaminoState", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyCaminoState indicates an expected call of ApplyCaminoState. +func (mr *MockDiffMockRecorder) ApplyCaminoState(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyCaminoState", reflect.TypeOf((*MockDiff)(nil).ApplyCaminoState), arg0) +} + +// CaminoConfig mocks base method. +func (m *MockDiff) CaminoConfig() (*CaminoConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CaminoConfig") + ret0, _ := ret[0].(*CaminoConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CaminoConfig indicates an expected call of CaminoConfig. 
+func (mr *MockDiffMockRecorder) CaminoConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaminoConfig", reflect.TypeOf((*MockDiff)(nil).CaminoConfig)) +} + +// DeleteCurrentDelegator mocks base method. +func (m *MockDiff) DeleteCurrentDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentDelegator", staker) +} + +// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. +func (mr *MockDiffMockRecorder) DeleteCurrentDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentDelegator), staker) +} + +// DeleteCurrentValidator mocks base method. +func (m *MockDiff) DeleteCurrentValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentValidator", staker) +} + +// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. +func (mr *MockDiffMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentValidator), staker) +} + +// DeleteDeferredValidator mocks base method. +func (m *MockDiff) DeleteDeferredValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteDeferredValidator", staker) +} + +// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. +func (mr *MockDiffMockRecorder) DeleteDeferredValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockDiff)(nil).DeleteDeferredValidator), staker) +} + +// DeletePendingDelegator mocks base method. +func (m *MockDiff) DeletePendingDelegator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingDelegator", staker) +} + +// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. +func (mr *MockDiffMockRecorder) DeletePendingDelegator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockDiff)(nil).DeletePendingDelegator), staker) +} + +// DeletePendingValidator mocks base method. +func (m *MockDiff) DeletePendingValidator(staker *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingValidator", staker) +} + +// DeletePendingValidator indicates an expected call of DeletePendingValidator. +func (mr *MockDiffMockRecorder) DeletePendingValidator(staker any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockDiff)(nil).DeletePendingValidator), staker) +} + +// DeleteUTXO mocks base method. +func (m *MockDiff) DeleteUTXO(utxoID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", utxoID) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockDiffMockRecorder) DeleteUTXO(utxoID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), utxoID) +} + +// GetAddressStates mocks base method. 
+func (m *MockDiff) GetAddressStates(arg0 ids.ShortID) (addrstate.AddressState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAddressStates", arg0) + ret0, _ := ret[0].(addrstate.AddressState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAddressStates indicates an expected call of GetAddressStates. +func (mr *MockDiffMockRecorder) GetAddressStates(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddressStates", reflect.TypeOf((*MockDiff)(nil).GetAddressStates), arg0) +} + +// GetAllDepositOffers mocks base method. +func (m *MockDiff) GetAllDepositOffers() ([]*deposit.Offer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllDepositOffers") + ret0, _ := ret[0].([]*deposit.Offer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllDepositOffers indicates an expected call of GetAllDepositOffers. +func (mr *MockDiffMockRecorder) GetAllDepositOffers() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllDepositOffers", reflect.TypeOf((*MockDiff)(nil).GetAllDepositOffers)) +} + +// GetBaseFee mocks base method. +func (m *MockDiff) GetBaseFee() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBaseFee") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBaseFee indicates an expected call of GetBaseFee. +func (mr *MockDiffMockRecorder) GetBaseFee() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockDiff)(nil).GetBaseFee)) +} + +// GetClaimable mocks base method. +func (m *MockDiff) GetClaimable(ownerID ids.ID) (*Claimable, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClaimable", ownerID) + ret0, _ := ret[0].(*Claimable) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClaimable indicates an expected call of GetClaimable. +func (mr *MockDiffMockRecorder) GetClaimable(ownerID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaimable", reflect.TypeOf((*MockDiff)(nil).GetClaimable), ownerID) +} + +// GetCurrentDelegatorIterator mocks base method. +func (m *MockDiff) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", subnetID, nodeID) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. +func (mr *MockDiffMockRecorder) GetCurrentDelegatorIterator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentDelegatorIterator), subnetID, nodeID) +} + +// GetCurrentStakerIterator mocks base method. +func (m *MockDiff) GetCurrentStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. 
+func (mr *MockDiffMockRecorder) GetCurrentStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentStakerIterator)) +} + +// GetCurrentSupply mocks base method. +func (m *MockDiff) GetCurrentSupply(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSupply", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSupply indicates an expected call of GetCurrentSupply. +func (mr *MockDiffMockRecorder) GetCurrentSupply(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).GetCurrentSupply), subnetID) +} + +// GetCurrentValidator mocks base method. +func (m *MockDiff) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidator indicates an expected call of GetCurrentValidator. +func (mr *MockDiffMockRecorder) GetCurrentValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockDiff)(nil).GetCurrentValidator), subnetID, nodeID) +} + +// GetDeferredStakerIterator mocks base method. +func (m *MockDiff) GetDeferredStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeferredStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. +func (mr *MockDiffMockRecorder) GetDeferredStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetDeferredStakerIterator)) +} + +// GetDeferredValidator mocks base method. +func (m *MockDiff) GetDeferredValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeferredValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeferredValidator indicates an expected call of GetDeferredValidator. +func (mr *MockDiffMockRecorder) GetDeferredValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockDiff)(nil).GetDeferredValidator), subnetID, nodeID) +} + +// GetDelegateeReward mocks base method. +func (m *MockDiff) GetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", subnetID, nodeID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockDiffMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), subnetID, nodeID) +} + +// GetDeposit mocks base method. 
+func (m *MockDiff) GetDeposit(depositTxID ids.ID) (*deposit.Deposit, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeposit", depositTxID) + ret0, _ := ret[0].(*deposit.Deposit) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeposit indicates an expected call of GetDeposit. +func (mr *MockDiffMockRecorder) GetDeposit(depositTxID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeposit", reflect.TypeOf((*MockDiff)(nil).GetDeposit), depositTxID) +} + +// GetDepositOffer mocks base method. +func (m *MockDiff) GetDepositOffer(offerID ids.ID) (*deposit.Offer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDepositOffer", offerID) + ret0, _ := ret[0].(*deposit.Offer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDepositOffer indicates an expected call of GetDepositOffer. +func (mr *MockDiffMockRecorder) GetDepositOffer(offerID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDepositOffer", reflect.TypeOf((*MockDiff)(nil).GetDepositOffer), offerID) +} + +// GetFeeDistribution mocks base method. +func (m *MockDiff) GetFeeDistribution() ([dac.FeeDistributionFractionsCount]uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFeeDistribution") + ret0, _ := ret[0].([dac.FeeDistributionFractionsCount]uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFeeDistribution indicates an expected call of GetFeeDistribution. +func (mr *MockDiffMockRecorder) GetFeeDistribution() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeeDistribution", reflect.TypeOf((*MockDiff)(nil).GetFeeDistribution)) +} + +// GetMultisigAlias mocks base method. +func (m *MockDiff) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) + ret0, _ := ret[0].(*multisig.AliasWithNonce) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultisigAlias indicates an expected call of GetMultisigAlias. +func (mr *MockDiffMockRecorder) GetMultisigAlias(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockDiff)(nil).GetMultisigAlias), arg0) +} + +// GetNextProposalExpirationTime mocks base method. +func (m *MockDiff) GetNextProposalExpirationTime(removedProposalIDs set.Set[ids.ID]) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextProposalExpirationTime", removedProposalIDs) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNextProposalExpirationTime indicates an expected call of GetNextProposalExpirationTime. +func (mr *MockDiffMockRecorder) GetNextProposalExpirationTime(removedProposalIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextProposalExpirationTime", reflect.TypeOf((*MockDiff)(nil).GetNextProposalExpirationTime), removedProposalIDs) +} + +// GetNextToExpireProposalIDsAndTime mocks base method. 
+func (m *MockDiff) GetNextToExpireProposalIDsAndTime(removedProposalIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToExpireProposalIDsAndTime", removedProposalIDs) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetNextToExpireProposalIDsAndTime indicates an expected call of GetNextToExpireProposalIDsAndTime. +func (mr *MockDiffMockRecorder) GetNextToExpireProposalIDsAndTime(removedProposalIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToExpireProposalIDsAndTime", reflect.TypeOf((*MockDiff)(nil).GetNextToExpireProposalIDsAndTime), removedProposalIDs) +} + +// GetNextToUnlockDepositIDsAndTime mocks base method. +func (m *MockDiff) GetNextToUnlockDepositIDsAndTime(removedDepositIDs set.Set[ids.ID]) ([]ids.ID, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextToUnlockDepositIDsAndTime", removedDepositIDs) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetNextToUnlockDepositIDsAndTime indicates an expected call of GetNextToUnlockDepositIDsAndTime. +func (mr *MockDiffMockRecorder) GetNextToUnlockDepositIDsAndTime(removedDepositIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositIDsAndTime", reflect.TypeOf((*MockDiff)(nil).GetNextToUnlockDepositIDsAndTime), removedDepositIDs) } -// GetMultisigAlias mocks base method. -func (m *MockState) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWithNonce, error) { +// GetNextToUnlockDepositTime mocks base method. +func (m *MockDiff) GetNextToUnlockDepositTime(removedDepositIDs set.Set[ids.ID]) (time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMultisigAlias", arg0) - ret0, _ := ret[0].(*multisig.AliasWithNonce) + ret := m.ctrl.Call(m, "GetNextToUnlockDepositTime", removedDepositIDs) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetMultisigAlias indicates an expected call of GetMultisigAlias. -func (mr *MockStateMockRecorder) GetMultisigAlias(arg0 interface{}) *gomock.Call { +// GetNextToUnlockDepositTime indicates an expected call of GetNextToUnlockDepositTime. +func (mr *MockDiffMockRecorder) GetNextToUnlockDepositTime(removedDepositIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockState)(nil).GetMultisigAlias), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextToUnlockDepositTime", reflect.TypeOf((*MockDiff)(nil).GetNextToUnlockDepositTime), removedDepositIDs) } // GetNotDistributedValidatorReward mocks base method. -func (m *MockState) GetNotDistributedValidatorReward() (uint64, error) { +func (m *MockDiff) GetNotDistributedValidatorReward() (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNotDistributedValidatorReward") ret0, _ := ret[0].(uint64) @@ -708,28 +2849,28 @@ func (m *MockState) GetNotDistributedValidatorReward() (uint64, error) { } // GetNotDistributedValidatorReward indicates an expected call of GetNotDistributedValidatorReward. 
-func (mr *MockStateMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { +func (mr *MockDiffMockRecorder) GetNotDistributedValidatorReward() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockState)(nil).GetNotDistributedValidatorReward)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotDistributedValidatorReward", reflect.TypeOf((*MockDiff)(nil).GetNotDistributedValidatorReward)) } // GetPendingDelegatorIterator mocks base method. -func (m *MockState) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { +func (m *MockDiff) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) + ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", subnetID, nodeID) ret0, _ := ret[0].(StakerIterator) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockStateMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetPendingDelegatorIterator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetPendingDelegatorIterator), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingDelegatorIterator), subnetID, nodeID) } // GetPendingStakerIterator mocks base method. -func (m *MockState) GetPendingStakerIterator() (StakerIterator, error) { +func (m *MockDiff) GetPendingStakerIterator() (StakerIterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetPendingStakerIterator") ret0, _ := ret[0].(StakerIterator) @@ -738,187 +2879,118 @@ func (m *MockState) GetPendingStakerIterator() (StakerIterator, error) { } // GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. -func (mr *MockStateMockRecorder) GetPendingStakerIterator() *gomock.Call { +func (mr *MockDiffMockRecorder) GetPendingStakerIterator() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockState)(nil).GetPendingStakerIterator)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingStakerIterator)) } // GetPendingValidator mocks base method. -func (m *MockState) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { +func (m *MockDiff) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) + ret := m.ctrl.Call(m, "GetPendingValidator", subnetID, nodeID) ret0, _ := ret[0].(*Staker) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPendingValidator indicates an expected call of GetPendingValidator. 
-func (mr *MockStateMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockState)(nil).GetPendingValidator), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), subnetID, nodeID) } -// GetDeferredStakerIterator mocks base method. -func (m *MockState) GetDeferredStakerIterator() (StakerIterator, error) { +// GetProposal mocks base method. +func (m *MockDiff) GetProposal(proposalID ids.ID) (dac.ProposalState, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredStakerIterator") - ret0, _ := ret[0].(StakerIterator) + ret := m.ctrl.Call(m, "GetProposal", proposalID) + ret0, _ := ret[0].(dac.ProposalState) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDeferredStakerIterator indicates an expected call of GetDeferredStakerIterator. -func (mr *MockStateMockRecorder) GetDeferredStakerIterator() *gomock.Call { +// GetProposal indicates an expected call of GetProposal. +func (mr *MockDiffMockRecorder) GetProposal(proposalID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredStakerIterator", reflect.TypeOf((*MockState)(nil).GetDeferredStakerIterator)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockDiff)(nil).GetProposal), proposalID) } -// GetDeferredValidator mocks base method. -func (m *MockState) GetDeferredValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { +// GetProposalIDsToFinish mocks base method. +func (m *MockDiff) GetProposalIDsToFinish() ([]ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDeferredValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) + ret := m.ctrl.Call(m, "GetProposalIDsToFinish") + ret0, _ := ret[0].([]ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDeferredValidator indicates an expected call of GetDeferredValidator. -func (mr *MockStateMockRecorder) GetDeferredValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeferredValidator", reflect.TypeOf((*MockState)(nil).GetDeferredValidator), arg0, arg1) -} - -// DeleteDeferredValidator mocks base method. -func (m *MockState) DeleteDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteDeferredValidator", arg0) -} - -// DeleteDeferredValidator indicates an expected call of DeleteDeferredValidator. -func (mr *MockStateMockRecorder) DeleteDeferredValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDeferredValidator", reflect.TypeOf((*MockState)(nil).DeleteDeferredValidator), arg0) -} - -// PutDeferredValidator mocks base method. -func (m *MockState) PutDeferredValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutDeferredValidator", arg0) -} - -// PutDeferredValidator indicates an expected call of PutDeferredValidator. -func (mr *MockStateMockRecorder) PutDeferredValidator(arg0 interface{}) *gomock.Call { +// GetProposalIDsToFinish indicates an expected call of GetProposalIDsToFinish. 
+func (mr *MockDiffMockRecorder) GetProposalIDsToFinish() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockState)(nil).PutDeferredValidator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIDsToFinish", reflect.TypeOf((*MockDiff)(nil).GetProposalIDsToFinish)) } -// GetRewardUTXOs mocks base method. -func (m *MockState) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { +// GetProposalIterator mocks base method. +func (m *MockDiff) GetProposalIterator() (ProposalsIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) + ret := m.ctrl.Call(m, "GetProposalIterator") + ret0, _ := ret[0].(ProposalsIterator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockStateMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { +// GetProposalIterator indicates an expected call of GetProposalIterator. +func (mr *MockDiffMockRecorder) GetProposalIterator() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalIterator", reflect.TypeOf((*MockDiff)(nil).GetProposalIterator)) } // GetShortIDLink mocks base method. -func (m *MockState) GetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey) (ids.ShortID, error) { +func (m *MockDiff) GetShortIDLink(id ids.ShortID, key ShortLinkKey) (ids.ShortID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShortIDLink", arg0, arg1) + ret := m.ctrl.Call(m, "GetShortIDLink", id, key) ret0, _ := ret[0].(ids.ShortID) ret1, _ := ret[1].(error) return ret0, ret1 } // GetShortIDLink indicates an expected call of GetShortIDLink. -func (mr *MockStateMockRecorder) GetShortIDLink(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockState)(nil).GetShortIDLink), arg0, arg1) -} - -// GetStartTime mocks base method. -func (m *MockState) GetStartTime(arg0 ids.NodeID, arg1 ids.ID) (time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStartTime", arg0, arg1) - ret0, _ := ret[0].(time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStartTime indicates an expected call of GetStartTime. -func (mr *MockStateMockRecorder) GetStartTime(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0, arg1) -} - -// GetStatelessBlock mocks base method. -func (m *MockState) GetStatelessBlock(arg0 ids.ID) (block.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) - ret0, _ := ret[0].(block.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatelessBlock indicates an expected call of GetStatelessBlock. 
-func (mr *MockStateMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetShortIDLink(id, key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShortIDLink", reflect.TypeOf((*MockDiff)(nil).GetShortIDLink), id, key) } // GetSubnetOwner mocks base method. -func (m *MockState) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { +func (m *MockDiff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) + ret := m.ctrl.Call(m, "GetSubnetOwner", subnetID) ret0, _ := ret[0].(fx.Owner) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSubnetOwner indicates an expected call of GetSubnetOwner. -func (mr *MockStateMockRecorder) GetSubnetOwner(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetSubnetOwner(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockState)(nil).GetSubnetOwner), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).GetSubnetOwner), subnetID) } // GetSubnetTransformation mocks base method. -func (m *MockState) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { +func (m *MockDiff) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) + ret := m.ctrl.Call(m, "GetSubnetTransformation", subnetID) ret0, _ := ret[0].(*txs.Tx) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSubnetTransformation indicates an expected call of GetSubnetTransformation. -func (mr *MockStateMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockState)(nil).GetSubnetTransformation), arg0) -} - -// GetSubnets mocks base method. -func (m *MockState) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockStateMockRecorder) GetSubnets() *gomock.Call { +func (mr *MockDiffMockRecorder) GetSubnetTransformation(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockState)(nil).GetSubnets)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).GetSubnetTransformation), subnetID) } // GetTimestamp mocks base method. -func (m *MockState) GetTimestamp() time.Time { +func (m *MockDiff) GetTimestamp() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetTimestamp") ret0, _ := ret[0].(time.Time) @@ -926,15 +2998,15 @@ func (m *MockState) GetTimestamp() time.Time { } // GetTimestamp indicates an expected call of GetTimestamp. 
-func (mr *MockStateMockRecorder) GetTimestamp() *gomock.Call { +func (mr *MockDiffMockRecorder) GetTimestamp() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockState)(nil).GetTimestamp)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockDiff)(nil).GetTimestamp)) } // GetTx mocks base method. -func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { +func (m *MockDiff) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0) + ret := m.ctrl.Call(m, "GetTx", txID) ret0, _ := ret[0].(*txs.Tx) ret1, _ := ret[1].(status.Status) ret2, _ := ret[2].(error) @@ -942,58 +3014,28 @@ func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockStateMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetTx(txID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), txID) } // GetUTXO mocks base method. -func (m *MockState) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { +func (m *MockDiff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret := m.ctrl.Call(m, "GetUTXO", utxoID) ret0, _ := ret[0].(*avax.UTXO) ret1, _ := ret[1].(error) return ret0, ret1 } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), arg0) -} - -// GetUptime mocks base method. -func (m *MockState) GetUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUptime", arg0, arg1) - ret0, _ := ret[0].(time.Duration) - ret1, _ := ret[1].(time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetUptime indicates an expected call of GetUptime. -func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) -} - -// PruneAndIndex mocks base method. -func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// PruneAndIndex indicates an expected call of PruneAndIndex. -func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetUTXO(utxoID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), utxoID) } // LockedUTXOs mocks base method. 
-func (m *MockState) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { +func (m *MockDiff) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], arg2 locked.State) ([]*avax.UTXO, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LockedUTXOs", arg0, arg1, arg2) ret0, _ := ret[0].([]*avax.UTXO) @@ -1002,353 +3044,311 @@ func (m *MockState) LockedUTXOs(arg0 set.Set[ids.ID], arg1 set.Set[ids.ShortID], } // LockedUTXOs indicates an expected call of LockedUTXOs. -func (mr *MockStateMockRecorder) LockedUTXOs(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) LockedUTXOs(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockState)(nil).LockedUTXOs), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockedUTXOs", reflect.TypeOf((*MockDiff)(nil).LockedUTXOs), arg0, arg1, arg2) } -// PutCurrentDelegator mocks base method. -func (m *MockState) PutCurrentDelegator(arg0 *Staker) { +// ModifyDeposit mocks base method. +func (m *MockDiff) ModifyDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentDelegator", arg0) + m.ctrl.Call(m, "ModifyDeposit", depositTxID, deposit) } -// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockStateMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { +// ModifyDeposit indicates an expected call of ModifyDeposit. +func (mr *MockDiffMockRecorder) ModifyDeposit(depositTxID, deposit any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockState)(nil).PutCurrentDelegator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockDiff)(nil).ModifyDeposit), depositTxID, deposit) } -// PutCurrentValidator mocks base method. -func (m *MockState) PutCurrentValidator(arg0 *Staker) { +// ModifyProposal mocks base method. +func (m *MockDiff) ModifyProposal(proposalID ids.ID, proposal dac.ProposalState) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentValidator", arg0) + m.ctrl.Call(m, "ModifyProposal", proposalID, proposal) } -// PutCurrentValidator indicates an expected call of PutCurrentValidator. -func (mr *MockStateMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { +// ModifyProposal indicates an expected call of ModifyProposal. +func (mr *MockDiffMockRecorder) ModifyProposal(proposalID, proposal any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockState)(nil).PutCurrentValidator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockDiff)(nil).ModifyProposal), proposalID, proposal) } -// PutPendingDelegator mocks base method. -func (m *MockState) PutPendingDelegator(arg0 *Staker) { +// PutCurrentDelegator mocks base method. +func (m *MockDiff) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingDelegator", arg0) + m.ctrl.Call(m, "PutCurrentDelegator", staker) } -// PutPendingDelegator indicates an expected call of PutPendingDelegator. -func (mr *MockStateMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { +// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. 
+func (mr *MockDiffMockRecorder) PutCurrentDelegator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockState)(nil).PutPendingDelegator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).PutCurrentDelegator), staker) } -// PutPendingValidator mocks base method. -func (m *MockState) PutPendingValidator(arg0 *Staker) { +// PutCurrentValidator mocks base method. +func (m *MockDiff) PutCurrentValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingValidator", arg0) + m.ctrl.Call(m, "PutCurrentValidator", staker) } -// PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockStateMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { +// PutCurrentValidator indicates an expected call of PutCurrentValidator. +func (mr *MockDiffMockRecorder) PutCurrentValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockState)(nil).PutPendingValidator), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockDiff)(nil).PutCurrentValidator), staker) } -// SetAddressStates mocks base method. -func (m *MockState) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { +// PutDeferredValidator mocks base method. +func (m *MockDiff) PutDeferredValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetAddressStates", arg0, arg1) + m.ctrl.Call(m, "PutDeferredValidator", staker) } -// SetAddressStates indicates an expected call of SetAddressStates. -func (mr *MockStateMockRecorder) SetAddressStates(arg0, arg1 interface{}) *gomock.Call { +// PutDeferredValidator indicates an expected call of PutDeferredValidator. +func (mr *MockDiffMockRecorder) PutDeferredValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockState)(nil).SetAddressStates), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutDeferredValidator", reflect.TypeOf((*MockDiff)(nil).PutDeferredValidator), staker) } -// SetClaimable mocks base method. -func (m *MockState) SetClaimable(arg0 ids.ID, arg1 *Claimable) { +// PutPendingDelegator mocks base method. +func (m *MockDiff) PutPendingDelegator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetClaimable", arg0, arg1) + m.ctrl.Call(m, "PutPendingDelegator", staker) } -// SetClaimable indicates an expected call of SetClaimable. -func (mr *MockStateMockRecorder) SetClaimable(arg0, arg1 interface{}) *gomock.Call { +// PutPendingDelegator indicates an expected call of PutPendingDelegator. +func (mr *MockDiffMockRecorder) PutPendingDelegator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockState)(nil).SetClaimable), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockDiff)(nil).PutPendingDelegator), staker) } -// AddProposal mocks base method. -func (m *MockState) AddProposal(arg0 ids.ID, arg1 dac.ProposalState) { +// PutPendingValidator mocks base method. 
+func (m *MockDiff) PutPendingValidator(staker *Staker) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposal", arg0, arg1) + m.ctrl.Call(m, "PutPendingValidator", staker) } -// AddProposal indicates an expected call of AddProposal. -func (mr *MockStateMockRecorder) AddProposal(arg0, arg1 interface{}) *gomock.Call { +// PutPendingValidator indicates an expected call of PutPendingValidator. +func (mr *MockDiffMockRecorder) PutPendingValidator(staker any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposal", reflect.TypeOf((*MockState)(nil).AddProposal), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockDiff)(nil).PutPendingValidator), staker) } -// ModifyProposal mocks base method. -func (m *MockState) ModifyProposal(arg0 ids.ID, arg1 dac.ProposalState) { +// RemoveDeposit mocks base method. +func (m *MockDiff) RemoveDeposit(depositTxID ids.ID, deposit *deposit.Deposit) { m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyProposal", arg0, arg1) + m.ctrl.Call(m, "RemoveDeposit", depositTxID, deposit) } -// ModifyProposal indicates an expected call of ModifyProposal. -func (mr *MockStateMockRecorder) ModifyProposal(arg0, arg1 interface{}) *gomock.Call { +// RemoveDeposit indicates an expected call of RemoveDeposit. +func (mr *MockDiffMockRecorder) RemoveDeposit(depositTxID, deposit any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyProposal", reflect.TypeOf((*MockState)(nil).ModifyProposal), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockDiff)(nil).RemoveDeposit), depositTxID, deposit) } // RemoveProposal mocks base method. -func (m *MockState) RemoveProposal(arg0 ids.ID, arg1 dac.ProposalState) { +func (m *MockDiff) RemoveProposal(proposalID ids.ID, proposal dac.ProposalState) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposal", arg0, arg1) + m.ctrl.Call(m, "RemoveProposal", proposalID, proposal) } // RemoveProposal indicates an expected call of RemoveProposal. -func (mr *MockStateMockRecorder) RemoveProposal(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) RemoveProposal(proposalID, proposal any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockState)(nil).RemoveProposal), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposal", reflect.TypeOf((*MockDiff)(nil).RemoveProposal), proposalID, proposal) } -// AddProposalIDToFinish mocks base method. -func (m *MockState) AddProposalIDToFinish(arg0 ids.ID) { +// RemoveProposalIDToFinish mocks base method. +func (m *MockDiff) RemoveProposalIDToFinish(arg0 ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProposalIDToFinish", arg0) + m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) } -// AddProposalIDToFinish indicates an expected call of AddProposalIDToFinish. -func (mr *MockStateMockRecorder) AddProposalIDToFinish(arg0 interface{}) *gomock.Call { +// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. 
+func (mr *MockDiffMockRecorder) RemoveProposalIDToFinish(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProposalIDToFinish", reflect.TypeOf((*MockState)(nil).AddProposalIDToFinish), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockDiff)(nil).RemoveProposalIDToFinish), arg0) } -// RemoveProposalIDToFinish mocks base method. -func (m *MockState) RemoveProposalIDToFinish(arg0 ids.ID) { +// SetAddressStates mocks base method. +func (m *MockDiff) SetAddressStates(arg0 ids.ShortID, arg1 addrstate.AddressState) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProposalIDToFinish", arg0) + m.ctrl.Call(m, "SetAddressStates", arg0, arg1) } -// RemoveProposalIDToFinish indicates an expected call of RemoveProposalIDToFinish. -func (mr *MockStateMockRecorder) RemoveProposalIDToFinish(arg0 interface{}) *gomock.Call { +// SetAddressStates indicates an expected call of SetAddressStates. +func (mr *MockDiffMockRecorder) SetAddressStates(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProposalIDToFinish", reflect.TypeOf((*MockState)(nil).RemoveProposalIDToFinish), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddressStates", reflect.TypeOf((*MockDiff)(nil).SetAddressStates), arg0, arg1) } // SetBaseFee mocks base method. -func (m *MockState) SetBaseFee(arg0 uint64) { +func (m *MockDiff) SetBaseFee(arg0 uint64) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetBaseFee", arg0) } // SetBaseFee indicates an expected call of SetBaseFee. -func (mr *MockStateMockRecorder) SetBaseFee(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetBaseFee(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockState)(nil).SetBaseFee), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBaseFee", reflect.TypeOf((*MockDiff)(nil).SetBaseFee), arg0) } -// SetFeeDistribution mocks base method. -func (m *MockState) SetFeeDistribution(arg0 [dac.FeeDistributionFractionsCount]uint64) { +// SetClaimable mocks base method. +func (m *MockDiff) SetClaimable(ownerID ids.ID, claimable *Claimable) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFeeDistribution", arg0) + m.ctrl.Call(m, "SetClaimable", ownerID, claimable) } -// SetFeeDistribution indicates an expected call of SetFeeDistribution. -func (mr *MockStateMockRecorder) SetFeeDistribution(arg0 interface{}) *gomock.Call { +// SetClaimable indicates an expected call of SetClaimable. +func (mr *MockDiffMockRecorder) SetClaimable(ownerID, claimable any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockState)(nil).SetFeeDistribution), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClaimable", reflect.TypeOf((*MockDiff)(nil).SetClaimable), ownerID, claimable) } // SetCurrentSupply mocks base method. -func (m *MockState) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { +func (m *MockDiff) SetCurrentSupply(subnetID ids.ID, cs uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) + m.ctrl.Call(m, "SetCurrentSupply", subnetID, cs) } // SetCurrentSupply indicates an expected call of SetCurrentSupply. 
-func (mr *MockStateMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetCurrentSupply(subnetID, cs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockState)(nil).SetCurrentSupply), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).SetCurrentSupply), subnetID, cs) } // SetDelegateeReward mocks base method. -func (m *MockState) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { +func (m *MockDiff) SetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID, amount uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "SetDelegateeReward", subnetID, nodeID, amount) ret0, _ := ret[0].(error) return ret0 } // SetDelegateeReward indicates an expected call of SetDelegateeReward. -func (mr *MockStateMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockState)(nil).SetDelegateeReward), arg0, arg1, arg2) -} - -// SetHeight mocks base method. -func (m *MockState) SetHeight(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetHeight", arg0) -} - -// SetHeight indicates an expected call of SetHeight. -func (mr *MockStateMockRecorder) SetHeight(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetDelegateeReward(subnetID, nodeID, amount any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeight", reflect.TypeOf((*MockState)(nil).SetHeight), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).SetDelegateeReward), subnetID, nodeID, amount) } -// SetLastAccepted mocks base method. -func (m *MockState) SetLastAccepted(arg0 ids.ID) { +// SetDepositOffer mocks base method. +func (m *MockDiff) SetDepositOffer(offer *deposit.Offer) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLastAccepted", arg0) + m.ctrl.Call(m, "SetDepositOffer", offer) } -// SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +// SetDepositOffer indicates an expected call of SetDepositOffer. +func (mr *MockDiffMockRecorder) SetDepositOffer(offer any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDepositOffer", reflect.TypeOf((*MockDiff)(nil).SetDepositOffer), offer) } -// SetSubnetOwner mocks base method. -func (m *MockState) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { +// SetFeeDistribution mocks base method. +func (m *MockDiff) SetFeeDistribution(arg0 [3]uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) + m.ctrl.Call(m, "SetFeeDistribution", arg0) } -// SetSubnetOwner indicates an expected call of SetSubnetOwner. -func (mr *MockStateMockRecorder) SetSubnetOwner(arg0, arg1 interface{}) *gomock.Call { +// SetFeeDistribution indicates an expected call of SetFeeDistribution. 
+func (mr *MockDiffMockRecorder) SetFeeDistribution(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockState)(nil).SetSubnetOwner), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeDistribution", reflect.TypeOf((*MockDiff)(nil).SetFeeDistribution), arg0) } // SetMultisigAlias mocks base method. -func (m *MockState) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { +func (m *MockDiff) SetMultisigAlias(arg0 ids.ShortID, arg1 *multisig.AliasWithNonce) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetMultisigAlias", arg0, arg1) } // SetMultisigAlias indicates an expected call of SetMultisigAlias. -func (mr *MockStateMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { +func (mr *MockDiffMockRecorder) SetMultisigAlias(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockState)(nil).SetMultisigAlias), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMultisigAlias", reflect.TypeOf((*MockDiff)(nil).SetMultisigAlias), arg0, arg1) } // SetNotDistributedValidatorReward mocks base method. -func (m *MockState) SetNotDistributedValidatorReward(arg0 uint64) { +func (m *MockDiff) SetNotDistributedValidatorReward(reward uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNotDistributedValidatorReward", arg0) + m.ctrl.Call(m, "SetNotDistributedValidatorReward", reward) } // SetNotDistributedValidatorReward indicates an expected call of SetNotDistributedValidatorReward. -func (mr *MockStateMockRecorder) SetNotDistributedValidatorReward(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetNotDistributedValidatorReward(reward any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockState)(nil).SetNotDistributedValidatorReward), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNotDistributedValidatorReward", reflect.TypeOf((*MockDiff)(nil).SetNotDistributedValidatorReward), reward) } // SetShortIDLink mocks base method. -func (m *MockState) SetShortIDLink(arg0 ids.ShortID, arg1 ShortLinkKey, arg2 *ids.ShortID) { +func (m *MockDiff) SetShortIDLink(id ids.ShortID, key ShortLinkKey, link *ids.ShortID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetShortIDLink", arg0, arg1, arg2) + m.ctrl.Call(m, "SetShortIDLink", id, key, link) } // SetShortIDLink indicates an expected call of SetShortIDLink. -func (mr *MockStateMockRecorder) SetShortIDLink(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockState)(nil).SetShortIDLink), arg0, arg1, arg2) -} - -// SetTimestamp mocks base method. -func (m *MockState) SetTimestamp(arg0 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTimestamp", arg0) -} - -// SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), arg0) -} - -// SetUptime mocks base method. 
-func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Duration, arg3 time.Time) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetUptime", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetUptime indicates an expected call of SetUptime. -func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetShortIDLink(id, key, link any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetShortIDLink", reflect.TypeOf((*MockDiff)(nil).SetShortIDLink), id, key, link) } -// ShouldPrune mocks base method. -func (m *MockState) ShouldPrune() (bool, error) { +// SetSubnetOwner mocks base method. +func (m *MockDiff) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ShouldPrune") - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 + m.ctrl.Call(m, "SetSubnetOwner", subnetID, owner) } -// ShouldPrune indicates an expected call of ShouldPrune. -func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call { +// SetSubnetOwner indicates an expected call of SetSubnetOwner. +func (mr *MockDiffMockRecorder) SetSubnetOwner(subnetID, owner any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).SetSubnetOwner), subnetID, owner) } -// UTXOIDs mocks base method. -func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) { +// SetTimestamp mocks base method. +func (m *MockDiff) SetTimestamp(tm time.Time) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UTXOIDs", arg0, arg1, arg2) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 + m.ctrl.Call(m, "SetTimestamp", tm) } -// UTXOIDs indicates an expected call of UTXOIDs. -func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.Call { +// SetTimestamp indicates an expected call of SetTimestamp. +func (mr *MockDiffMockRecorder) SetTimestamp(tm any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), tm) } -// AddDeposit mocks base method. -func (m *MockState) AddDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddDeposit", arg0, arg1) +// MockVersions is a mock of Versions interface. +type MockVersions struct { + ctrl *gomock.Controller + recorder *MockVersionsMockRecorder } -// AddDeposit indicates an expected call of AddDeposit. -func (mr *MockStateMockRecorder) AddDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeposit", reflect.TypeOf((*MockState)(nil).AddDeposit), arg0, arg1) +// MockVersionsMockRecorder is the mock recorder for MockVersions. +type MockVersionsMockRecorder struct { + mock *MockVersions } -// ModifyDeposit mocks base method. 
-func (m *MockState) ModifyDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ModifyDeposit", arg0, arg1) +// NewMockVersions creates a new mock instance. +func NewMockVersions(ctrl *gomock.Controller) *MockVersions { + mock := &MockVersions{ctrl: ctrl} + mock.recorder = &MockVersionsMockRecorder{mock} + return mock } -// ModifyDeposit indicates an expected call of ModifyDeposit. -func (mr *MockStateMockRecorder) ModifyDeposit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyDeposit", reflect.TypeOf((*MockState)(nil).ModifyDeposit), arg0, arg1) +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVersions) EXPECT() *MockVersionsMockRecorder { + return m.recorder } -// RemoveDeposit mocks base method. -func (m *MockState) RemoveDeposit(arg0 ids.ID, arg1 *deposit.Deposit) { +// GetState mocks base method. +func (m *MockVersions) GetState(blkID ids.ID) (Chain, bool) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveDeposit", arg0, arg1) + ret := m.ctrl.Call(m, "GetState", blkID) + ret0, _ := ret[0].(Chain) + ret1, _ := ret[1].(bool) + return ret0, ret1 } -// RemoveDeposit indicates an expected call of RemoveDeposit. -func (mr *MockStateMockRecorder) RemoveDeposit(arg0, arg1 interface{}) *gomock.Call { +// GetState indicates an expected call of GetState. +func (mr *MockVersionsMockRecorder) GetState(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDeposit", reflect.TypeOf((*MockState)(nil).RemoveDeposit), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockVersions)(nil).GetState), blkID) } diff --git a/vms/platformvm/state/mock_versions.go b/vms/platformvm/state/mock_versions.go deleted file mode 100644 index 3f8a20550b22..000000000000 --- a/vms/platformvm/state/mock_versions.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Versions) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "go.uber.org/mock/gomock" -) - -// MockVersions is a mock of Versions interface. -type MockVersions struct { - ctrl *gomock.Controller - recorder *MockVersionsMockRecorder -} - -// MockVersionsMockRecorder is the mock recorder for MockVersions. -type MockVersionsMockRecorder struct { - mock *MockVersions -} - -// NewMockVersions creates a new mock instance. -func NewMockVersions(ctrl *gomock.Controller) *MockVersions { - mock := &MockVersions{ctrl: ctrl} - mock.recorder = &MockVersionsMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockVersions) EXPECT() *MockVersionsMockRecorder { - return m.recorder -} - -// GetState mocks base method. -func (m *MockVersions) GetState(arg0 ids.ID) (Chain, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) - ret0, _ := ret[0].(Chain) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetState indicates an expected call of GetState. 
-func (mr *MockVersionsMockRecorder) GetState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockVersions)(nil).GetState), arg0) -} diff --git a/vms/platformvm/state/slice_iterator_test.go b/vms/platformvm/state/slice_iterator_test.go index 96a686cddf92..408ffe837a27 100644 --- a/vms/platformvm/state/slice_iterator_test.go +++ b/vms/platformvm/state/slice_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/staker.go b/vms/platformvm/state/staker.go index 37bc512e36cb..a9ba52595062 100644 --- a/vms/platformvm/state/staker.go +++ b/vms/platformvm/state/staker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -83,7 +83,12 @@ func (s *Staker) Less(than *Staker) bool { return bytes.Compare(s.TxID[:], than.TxID[:]) == -1 } -func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (*Staker, error) { +func NewCurrentStaker( + txID ids.ID, + staker txs.Staker, + startTime time.Time, + potentialReward uint64, +) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err @@ -95,7 +100,7 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (* PublicKey: publicKey, SubnetID: staker.SubnetID(), Weight: staker.Weight(), - StartTime: staker.StartTime(), + StartTime: startTime, EndTime: endTime, PotentialReward: potentialReward, NextTime: endTime, @@ -103,7 +108,7 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (* }, nil } -func NewPendingStaker(txID ids.ID, staker txs.Staker) (*Staker, error) { +func NewPendingStaker(txID ids.ID, staker txs.ScheduledStaker) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err diff --git a/vms/platformvm/state/staker_diff_iterator.go b/vms/platformvm/state/staker_diff_iterator.go index d9c194cd2702..d47ab49ac572 100644 --- a/vms/platformvm/state/staker_diff_iterator.go +++ b/vms/platformvm/state/staker_diff_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/staker_diff_iterator_test.go b/vms/platformvm/state/staker_diff_iterator_test.go index c008b06fb716..468b8800a23d 100644 --- a/vms/platformvm/state/staker_diff_iterator_test.go +++ b/vms/platformvm/state/staker_diff_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/staker_status.go b/vms/platformvm/state/staker_status.go index b74064c4dc0b..0adc46244f92 100644 --- a/vms/platformvm/state/staker_status.go +++ b/vms/platformvm/state/staker_status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/vms/platformvm/state/staker_test.go b/vms/platformvm/state/staker_test.go index 747f442e5eda..1b72385dbd13 100644 --- a/vms/platformvm/state/staker_test.go +++ b/vms/platformvm/state/staker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -144,20 +144,19 @@ func TestNewCurrentStaker(t *testing.T) { subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() - endTime := time.Now() + endTime := startTime.Add(time.Hour) potentialReward := uint64(54321) currentPriority := txs.SubnetPermissionedValidatorCurrentPriority stakerTx := txs.NewMockStaker(ctrl) + stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) stakerTx.EXPECT().Weight().Return(weight) - stakerTx.EXPECT().StartTime().Return(startTime) - stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().CurrentPriority().Return(currentPriority) - staker, err := NewCurrentStaker(txID, stakerTx, potentialReward) + staker, err := NewCurrentStaker(txID, stakerTx, startTime, potentialReward) require.NotNil(staker) require.NoError(err) require.Equal(txID, staker.TxID) @@ -173,7 +172,7 @@ func TestNewCurrentStaker(t *testing.T) { stakerTx.EXPECT().PublicKey().Return(nil, false, errCustom) - _, err = NewCurrentStaker(txID, stakerTx, potentialReward) + _, err = NewCurrentStaker(txID, stakerTx, startTime, potentialReward) require.ErrorIs(err, errCustom) } @@ -192,7 +191,7 @@ func TestNewPendingStaker(t *testing.T) { endTime := time.Now() pendingPriority := txs.SubnetPermissionedValidatorPendingPriority - stakerTx := txs.NewMockStaker(ctrl) + stakerTx := txs.NewMockScheduledStaker(ctrl) stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 5276ff4f8204..f787749f72df 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/stakers_test.go b/vms/platformvm/state/stakers_test.go index 9894d9479653..5c6d9a8b28f8 100644 --- a/vms/platformvm/state/stakers_test.go +++ b/vms/platformvm/state/stakers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 6944cf741d8d..1938b0d2e8f1 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -71,35 +71,35 @@ var ( errValidatorSetAlreadyPopulated = errors.New("validator set already populated") errIsNotSubnet = errors.New("is not a subnet") - blockIDPrefix = []byte("blockID") - blockPrefix = []byte("block") - validatorsPrefix = []byte("validators") - currentPrefix = []byte("current") - pendingPrefix = []byte("pending") - validatorPrefix = []byte("validator") - delegatorPrefix = []byte("delegator") - subnetValidatorPrefix = []byte("subnetValidator") - subnetDelegatorPrefix = []byte("subnetDelegator") - nestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") - nestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") - flatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") - flatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") - txPrefix = []byte("tx") - rewardUTXOsPrefix = []byte("rewardUTXOs") - utxoPrefix = []byte("utxo") - subnetPrefix = []byte("subnet") - subnetOwnerPrefix = []byte("subnetOwner") - transformedSubnetPrefix = []byte("transformedSubnet") - supplyPrefix = []byte("supply") - chainPrefix = []byte("chain") - singletonPrefix = []byte("singleton") - - timestampKey = []byte("timestamp") - currentSupplyKey = []byte("current supply") - lastAcceptedKey = []byte("last accepted") - heightsIndexedKey = []byte("heights indexed") - initializedKey = []byte("initialized") - prunedKey = []byte("pruned") + BlockIDPrefix = []byte("blockID") + BlockPrefix = []byte("block") + ValidatorsPrefix = []byte("validators") + CurrentPrefix = []byte("current") + PendingPrefix = []byte("pending") + ValidatorPrefix = []byte("validator") + DelegatorPrefix = []byte("delegator") + SubnetValidatorPrefix = []byte("subnetValidator") + SubnetDelegatorPrefix = []byte("subnetDelegator") + NestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") + NestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") + FlatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") + FlatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") + TxPrefix = []byte("tx") + RewardUTXOsPrefix = []byte("rewardUTXOs") + UTXOPrefix = []byte("utxo") + SubnetPrefix = []byte("subnet") + SubnetOwnerPrefix = []byte("subnetOwner") + TransformedSubnetPrefix = []byte("transformedSubnet") + SupplyPrefix = []byte("supply") + ChainPrefix = []byte("chain") + SingletonPrefix = []byte("singleton") + + TimestampKey = []byte("timestamp") + CurrentSupplyKey = []byte("current supply") + LastAcceptedKey = []byte("last accepted") + HeightsIndexedKey = []byte("heights indexed") + InitializedKey = []byte("initialized") + PrunedKey = []byte("pruned") ) // Chain collects all methods to manage the state of the chain for block @@ -117,10 +117,8 @@ type Chain interface { GetCurrentSupply(subnetID ids.ID) (uint64, error) SetCurrentSupply(subnetID ids.ID, cs uint64) - GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) - GetSubnets() ([]*txs.Tx, error) AddSubnet(createSubnetTx *txs.Tx) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) @@ -129,7 +127,6 @@ type Chain interface { GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) - GetChains(subnetID ids.ID) ([]*txs.Tx, error) AddChain(createChainTx *txs.Tx) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) @@ -151,9 +148,9 @@ type State interface { GetBlockIDAtHeight(height uint64) (ids.ID, error) - // ApplyCurrentValidators adds all the current validators and delegators of - // [subnetID] into [vdrs]. 
- ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error + GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) + GetSubnets() ([]*txs.Tx, error) + GetChains(subnetID ids.ID) ([]*txs.Tx, error) // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis // block until it has applied all of the diffs up to and including @@ -301,11 +298,11 @@ type stateBlk struct { type state struct { validatorState - cfg *config.Config - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator - bootstrapped *utils.Atomic[bool] + validators validators.Manager + ctx *snow.Context + cfg *config.Config + metrics metrics.Metrics + rewards reward.Calculator baseDB *versiondb.Database @@ -474,7 +471,6 @@ func New( ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (State, error) { s, err := newState( db, @@ -484,7 +480,6 @@ func New( ctx, metricsReg, rewards, - bootstrapped, ) if err != nil { return nil, err @@ -509,7 +504,7 @@ func New( // If the pruned key is on disk, we must delete it to ensure our disk // can't get into a partially pruned state if the node restarts mid-way // through pruning. - if err := s.singletonDB.Delete(prunedKey); err != nil { + if err := s.singletonDB.Delete(PrunedKey); err != nil { return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err) } @@ -529,7 +524,6 @@ func newState( ctx *snow.Context, metricsReg prometheus.Registerer, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (*state, error) { blockIDCache, err := metercacher.New[uint64, ids.ID]( "block_id_cache", @@ -551,24 +545,24 @@ func newState( baseDB := versiondb.New(db) - validatorsDB := prefixdb.New(validatorsPrefix, baseDB) + validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) - currentValidatorsDB := prefixdb.New(currentPrefix, validatorsDB) - currentValidatorBaseDB := prefixdb.New(validatorPrefix, currentValidatorsDB) - currentDelegatorBaseDB := prefixdb.New(delegatorPrefix, currentValidatorsDB) - currentSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, currentValidatorsDB) - currentSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, currentValidatorsDB) + currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) + currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB) + currentDelegatorBaseDB := prefixdb.New(DelegatorPrefix, currentValidatorsDB) + currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB) + currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB) - pendingValidatorsDB := prefixdb.New(pendingPrefix, validatorsDB) - pendingValidatorBaseDB := prefixdb.New(validatorPrefix, pendingValidatorsDB) - pendingDelegatorBaseDB := prefixdb.New(delegatorPrefix, pendingValidatorsDB) - pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB) - pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB) + pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB) + pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB) + pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB) + pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) + pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) - nestedValidatorWeightDiffsDB := prefixdb.New(nestedValidatorWeightDiffsPrefix, validatorsDB) - 
nestedValidatorPublicKeyDiffsDB := prefixdb.New(nestedValidatorPublicKeyDiffsPrefix, validatorsDB) - flatValidatorWeightDiffsDB := prefixdb.New(flatValidatorWeightDiffsPrefix, validatorsDB) - flatValidatorPublicKeyDiffsDB := prefixdb.New(flatValidatorPublicKeyDiffsPrefix, validatorsDB) + nestedValidatorWeightDiffsDB := prefixdb.New(NestedValidatorWeightDiffsPrefix, validatorsDB) + nestedValidatorPublicKeyDiffsDB := prefixdb.New(NestedValidatorPublicKeyDiffsPrefix, validatorsDB) + flatValidatorWeightDiffsDB := prefixdb.New(FlatValidatorWeightDiffsPrefix, validatorsDB) + flatValidatorPublicKeyDiffsDB := prefixdb.New(FlatValidatorPublicKeyDiffsPrefix, validatorsDB) txCache, err := metercacher.New( "tx_cache", @@ -579,7 +573,7 @@ func newState( return nil, err } - rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB) + rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB) rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( "reward_utxos_cache", metricsReg, @@ -589,15 +583,15 @@ func newState( return nil, err } - utxoDB := prefixdb.New(utxoPrefix, baseDB) + utxoDB := prefixdb.New(UTXOPrefix, baseDB) utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled) if err != nil { return nil, err } - subnetBaseDB := prefixdb.New(subnetPrefix, baseDB) + subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB) - subnetOwnerDB := prefixdb.New(subnetOwnerPrefix, baseDB) + subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB) subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( "subnet_owner_cache", metricsReg, @@ -653,20 +647,20 @@ func newState( return &state{ validatorState: newValidatorState(), - cfg: cfg, - ctx: ctx, - metrics: metrics, - rewards: rewards, - bootstrapped: bootstrapped, - baseDB: baseDB, + validators: cfg.Validators, + ctx: ctx, + cfg: cfg, + metrics: metrics, + rewards: rewards, + baseDB: baseDB, addedBlockIDs: make(map[uint64]ids.ID), blockIDCache: blockIDCache, - blockIDDB: prefixdb.New(blockIDPrefix, baseDB), + blockIDDB: prefixdb.New(BlockIDPrefix, baseDB), addedBlocks: make(map[ids.ID]block.Block), blockCache: blockCache, - blockDB: prefixdb.New(blockPrefix, baseDB), + blockDB: prefixdb.New(BlockPrefix, baseDB), currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -698,7 +692,7 @@ func newState( flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txDB: prefixdb.New(txPrefix, baseDB), + txDB: prefixdb.New(TxPrefix, baseDB), txCache: txCache, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), @@ -718,18 +712,18 @@ func newState( transformedSubnets: make(map[ids.ID]*txs.Tx), transformedSubnetCache: transformedSubnetCache, - transformedSubnetDB: prefixdb.New(transformedSubnetPrefix, baseDB), + transformedSubnetDB: prefixdb.New(TransformedSubnetPrefix, baseDB), modifiedSupplies: make(map[ids.ID]uint64), supplyCache: supplyCache, - supplyDB: prefixdb.New(supplyPrefix, baseDB), + supplyDB: prefixdb.New(SupplyPrefix, baseDB), addedChains: make(map[ids.ID][]*txs.Tx), - chainDB: prefixdb.New(chainPrefix, baseDB), + chainDB: prefixdb.New(ChainPrefix, baseDB), chainCache: chainCache, chainDBCache: chainDBCache, - singletonDB: prefixdb.New(singletonPrefix, baseDB), + singletonDB: prefixdb.New(SingletonPrefix, baseDB), }, nil } @@ -790,16 +784,16 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) { } func (s *state) shouldInit() (bool, error) { - has, err := s.singletonDB.Has(initializedKey) + has, err := 
s.singletonDB.Has(InitializedKey) return !has, err } func (s *state) doneInit() error { - return s.singletonDB.Put(initializedKey, nil) + return s.singletonDB.Put(InitializedKey, nil) } func (s *state) ShouldPrune() (bool, error) { - has, err := s.singletonDB.Has(prunedKey) + has, err := s.singletonDB.Has(PrunedKey) if err != nil { return true, err } @@ -826,7 +820,7 @@ func (s *state) ShouldPrune() (bool, error) { } func (s *state) donePrune() error { - return s.singletonDB.Put(prunedKey, nil) + return s.singletonDB.Put(PrunedKey, nil) } func (s *state) GetSubnets() ([]*txs.Tx, error) { @@ -1159,26 +1153,6 @@ func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { } } -func (s *state) ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error { - for nodeID, validator := range s.currentStakers.validators[subnetID] { - staker := validator.validator - if err := vdrs.AddStaker(subnetID, nodeID, staker.PublicKey, staker.TxID, staker.Weight); err != nil { - return err - } - - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - staker := delegatorIterator.Value() - if err := vdrs.AddWeight(subnetID, nodeID, staker.Weight); err != nil { - delegatorIterator.Release() - return err - } - } - delegatorIterator.Release() - } - return nil -} - func (s *state) ApplyValidatorWeightDiffs( ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, @@ -1204,6 +1178,18 @@ func (s *state) ApplyValidatorWeightDiffs( if err != nil { return err } + + if parsedHeight > prevHeight { + s.ctx.Log.Error("unexpected parsed height", + zap.Stringer("subnetID", subnetID), + zap.Uint64("parsedHeight", parsedHeight), + zap.Stringer("nodeID", nodeID), + zap.Uint64("prevHeight", prevHeight), + zap.Uint64("startHeight", startHeight), + zap.Uint64("endHeight", endHeight), + ) + } + // If the parsedHeight is less than our target endHeight, then we have // fully processed the diffs from startHeight through endHeight. if parsedHeight < endHeight { @@ -1236,7 +1222,7 @@ func (s *state) ApplyValidatorWeightDiffs( Height: height, SubnetID: subnetID, } - prefixBytes, err := block.GenesisCodec.Marshal(block.Version, prefixStruct) + prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) if err != nil { return err } @@ -1368,13 +1354,20 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er // Persist primary network validator set at genesis for _, vdrTx := range genesis.Validators { - tx, ok := vdrTx.Unsigned.(txs.ValidatorTx) + // We expect genesis validator txs to be either AddValidatorTx or + // AddPermissionlessValidatorTx. + // + // TODO: Enforce stricter type check + validatorTx, ok := vdrTx.Unsigned.(txs.ScheduledStaker) if !ok { - return fmt.Errorf("expected tx type txs.ValidatorTx but got %T", vdrTx.Unsigned) + return fmt.Errorf("expected a scheduled staker but got %T", vdrTx.Unsigned) } - stakeAmount := tx.Weight() - stakeDuration := tx.EndTime().Sub(tx.StartTime()) + stakeAmount := validatorTx.Weight() + // Note: We use [StartTime()] here because genesis transactions are + // guaranteed to be pre-Durango activation. 
+ startTime := validatorTx.StartTime() + stakeDuration := validatorTx.EndTime().Sub(startTime) currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) if err != nil { return err @@ -1390,7 +1383,7 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er return err } - staker, err := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) + staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, startTime, potentialReward) if err != nil { return err } @@ -1434,21 +1427,21 @@ func (s *state) load() error { } func (s *state) loadMetadata() error { - timestamp, err := database.GetTimestamp(s.singletonDB, timestampKey) + timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey) if err != nil { return err } s.persistedTimestamp = timestamp s.SetTimestamp(timestamp) - currentSupply, err := database.GetUInt64(s.singletonDB, currentSupplyKey) + currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey) if err != nil { return err } s.persistedCurrentSupply = currentSupply s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply) - lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey) + lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey) if err != nil { return err } @@ -1457,7 +1450,7 @@ func (s *state) loadMetadata() error { // Lookup the most recently indexed range on disk. If we haven't started // indexing the weights, then we keep the indexed heights as nil. - indexedHeightsBytes, err := s.singletonDB.Get(heightsIndexedKey) + indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey) if err == database.ErrNotFound { return nil } @@ -1497,25 +1490,35 @@ func (s *state) loadCurrentValidators() error { } tx, _, err := s.GetTx(txID) if err != nil { - return err + return fmt.Errorf("failed loading validator transaction txID %s, %w", txID, err) + } + + stakerTx, ok := tx.Unsigned.(txs.Staker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } metadataBytes := validatorIt.Value() metadata := &validatorMetadata{ txID: txID, - // Note: we don't provide [LastUpdated] here because we expect it to + } + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] using the tx as a default in the event + // it was added pre-durango and is not stored in the database. + // + // Note: We do not populate [LastUpdated] since it is expected to // always be present on disk. + metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) } if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward) if err != nil { return err } @@ -1549,15 +1552,24 @@ func (s *state) loadCurrentValidators() error { metadataBytes := subnetValidatorIt.Value() metadata := &validatorMetadata{ txID: txID, - // use the start time as the fallback value - // in case it's not stored in the database - LastUpdated: uint64(stakerTx.StartTime().Unix()), + } + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] and [LastUpdated] using the tx as a + // default in the event they are not stored in the database. 
+ startTime := uint64(scheduledStakerTx.StartTime().Unix()) + metadata.StakerStartTime = startTime + metadata.LastUpdated = startTime } if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward, + ) if err != nil { return err } @@ -1587,20 +1599,32 @@ func (s *state) loadCurrentValidators() error { return err } + stakerTx, ok := tx.Unsigned.(txs.Staker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) + } + + metadataBytes := delegatorIt.Value() metadata := &delegatorMetadata{ txID: txID, } - err = parseDelegatorMetadata(delegatorIt.Value(), metadata) + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] using the tx as a default in the + // event it was added pre-durango and is not stored in the + // database. + metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) + } + err = parseDelegatorMetadata(metadataBytes, metadata) if err != nil { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward, + ) if err != nil { return err } @@ -1644,7 +1668,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } @@ -1679,7 +1703,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } @@ -1710,47 +1734,50 @@ func (s *state) loadPendingValidators() error { // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. func (s *state) initValidatorSets() error { - if s.cfg.Validators.Count(constants.PrimaryNetworkID) != 0 { - // Enforce the invariant that the validator set is empty here. - return errValidatorSetAlreadyPopulated - } - err := s.ApplyCurrentValidators(constants.PrimaryNetworkID, s.cfg.Validators) - if err != nil { - return err - } + for subnetID, validators := range s.currentStakers.validators { + if s.validators.Count(subnetID) != 0 { + // Enforce the invariant that the validator set is empty here. 
+ return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) + } - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, constants.PrimaryNetworkID, s.ctx.NodeID) - s.cfg.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + for nodeID, validator := range validators { + validatorStaker := validator.validator + if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + return err + } - s.metrics.SetLocalStake(s.cfg.Validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - totalWeight, err := s.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + delegatorStaker := delegatorIterator.Value() + if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() + } + } + + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } s.metrics.SetTotalStake(totalWeight) - - for subnetID := range s.cfg.TrackedSubnets { - if s.cfg.Validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. - return errValidatorSetAlreadyPopulated - } - err := s.ApplyCurrentValidators(subnetID, s.cfg.Validators) - if err != nil { - return err - } - - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, subnetID, s.ctx.NodeID) - s.cfg.Validators.RegisterCallbackListener(subnetID, vl) - } return nil } func (s *state) write(updateValidators bool, height uint64) error { + codecVersion := CodecVersion1 + if !s.cfg.IsDurangoActivated(s.GetTimestamp()) { + codecVersion = CodecVersion0 + } + return utils.Err( s.writeBlocks(), - s.writeCurrentStakers(updateValidators, height), + s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), - s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList), // Must be called after writeCurrentStakers + s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers s.writeTXs(), s.writeRewardUTXOs(), s.writeUTXOs(), @@ -1982,7 +2009,7 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { return blkID, nil } -func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error { +func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { heightBytes := database.PackUInt64(height) rawNestedPublicKeyDiffDB := prefixdb.New(heightBytes, s.nestedValidatorPublicKeyDiffsDB) nestedPKDiffDB := linkeddb.NewDefault(rawNestedPublicKeyDiffDB) @@ -2002,7 +2029,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error Height: height, SubnetID: subnetID, } - prefixBytes, err := block.GenesisCodec.Marshal(block.Version, prefixStruct) + prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) if err != nil { return fmt.Errorf("failed to create prefix bytes: %w", err) } @@ -2041,17 +2068,19 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error // // Invariant: It's impossible for a delegator to have been // rewarded in the same block that the validator was added. 
+ startTime := uint64(staker.StartTime.Unix()) metadata := &validatorMetadata{ txID: staker.TxID, lastUpdated: staker.StartTime, UpDuration: 0, - LastUpdated: uint64(staker.StartTime.Unix()), + LastUpdated: startTime, + StakerStartTime: startTime, PotentialReward: staker.PotentialReward, PotentialDelegateeReward: 0, } - metadataBytes, err := metadataCodec.Marshal(v0, metadata) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { return fmt.Errorf("failed to serialize current validator: %w", err) } @@ -2088,7 +2117,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error // // Note: We store the compressed public key here. pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := nestedPKDiffDB.Put(nodeID[:], pkBytes); err != nil { + if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { return err } } @@ -2104,6 +2133,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error delegatorDB, weightDiff, validatorDiff, + codecVersion, ) if err != nil { return err @@ -2123,11 +2153,11 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error } // TODO: Remove this once we no longer support version rollbacks. - weightDiffBytes, err := block.GenesisCodec.Marshal(block.Version, weightDiff) + weightDiffBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, weightDiff) if err != nil { return fmt.Errorf("failed to serialize validator weight diff: %w", err) } - if err := nestedWeightDiffDB.Put(nodeID[:], weightDiffBytes); err != nil { + if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { return err } @@ -2136,17 +2166,12 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error continue } - // We only track the current validator set of tracked subnets. 
- if subnetID != constants.PrimaryNetworkID && !s.cfg.TrackedSubnets.Contains(subnetID) { - continue - } - if weightDiff.Decrease { - err = s.cfg.Validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { if validatorDiff.validatorStatus == added { staker := validatorDiff.validator - err = s.cfg.Validators.AddStaker( + err = s.validators.AddStaker( subnetID, nodeID, staker.PublicKey, @@ -2154,7 +2179,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error weightDiff.Amount, ) } else { - err = s.cfg.Validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) } } if err != nil { @@ -2170,12 +2195,12 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error return nil } - totalWeight, err := s.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network: %w", err) } - s.metrics.SetLocalStake(s.cfg.Validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) s.metrics.SetTotalStake(totalWeight) return nil } @@ -2184,6 +2209,7 @@ func writeCurrentDelegatorDiff( currentDelegatorList linkeddb.LinkedDB, weightDiff *ValidatorWeightDiff, validatorDiff *diffValidator, + codecVersion uint16, ) error { addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) defer addedDelegatorIterator.Release() @@ -2197,8 +2223,9 @@ func writeCurrentDelegatorDiff( metadata := &delegatorMetadata{ txID: staker.TxID, PotentialReward: staker.PotentialReward, + StakerStartTime: uint64(staker.StartTime.Unix()), } - if err := writeDelegatorMetadata(currentDelegatorList, metadata); err != nil { + if err := writeDelegatorMetadata(currentDelegatorList, metadata, codecVersion); err != nil { return fmt.Errorf("failed to write current delegator to list: %w", err) } } @@ -2287,7 +2314,7 @@ func (s *state) writeTXs() error { // Note that we're serializing a [txBytesAndStatus] here, not a // *txs.Tx, so we don't use [txs.Codec]. 
- txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) + txBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, &stx) if err != nil { return fmt.Errorf("failed to serialize tx: %w", err) } @@ -2312,7 +2339,7 @@ func (s *state) writeRewardUTXOs() error { txDB := linkeddb.NewDefault(rawTxDB) for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("failed to serialize reward UTXO: %w", err) } @@ -2360,7 +2387,7 @@ func (s *state) writeSubnetOwners() error { owner := owner delete(s.subnetOwners, subnetID) - ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) + ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner) if err != nil { return fmt.Errorf("failed to marshal subnet owner: %w", err) } @@ -2422,30 +2449,30 @@ func (s *state) writeChains() error { func (s *state) writeMetadata() error { if !s.persistedTimestamp.Equal(s.timestamp) { - if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil { + if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil { return fmt.Errorf("failed to write timestamp: %w", err) } s.persistedTimestamp = s.timestamp } if s.persistedCurrentSupply != s.currentSupply { - if err := database.PutUInt64(s.singletonDB, currentSupplyKey, s.currentSupply); err != nil { + if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil { return fmt.Errorf("failed to write current supply: %w", err) } s.persistedCurrentSupply = s.currentSupply } if s.persistedLastAccepted != s.lastAccepted { - if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil { + if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil { return fmt.Errorf("failed to write last accepted: %w", err) } s.persistedLastAccepted = s.lastAccepted } if s.indexedHeights != nil { - indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.Version, s.indexedHeights) + indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, s.indexedHeights) if err != nil { return err } - if err := s.singletonDB.Put(heightsIndexedKey, indexedHeightsBytes); err != nil { + if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil { return fmt.Errorf("failed to write indexed range: %w", err) } } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 5a29619c1beb..c0fdb1e60269 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( "context" + "fmt" "math" "testing" "time" @@ -21,7 +22,6 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/units" @@ -33,8 +33,11 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" safemath "github.com/ava-labs/avalanchego/utils/math" ) @@ -50,23 +53,23 @@ func TestStateInitialization(t *testing.T) { require := require.New(t) s, db := newUninitializedState(require) - shouldInit, err := s.(*state).shouldInit() + shouldInit, err := s.shouldInit() require.NoError(err) require.True(shouldInit) - require.NoError(s.(*state).doneInit()) + require.NoError(s.doneInit()) require.NoError(s.Commit()) s = newStateFromDB(require, db) - shouldInit, err = s.(*state).shouldInit() + shouldInit, err = s.shouldInit() require.NoError(err) require.False(shouldInit) } func TestStateSyncGenesis(t *testing.T) { require := require.New(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) staker, err := state.GetCurrentValidator(constants.PrimaryNetworkID, initialNodeID) require.NoError(err) @@ -89,8 +92,626 @@ func TestStateSyncGenesis(t *testing.T) { assertIteratorsEqual(t, EmptyIterator, delegatorIterator) } -func newInitializedState(require *require.Assertions) (State, database.Database) { - s, db := newUninitializedState(require) +// Whenever we store a staker, a whole bunch a data structures are updated +// This test is meant to capture which updates are carried out +func TestPersistStakers(t *testing.T) { + tests := map[string]struct { + // Insert or delete a staker to state and store it + storeStaker func(*require.Assertions, ids.ID /*=subnetID*/, *state) *Staker + + // Check that the staker is duly stored/removed in P-chain state + checkStakerInState func(*require.Assertions, *state, *Staker) + + // Check whether validators are duly reported in the validator set, + // with the right weight and showing the BLS key + checkValidatorsSet func(*require.Assertions, *state, *Staker) + + // Check that node duly track stakers uptimes + checkValidatorUptimes func(*require.Assertions, *state, *Staker) + + // Check whether weight/bls keys diffs are duly stored + checkDiffs func(*require.Assertions, *state, *Staker, uint64) + }{ + "add current validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(endTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewCurrentStaker( + addPermValTx.ID(), + utx, + time.Unix(startTime, 0), + validatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(staker) + 
s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + return staker + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + retrievedStaker, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.Equal(staker, retrievedStaker) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(valOut, &validators.GetValidatorOutput{ + NodeID: staker.NodeID, + PublicKey: staker.PublicKey, + Weight: staker.Weight, + }) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + upDuration, lastUpdated, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.NoError(err) + r.Equal(upDuration, time.Duration(0)) + r.Equal(lastUpdated, staker.StartTime) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(weightDiff, &ValidatorWeightDiff{ + Decrease: false, + Amount: staker.Weight, + }) + + blsDiffBytes, err := s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + if staker.SubnetID == constants.PrimaryNetworkID { + r.NoError(err) + r.Nil(blsDiffBytes) + } else { + r.ErrorIs(err, database.ErrNotFound) + } + }, + }, + "add current delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert the delegator and its validator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(valEndTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + delegatorReward uint64 = 5432 + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewCurrentStaker( + addPermValTx.ID(), + utxVal, + time.Unix(valStartTime, 0), + validatorReward, + ) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewCurrentStaker( + addPermDelTx.ID(), + utxDel, + time.Unix(delStartTime, 0), + delegatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.PutCurrentDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + return del + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.True(delIt.Next()) + retrievedDelegator := delIt.Value() + r.False(delIt.Next()) + 
delIt.Release() + r.Equal(staker, retrievedDelegator) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(valOut.NodeID, staker.NodeID) + r.Equal(valOut.Weight, val.Weight+staker.Weight) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // validator's weight must increase of delegator's weight amount + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(weightDiff, &ValidatorWeightDiff{ + Decrease: false, + Amount: staker.Weight, + }) + }, + }, + "add pending validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime), + End: uint64(endTime), + Wght: 1234, + } + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewPendingStaker( + addPermValTx.ID(), + utx, + ) + r.NoError(err) + + s.PutPendingValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + return staker + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + retrievedStaker, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.Equal(staker, retrievedStaker) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + // pending validators are not showed in validators set + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 0) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // pending validators uptime is not tracked + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // pending validators weight diff and bls diffs are not stored + _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + + _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + }, + }, + "add pending delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert the delegator and its validator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(valStartTime), + End: uint64(valEndTime), + Wght: 1234, + } + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + Start: uint64(delStartTime), + 
End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewPendingStaker(addPermValTx.ID(), utxVal) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) + r.NoError(err) + + s.PutPendingValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.PutPendingDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + return del + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.True(delIt.Next()) + retrievedDelegator := delIt.Value() + r.False(delIt.Next()) + delIt.Release() + r.Equal(staker, retrievedDelegator) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 0) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // nothing to do here + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // nothing to do here + }, + }, + "delete current validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // add them remove the validator + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(endTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewCurrentStaker( + addPermValTx.ID(), + utx, + time.Unix(startTime, 0), + validatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeleteCurrentValidator(staker) + r.NoError(s.Commit()) + return staker + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + _, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + // deleted validators are not showed in the validators set anymore + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 0) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // uptimes of delete validators are dropped + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(weightDiff, &ValidatorWeightDiff{ + Decrease: true, + Amount: staker.Weight, + }) + + blsDiffBytes, err := 
s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + if staker.SubnetID == constants.PrimaryNetworkID { + r.NoError(err) + r.Equal(bls.DeserializePublicKey(blsDiffBytes), staker.PublicKey) + } else { + r.ErrorIs(err, database.ErrNotFound) + } + }, + }, + "delete current delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert validator and delegator, then remove the delegator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(valEndTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + delegatorReward uint64 = 5432 + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewCurrentStaker( + addPermValTx.ID(), + utxVal, + time.Unix(valStartTime, 0), + validatorReward, + ) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewCurrentStaker( + addPermDelTx.ID(), + utxDel, + time.Unix(delStartTime, 0), + delegatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + + s.PutCurrentDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeleteCurrentDelegator(del) + r.NoError(s.Commit()) + + return del + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.False(delIt.Next()) + delIt.Release() + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(valOut.NodeID, staker.NodeID) + r.Equal(valOut.Weight, val.Weight) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // nothing to do here + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // validator's weight must decrease of delegator's weight amount + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(weightDiff, &ValidatorWeightDiff{ + Decrease: true, + Amount: staker.Weight, + }) + }, + }, + "delete pending validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime), + End: uint64(endTime), + Wght: 1234, + } + ) + + utx := 
createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewPendingStaker( + addPermValTx.ID(), + utx, + ) + r.NoError(err) + + s.PutPendingValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeletePendingValidator(staker) + r.NoError(s.Commit()) + + return staker + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + _, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 0) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + + _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + }, + }, + "delete pending delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert validator and delegator the remove the validator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(valStartTime), + End: uint64(valEndTime), + Wght: 1234, + } + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + Start: uint64(delStartTime), + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewPendingStaker(addPermValTx.ID(), utxVal) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) + r.NoError(err) + + s.PutPendingValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + + s.PutPendingDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeletePendingDelegator(del) + r.NoError(s.Commit()) + return del + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.False(delIt.Next()) + delIt.Release() + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 0) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + }, + }, + } + + subnetIDs := 
[]ids.ID{constants.PrimaryNetworkID, ids.GenerateTestID()} + for _, subnetID := range subnetIDs { + for name, test := range tests { + t.Run(fmt.Sprintf("%s - subnetID %s", name, subnetID), func(t *testing.T) { + require := require.New(t) + + state, db := newUninitializedState(require) + + // create and store the staker + staker := test.storeStaker(require, subnetID, state) + + // check all relevant data are stored + test.checkStakerInState(require, state, staker) + test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) + test.checkDiffs(require, state, staker, 0 /*height*/) + + // rebuild the state + rebuiltState := newStateFromDB(require, db) + + // load relevant quantities + require.NoError(rebuiltState.loadCurrentValidators()) + require.NoError(rebuiltState.loadPendingValidators()) + require.NoError(rebuiltState.initValidatorSets()) + + // check again that all relevant data are still available in rebuilt state + test.checkStakerInState(require, state, staker) + test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) + test.checkDiffs(require, state, staker, 0 /*height*/) + }) + } + } +} + +func newInitializedState(require *require.Assertions) State { + s, _ := newUninitializedState(require) initialValidator := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -151,17 +772,17 @@ func newInitializedState(require *require.Assertions) (State, database.Database) genesisBlk, err := block.NewApricotCommitBlock(genesisBlkID, 0) require.NoError(err) - require.NoError(s.(*state).syncGenesis(genesisBlk, genesisState)) + require.NoError(s.syncGenesis(genesisBlk, genesisState)) - return s, db + return s } -func newUninitializedState(require *require.Assertions) (State, database.Database) { +func newUninitializedState(require *require.Assertions) (*state, database.Database) { db := memdb.New() return newStateFromDB(require, db), db } -func newStateFromDB(require *require.Assertions, db database.Database) State { +func newStateFromDB(require *require.Assertions, db database.Database) *state { execCfg, _ := config.GetExecutionConfig(nil) state, err := newState( db, @@ -178,13 +799,142 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - &utils.Atomic[bool]{}, ) require.NoError(err) require.NotNil(state) return state } +func createPermissionlessValidatorTx(r *require.Assertions, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { + var sig signer.Signer = &signer.Empty{} + if subnetID == constants.PrimaryNetworkID { + sk, err := bls.NewSecretKey() + r.NoError(err) + sig = signer.NewProofOfPossession(sk) + } + + return &txs.AddPermissionlessValidatorTx{ + BaseTx: txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: validatorsData, + Subnet: subnetID, + Signer: sig, + + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + 
OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + DelegationShares: reward.PercentDenominator, + } +} + +func createPermissionlessDelegatorTx(subnetID ids.ID, delegatorData txs.Validator) *txs.AddPermissionlessDelegatorTx { + return &txs.AddPermissionlessDelegatorTx{ + BaseTx: txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: delegatorData, + Subnet: subnetID, + + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + } +} + func TestValidatorWeightDiff(t *testing.T) { type test struct { name string @@ -330,7 +1080,7 @@ func TestValidatorWeightDiff(t *testing.T) { func TestStateAddRemoveValidator(t *testing.T) { require := require.New(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) var ( numNodes = 3 @@ -583,23 +1333,25 @@ func TestParsedStateBlock(t *testing.T) { } { - blk, err := block.NewApricotProposalBlock(ids.GenerateTestID(), 1000, &txs.Tx{ + tx := &txs.Tx{ Unsigned: &txs.RewardValidatorTx{ TxID: ids.GenerateTestID(), }, - }) + } + require.NoError(tx.Initialize(txs.Codec)) + blk, err := block.NewApricotProposalBlock(ids.GenerateTestID(), 1000, tx) require.NoError(err) blks = append(blks, blk) } { - blk, err := block.NewApricotStandardBlock(ids.GenerateTestID(), 1000, []*txs.Tx{ - { - Unsigned: &txs.RewardValidatorTx{ - TxID: ids.GenerateTestID(), - }, + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), }, - }) + } + require.NoError(tx.Initialize(txs.Codec)) + blk, err := block.NewApricotStandardBlock(ids.GenerateTestID(), 1000, []*txs.Tx{tx}) require.NoError(err) blks = append(blks, blk) } @@ -617,23 +1369,27 @@ func TestParsedStateBlock(t *testing.T) { } { - blk, err := block.NewBanffProposalBlock(time.Now(), ids.GenerateTestID(), 1000, &txs.Tx{ + tx := &txs.Tx{ Unsigned: &txs.RewardValidatorTx{ TxID: ids.GenerateTestID(), }, - }) + } + require.NoError(tx.Initialize(txs.Codec)) + + blk, err := block.NewBanffProposalBlock(time.Now(), ids.GenerateTestID(), 1000, tx, []*txs.Tx{}) require.NoError(err) blks = append(blks, blk) } { - blk, err := block.NewBanffStandardBlock(time.Now(), ids.GenerateTestID(), 1000, []*txs.Tx{ - { - Unsigned: &txs.RewardValidatorTx{ - TxID: ids.GenerateTestID(), - }, + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), }, - }) + 
} + require.NoError(tx.Initialize(txs.Codec)) + + blk, err := block.NewBanffStandardBlock(time.Now(), ids.GenerateTestID(), 1000, []*txs.Tx{tx}) require.NoError(err) blks = append(blks, blk) } @@ -645,7 +1401,7 @@ func TestParsedStateBlock(t *testing.T) { Status: choices.Accepted, } - stBlkBytes, err := block.GenesisCodec.Marshal(block.Version, &stBlk) + stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk) require.NoError(err) gotBlk, _, isStateBlk, err := parseStoredBlock(stBlkBytes) @@ -663,7 +1419,7 @@ func TestParsedStateBlock(t *testing.T) { func TestStateSubnetOwner(t *testing.T) { require := require.New(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) ctrl := gomock.NewController(t) var ( diff --git a/vms/platformvm/state/test/camino_test_state.go b/vms/platformvm/state/test/camino_test_state.go index 40c3c305eeb9..1f6c72f8e528 100644 --- a/vms/platformvm/state/test/camino_test_state.go +++ b/vms/platformvm/state/test/camino_test_state.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -39,7 +38,6 @@ func State( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) require.NoError(t, err) // persist and reload to init a bunch of in-memory stuff diff --git a/vms/platformvm/state/tree_iterator.go b/vms/platformvm/state/tree_iterator.go index a71b35e21346..920bc1377902 100644 --- a/vms/platformvm/state/tree_iterator.go +++ b/vms/platformvm/state/tree_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/tree_iterator_test.go b/vms/platformvm/state/tree_iterator_test.go index 57fa5727a4f4..7047d350bae1 100644 --- a/vms/platformvm/state/tree_iterator_test.go +++ b/vms/platformvm/state/tree_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/state/versions.go b/vms/platformvm/state/versions.go index da84182bb683..6afb0fe8e5f2 100644 --- a/vms/platformvm/state/versions.go +++ b/vms/platformvm/state/versions.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/platformvm/status/blockchain_status.go b/vms/platformvm/status/blockchain_status.go index 1ab25e588b3d..5d427e5a9383 100644 --- a/vms/platformvm/status/blockchain_status.go +++ b/vms/platformvm/status/blockchain_status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/vms/platformvm/status/blockchain_status_test.go b/vms/platformvm/status/blockchain_status_test.go index 97b96badcb93..d0710d2f2cb1 100644 --- a/vms/platformvm/status/blockchain_status_test.go +++ b/vms/platformvm/status/blockchain_status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/vms/platformvm/status/status.go b/vms/platformvm/status/status.go index a67fb6c38e81..2a674250d20e 100644 --- a/vms/platformvm/status/status.go +++ b/vms/platformvm/status/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/vms/platformvm/status/status_test.go b/vms/platformvm/status/status_test.go index 59316f983722..cd6ed5f814f7 100644 --- a/vms/platformvm/status/status_test.go +++ b/vms/platformvm/status/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/vms/platformvm/test/camino_defaults.go b/vms/platformvm/test/camino_defaults.go index 80180c29d5c2..082d069eb6f3 100644 --- a/vms/platformvm/test/camino_defaults.go +++ b/vms/platformvm/test/camino_defaults.go @@ -4,8 +4,6 @@ package test import ( - "context" - "errors" "testing" "time" @@ -18,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" @@ -47,9 +46,6 @@ const ( ) var ( - avaxAssetID = ids.ID{'C', 'A', 'M'} - cChainID = ids.ID{'C', '-', 'C', 'H', 'A', 'I', 'N'} - xChainID = ids.ID{'X', '-', 'C', 'H', 'A', 'I', 'N'} rewardConfig = reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .10 * reward.PercentDenominator, @@ -76,11 +72,15 @@ func Config(t *testing.T, phase Phase) *config.Config { cortinaTime = mockable.MaxTime berlinTime = mockable.MaxTime cairoTime = mockable.MaxTime + durangoTime = mockable.MaxTime ) // always reset LatestForkTime (a package level variable) // to ensure test independence switch phase { + case PhaseDurango: + durangoTime = LatestPhaseTime + fallthrough case PhaseCairo: cairoTime = LatestPhaseTime fallthrough @@ -124,6 +124,7 @@ func Config(t *testing.T, phase Phase) *config.Config { CortinaTime: cortinaTime, BerlinPhaseTime: berlinTime, CairoPhaseTime: cairoTime, + DurangoTime: durangoTime, CaminoConfig: caminoconfig.Config{ DACProposalBondAmount: 100 * units.Avax, }, @@ -151,10 +152,10 @@ func Genesis(t *testing.T, avaxAssetID ids.ID, caminoGenesisConfig api.Camino, a caminoGenesisConfig.ValidatorDeposits = make([][]api.UTXODeposit, len(FundedKeys)) caminoGenesisConfig.ValidatorConsortiumMembers = make([]ids.ShortID, len(FundedKeys)) - genesisValidators := make([]api.PermissionlessValidator, len(FundedKeys)) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(FundedKeys)) for i, key := range FundedKeys { - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(ValidatorStartTime.Unix()), EndTime: json.Uint64(ValidatorEndTime.Unix()), NodeID: FundedNodeIDs[i], @@ -203,32 +204,7 @@ func Genesis(t *testing.T, avaxAssetID ids.ID, caminoGenesisConfig api.Camino, a func Context(t *testing.T) *snow.Context { t.Helper() - - 
aliaser := ids.NewAliaser() - require.NoError(t, aliaser.Alias(constants.PlatformChainID, "P")) - - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = avaxAssetID - ctx.ChainID = constants.PlatformChainID - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.BCLookup = aliaser - ctx.NetworkID = constants.UnitTestID - ctx.SubnetID = constants.PrimaryNetworkID - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: ctx.SubnetID, - ctx.XChainID: ctx.SubnetID, - ctx.CChainID: ctx.SubnetID, - }[chainID] - if !ok { - return ids.Empty, errors.New("missing") - } - return subnetID, nil - }, - } - return ctx + return snowtest.Context(t, snowtest.PChainID) } func ContextWithSharedMemory(t *testing.T, db database.Database) *snow.Context { @@ -271,7 +247,7 @@ func Fx(t *testing.T, clk *mockable.Clock, log logging.Logger, isBootstrapped bo t.Helper() fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } diff --git a/vms/platformvm/test/camino_phase.go b/vms/platformvm/test/camino_phase.go index 8d96e075523e..922075bf3a33 100644 --- a/vms/platformvm/test/camino_phase.go +++ b/vms/platformvm/test/camino_phase.go @@ -4,6 +4,7 @@ package test import ( + "math" "testing" "time" @@ -24,6 +25,7 @@ const ( PhaseCortina Phase = 3 // avax, included into Berlin phase PhaseBerlin Phase = 3 PhaseCairo Phase = 4 + PhaseDurango Phase = math.MaxInt // avax ) // TODO @evlekht we might want to clean up sunrise/banff timestamps/relations later diff --git a/vms/platformvm/txs/add_delegator_test.go b/vms/platformvm/txs/add_delegator_test.go index cf053d45baba..ac3290fb2431 100644 --- a/vms/platformvm/txs/add_delegator_test.go +++ b/vms/platformvm/txs/add_delegator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -23,8 +23,7 @@ var preFundedKeys = secp256k1.TestKeys() func TestAddDelegatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -130,8 +129,7 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( diff --git a/vms/platformvm/txs/add_delegator_tx.go b/vms/platformvm/txs/add_delegator_tx.go index 4f6fbe395b02..3df97cf0af74 100644 --- a/vms/platformvm/txs/add_delegator_tx.go +++ b/vms/platformvm/txs/add_delegator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package txs @@ -19,7 +19,8 @@ import ( ) var ( - _ DelegatorTx = (*AddDelegatorTx)(nil) + _ DelegatorTx = (*AddDelegatorTx)(nil) + _ ScheduledStaker = (*AddDelegatorTx)(nil) errDelegatorWeightMismatch = errors.New("delegator weight is not equal to total stake weight") errStakeMustBeAVAX = errors.New("stake must be AVAX") diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx.go b/vms/platformvm/txs/add_permissionless_delegator_tx.go index 43db685d7629..9c29b97339d0 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -17,7 +17,10 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) +var ( + _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessDelegatorTx)(nil) +) // AddPermissionlessDelegatorTx is an unsigned addPermissionlessDelegatorTx type AddPermissionlessDelegatorTx struct { diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index 821a3b7da849..1099e910f09b 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -54,11 +54,11 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessDelegatorTx{ BaseTx: BaseTx{ @@ -216,7 +216,7 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0x44, 0x55, 0x66, 0x77, } var unsignedSimpleAddPrimaryTx UnsignedTx = simpleAddPrimaryTx - unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(Version, &unsignedSimpleAddPrimaryTx) + unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddPrimaryTx) require.NoError(err) require.Equal(expectedUnsignedSimpleAddPrimaryTxBytes, unsignedSimpleAddPrimaryTxBytes) @@ -599,7 +599,7 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } var unsignedComplexAddPrimaryTx UnsignedTx = complexAddPrimaryTx - unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(Version, &unsignedComplexAddPrimaryTx) + unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddPrimaryTx) require.NoError(err) require.Equal(expectedUnsignedComplexAddPrimaryTxBytes, unsignedComplexAddPrimaryTxBytes) @@ -768,11 +768,11 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) 
subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -972,7 +972,7 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0x44, 0x55, 0x66, 0x77, } var unsignedSimpleAddSubnetTx UnsignedTx = simpleAddSubnetTx - unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(Version, &unsignedSimpleAddSubnetTx) + unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddSubnetTx) require.NoError(err) require.Equal(expectedUnsignedSimpleAddSubnetTxBytes, unsignedSimpleAddSubnetTxBytes) @@ -1355,7 +1355,7 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } var unsignedComplexAddSubnetTx UnsignedTx = complexAddSubnetTx - unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(Version, &unsignedComplexAddSubnetTx) + unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddSubnetTx) require.NoError(err) require.Equal(expectedUnsignedComplexAddSubnetTxBytes, unsignedComplexAddSubnetTxBytes) diff --git a/vms/platformvm/txs/add_permissionless_validator_tx.go b/vms/platformvm/txs/add_permissionless_validator_tx.go index 8f313ae000b9..0f655c8daea4 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -21,7 +21,8 @@ import ( ) var ( - _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessDelegatorTx)(nil) errEmptyNodeID = errors.New("validator nodeID cannot be empty") errNoStake = errors.New("no stake") diff --git a/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 79b1a64abd00..58dd373010b7 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -60,11 +60,11 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessValidatorTx{ BaseTx: BaseTx{ @@ -267,7 +267,7 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0x00, 0x0f, 0x42, 0x40, } var unsignedSimpleAddPrimaryTx UnsignedTx = simpleAddPrimaryTx - unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(Version, &unsignedSimpleAddPrimaryTx) + unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddPrimaryTx) require.NoError(err) require.Equal(expectedUnsignedSimpleAddPrimaryTxBytes, unsignedSimpleAddPrimaryTxBytes) @@ -695,7 +695,7 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0x00, 0x0f, 0x42, 0x40, } var unsignedComplexAddPrimaryTx UnsignedTx = complexAddPrimaryTx - unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(Version, &unsignedComplexAddPrimaryTx) + unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddPrimaryTx) require.NoError(err) require.Equal(expectedUnsignedComplexAddPrimaryTxBytes, unsignedComplexAddPrimaryTxBytes) } @@ -725,11 +725,11 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -954,7 +954,7 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0x00, 0x0f, 0x42, 0x40, } var unsignedSimpleAddSubnetTx UnsignedTx = simpleAddSubnetTx - unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(Version, &unsignedSimpleAddSubnetTx) + unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddSubnetTx) require.NoError(err) require.Equal(expectedUnsignedSimpleAddSubnetTxBytes, unsignedSimpleAddSubnetTxBytes) @@ -1362,7 +1362,7 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0x00, 0x0f, 0x42, 0x40, } var unsignedComplexAddSubnetTx UnsignedTx = complexAddSubnetTx - unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(Version, &unsignedComplexAddSubnetTx) + unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddSubnetTx) require.NoError(err) require.Equal(expectedUnsignedComplexAddSubnetTxBytes, unsignedComplexAddSubnetTxBytes) } diff --git a/vms/platformvm/txs/add_subnet_validator_test.go b/vms/platformvm/txs/add_subnet_validator_test.go index 8e07469ea2a0..8dc8d76782ac 100644 --- a/vms/platformvm/txs/add_subnet_validator_test.go +++ b/vms/platformvm/txs/add_subnet_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -22,7 +22,7 @@ import ( func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -140,7 +140,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { func TestAddSubnetValidatorMarshal(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -201,7 +201,7 @@ func TestAddSubnetValidatorMarshal(t *testing.T) { require.NoError(err) require.NoError(stx.SyntacticVerify(ctx)) - txBytes, err := Codec.Marshal(Version, stx) + txBytes, err := Codec.Marshal(CodecVersion, stx) require.NoError(err) parsedTx, err := Parse(Codec, txBytes) diff --git a/vms/platformvm/txs/add_subnet_validator_tx.go b/vms/platformvm/txs/add_subnet_validator_tx.go index 0ac3474e1bd6..b6ce0d0fe4da 100644 --- a/vms/platformvm/txs/add_subnet_validator_tx.go +++ b/vms/platformvm/txs/add_subnet_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -14,7 +14,8 @@ import ( ) var ( - _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ ScheduledStaker = (*AddSubnetValidatorTx)(nil) errAddPrimaryNetworkValidator = errors.New("can't add primary network validator with AddSubnetValidatorTx") ) diff --git a/vms/platformvm/txs/add_validator_test.go b/vms/platformvm/txs/add_validator_test.go index 1076b2da69b2..daf32f66746d 100644 --- a/vms/platformvm/txs/add_validator_test.go +++ b/vms/platformvm/txs/add_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,8 +22,7 @@ import ( func TestAddValidatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -146,8 +145,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( diff --git a/vms/platformvm/txs/add_validator_tx.go b/vms/platformvm/txs/add_validator_tx.go index be6a93c2e42b..b6ab65b56e8f 100644 --- a/vms/platformvm/txs/add_validator_tx.go +++ b/vms/platformvm/txs/add_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -19,7 +19,8 @@ import ( ) var ( - _ ValidatorTx = (*AddValidatorTx)(nil) + _ ValidatorTx = (*AddValidatorTx)(nil) + _ ScheduledStaker = (*AddValidatorTx)(nil) errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", reward.PercentDenominator) ) diff --git a/vms/platformvm/txs/advance_time_tx.go b/vms/platformvm/txs/advance_time_tx.go index fc889da9ef0b..80b277fcb7e5 100644 --- a/vms/platformvm/txs/advance_time_tx.go +++ b/vms/platformvm/txs/advance_time_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/base_tx.go b/vms/platformvm/txs/base_tx.go index 5ffb308fe425..8a0be1edd76c 100644 --- a/vms/platformvm/txs/base_tx.go +++ b/vms/platformvm/txs/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/base_tx_test.go b/vms/platformvm/txs/base_tx_test.go index f06ae663dd0c..e8d4da2d7eb7 100644 --- a/vms/platformvm/txs/base_tx_test.go +++ b/vms/platformvm/txs/base_tx_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -128,7 +128,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } var unsignedSimpleBaseTx UnsignedTx = simpleBaseTx - unsignedSimpleBaseTxBytes, err := Codec.Marshal(Version, &unsignedSimpleBaseTx) + unsignedSimpleBaseTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleBaseTx) require.NoError(err) require.Equal(expectedUnsignedSimpleBaseTxBytes, unsignedSimpleBaseTxBytes) @@ -368,7 +368,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x01, 0x23, 0x45, 0x21, } var unsignedComplexBaseTx UnsignedTx = complexBaseTx - unsignedComplexBaseTxBytes, err := Codec.Marshal(Version, &unsignedComplexBaseTx) + unsignedComplexBaseTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexBaseTx) require.NoError(err) require.Equal(expectedUnsignedComplexBaseTxBytes, unsignedComplexBaseTxBytes) diff --git a/vms/platformvm/txs/builder/builder.go b/vms/platformvm/txs/builder/builder.go index 6c796d085abb..665342dab5b7 100644 --- a/vms/platformvm/txs/builder/builder.go +++ b/vms/platformvm/txs/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -6,7 +6,6 @@ package builder import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -183,10 +182,6 @@ type ProposalTxBuilder interface { changeAddr ids.ShortID, ) (*txs.Tx, error) - // newAdvanceTimeTx creates a new tx that, if it is accepted and followed by a - // Commit block, will set the chain's timestamp to [timestamp]. - NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) - // RewardStakerTx creates a new transaction that proposes to remove the staker // [validatorID] from the default validator set. NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) @@ -615,15 +610,6 @@ func (b *builder) NewRemoveSubnetValidatorTx( return tx, tx.SyntacticVerify(b.ctx) } -func (b *builder) NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) { - utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} - tx, err := txs.NewSigned(utx, txs.Codec, nil) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - func (b *builder) NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) { utx := &txs.RewardValidatorTx{TxID: txID} tx, err := txs.NewSigned(utx, txs.Codec, nil) diff --git a/vms/platformvm/txs/builder/camino_builder_test.go b/vms/platformvm/txs/builder/camino_builder_test.go index 80a904450572..8c8a080bf29d 100644 --- a/vms/platformvm/txs/builder/camino_builder_test.go +++ b/vms/platformvm/txs/builder/camino_builder_test.go @@ -933,7 +933,7 @@ func TestNewRewardsImportTx(t *testing.T) { if utxo.Timestamp == 0 { toMarshal = utxo.UTXO } - utxoBytes, err := txs.Codec.Marshal(txs.Version, toMarshal) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, toMarshal) require.NoError(t, err) utxosBytes[i] = utxoBytes } diff --git a/vms/platformvm/txs/builder/mock_builder.go b/vms/platformvm/txs/builder/mock_builder.go deleted file mode 100644 index 19f74a7bed2f..000000000000 --- a/vms/platformvm/txs/builder/mock_builder.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/builder (interfaces: Builder) - -// Package builder is a generated GoMock package. 
-package builder - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - secp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - secp256k1fx "github.com/ava-labs/avalanchego/vms/secp256k1fx" - gomock "go.uber.org/mock/gomock" -) - -// MockBuilder is a mock of Builder interface. -type MockBuilder struct { - ctrl *gomock.Controller - recorder *MockBuilderMockRecorder -} - -// MockBuilderMockRecorder is the mock recorder for MockBuilder. -type MockBuilderMockRecorder struct { - mock *MockBuilder -} - -// NewMockBuilder creates a new mock instance. -func NewMockBuilder(ctrl *gomock.Controller) *MockBuilder { - mock := &MockBuilder{ctrl: ctrl} - mock.recorder = &MockBuilderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder { - return m.recorder -} - -// NewAddDelegatorTx mocks base method. -func (m *MockBuilder) NewAddDelegatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddDelegatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddDelegatorTx indicates an expected call of NewAddDelegatorTx. -func (mr *MockBuilderMockRecorder) NewAddDelegatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddDelegatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddDelegatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewAddSubnetValidatorTx mocks base method. -func (m *MockBuilder) NewAddSubnetValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddSubnetValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddSubnetValidatorTx indicates an expected call of NewAddSubnetValidatorTx. -func (mr *MockBuilderMockRecorder) NewAddSubnetValidatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddSubnetValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddSubnetValidatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewAddValidatorTx mocks base method. -func (m *MockBuilder) NewAddValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 uint32, arg6 []*secp256k1.PrivateKey, arg7 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddValidatorTx indicates an expected call of NewAddValidatorTx. -func (mr *MockBuilderMockRecorder) NewAddValidatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddValidatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) -} - -// NewAdvanceTimeTx mocks base method. 
-func (m *MockBuilder) NewAdvanceTimeTx(arg0 time.Time) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAdvanceTimeTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAdvanceTimeTx indicates an expected call of NewAdvanceTimeTx. -func (mr *MockBuilderMockRecorder) NewAdvanceTimeTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAdvanceTimeTx", reflect.TypeOf((*MockBuilder)(nil).NewAdvanceTimeTx), arg0) -} - -// NewBaseTx mocks base method. -func (m *MockBuilder) NewBaseTx(arg0 uint64, arg1 secp256k1fx.OutputOwners, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBaseTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewBaseTx indicates an expected call of NewBaseTx. -func (mr *MockBuilderMockRecorder) NewBaseTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBaseTx", reflect.TypeOf((*MockBuilder)(nil).NewBaseTx), arg0, arg1, arg2, arg3) -} - -// NewCreateChainTx mocks base method. -func (m *MockBuilder) NewCreateChainTx(arg0 ids.ID, arg1 []byte, arg2 ids.ID, arg3 []ids.ID, arg4 string, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCreateChainTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewCreateChainTx indicates an expected call of NewCreateChainTx. -func (mr *MockBuilderMockRecorder) NewCreateChainTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCreateChainTx", reflect.TypeOf((*MockBuilder)(nil).NewCreateChainTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewCreateSubnetTx mocks base method. -func (m *MockBuilder) NewCreateSubnetTx(arg0 uint32, arg1 []ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCreateSubnetTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewCreateSubnetTx indicates an expected call of NewCreateSubnetTx. -func (mr *MockBuilderMockRecorder) NewCreateSubnetTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCreateSubnetTx", reflect.TypeOf((*MockBuilder)(nil).NewCreateSubnetTx), arg0, arg1, arg2, arg3) -} - -// NewExportTx mocks base method. -func (m *MockBuilder) NewExportTx(arg0 uint64, arg1 ids.ID, arg2 ids.ShortID, arg3 []*secp256k1.PrivateKey, arg4 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewExportTx", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewExportTx indicates an expected call of NewExportTx. -func (mr *MockBuilderMockRecorder) NewExportTx(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewExportTx", reflect.TypeOf((*MockBuilder)(nil).NewExportTx), arg0, arg1, arg2, arg3, arg4) -} - -// NewImportTx mocks base method. 
-func (m *MockBuilder) NewImportTx(arg0 ids.ID, arg1 ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewImportTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewImportTx indicates an expected call of NewImportTx. -func (mr *MockBuilderMockRecorder) NewImportTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewImportTx", reflect.TypeOf((*MockBuilder)(nil).NewImportTx), arg0, arg1, arg2, arg3) -} - -// NewRemoveSubnetValidatorTx mocks base method. -func (m *MockBuilder) NewRemoveSubnetValidatorTx(arg0 ids.NodeID, arg1 ids.ID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRemoveSubnetValidatorTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewRemoveSubnetValidatorTx indicates an expected call of NewRemoveSubnetValidatorTx. -func (mr *MockBuilderMockRecorder) NewRemoveSubnetValidatorTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoveSubnetValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewRemoveSubnetValidatorTx), arg0, arg1, arg2, arg3) -} - -// NewRewardValidatorTx mocks base method. -func (m *MockBuilder) NewRewardValidatorTx(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRewardValidatorTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewRewardValidatorTx indicates an expected call of NewRewardValidatorTx. -func (mr *MockBuilderMockRecorder) NewRewardValidatorTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRewardValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewRewardValidatorTx), arg0) -} - -// NewTransferSubnetOwnershipTx mocks base method. -func (m *MockBuilder) NewTransferSubnetOwnershipTx(arg0 ids.ID, arg1 uint32, arg2 []ids.ShortID, arg3 []*secp256k1.PrivateKey, arg4 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewTransferSubnetOwnershipTx", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewTransferSubnetOwnershipTx indicates an expected call of NewTransferSubnetOwnershipTx. 
-func (mr *MockBuilderMockRecorder) NewTransferSubnetOwnershipTx(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewTransferSubnetOwnershipTx", reflect.TypeOf((*MockBuilder)(nil).NewTransferSubnetOwnershipTx), arg0, arg1, arg2, arg3, arg4) -} diff --git a/vms/platformvm/txs/camino_add_deposit_offer_tx_test.go b/vms/platformvm/txs/camino_add_deposit_offer_tx_test.go index 53bb5b74d7c6..cf0d27d04704 100644 --- a/vms/platformvm/txs/camino_add_deposit_offer_tx_test.go +++ b/vms/platformvm/txs/camino_add_deposit_offer_tx_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -19,7 +20,7 @@ import ( ) func TestAddDepositOfferTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 0, 1}}} depositTxID := ids.ID{0, 1} creatorAddress := ids.ShortID{1} diff --git a/vms/platformvm/txs/camino_add_proposal_tx_test.go b/vms/platformvm/txs/camino_add_proposal_tx_test.go index b2258b0323c4..aac1b24b6b3c 100644 --- a/vms/platformvm/txs/camino_add_proposal_tx_test.go +++ b/vms/platformvm/txs/camino_add_proposal_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -18,18 +19,18 @@ import ( ) func TestAddProposalTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 0, 1}}} badProposal := &ProposalWrapper{Proposal: &dac.BaseFeeProposal{Options: []uint64{}}} - badProposalBytes, err := Codec.Marshal(Version, badProposal) + badProposalBytes, err := Codec.Marshal(CodecVersion, badProposal) require.NoError(t, err) proposal := &ProposalWrapper{Proposal: &dac.BaseFeeProposal{ End: 1, Options: []uint64{1}, }} - proposalBytes, err := Codec.Marshal(Version, proposal) + proposalBytes, err := Codec.Marshal(CodecVersion, proposal) require.NoError(t, err) baseTx := BaseTx{BaseTx: avax.BaseTx{ @@ -129,7 +130,7 @@ func TestAddProposalTxProposal(t *testing.T) { Start: 11, End: 12, Options: []uint64{555, 123, 7}, }} - proposalBytes, err := Codec.Marshal(Version, expectedProposal) + proposalBytes, err := Codec.Marshal(CodecVersion, expectedProposal) require.NoError(t, err) tx := &AddProposalTx{ diff --git a/vms/platformvm/txs/camino_add_vote_tx_test.go b/vms/platformvm/txs/camino_add_vote_tx_test.go index 01f07751c5c6..ff56b0e74940 100644 --- a/vms/platformvm/txs/camino_add_vote_tx_test.go +++ b/vms/platformvm/txs/camino_add_vote_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/dac" "github.com/ava-labs/avalanchego/vms/platformvm/locked" @@ -17,15 +18,15 @@ import ( ) func 
TestAddVoteTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 0, 1}}} badVote := &VoteWrapper{Vote: &dac.DummyVote{ErrorStr: "test errr"}} - badVoteBytes, err := Codec.Marshal(Version, badVote) + badVoteBytes, err := Codec.Marshal(CodecVersion, badVote) require.NoError(t, err) vote := &VoteWrapper{Vote: &dac.DummyVote{}} - voteBytes, err := Codec.Marshal(Version, vote) + voteBytes, err := Codec.Marshal(CodecVersion, vote) require.NoError(t, err) baseTx := BaseTx{BaseTx: avax.BaseTx{ @@ -136,7 +137,7 @@ func TestAddVoteTxSyntacticVerify(t *testing.T) { func TestAddVoteTxVote(t *testing.T) { expectedVote := &VoteWrapper{Vote: &dac.DummyVote{ErrorStr: "some data"}} - voteBytes, err := Codec.Marshal(Version, expectedVote) + voteBytes, err := Codec.Marshal(CodecVersion, expectedVote) require.NoError(t, err) tx := &AddVoteTx{VotePayload: voteBytes} diff --git a/vms/platformvm/txs/camino_claim_tx_test.go b/vms/platformvm/txs/camino_claim_tx_test.go index 648fe948e260..2d81ed894a1d 100644 --- a/vms/platformvm/txs/camino_claim_tx_test.go +++ b/vms/platformvm/txs/camino_claim_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" @@ -16,7 +17,7 @@ import ( ) func TestClaimTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 0, 1}}} depositTxID := ids.ID{0, 1} claimableOwnerID1 := ids.ID{0, 2} diff --git a/vms/platformvm/txs/camino_deposit_tx_test.go b/vms/platformvm/txs/camino_deposit_tx_test.go index 5b4e0657b435..4d032c6ba7c8 100644 --- a/vms/platformvm/txs/camino_deposit_tx_test.go +++ b/vms/platformvm/txs/camino_deposit_tx_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" @@ -18,7 +19,7 @@ import ( ) func TestDepositTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{1}}} tests := map[string]struct { diff --git a/vms/platformvm/txs/camino_finish_proposals_tx_test.go b/vms/platformvm/txs/camino_finish_proposals_tx_test.go index 024fcb9ed102..2c8cda0056bb 100644 --- a/vms/platformvm/txs/camino_finish_proposals_tx_test.go +++ b/vms/platformvm/txs/camino_finish_proposals_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" @@ -16,7 +17,7 @@ import ( ) func TestFinishProposalsTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 0, 1}}} proposalID1 := ids.ID{1} 
diff --git a/vms/platformvm/txs/camino_multisig_alias_tx_test.go b/vms/platformvm/txs/camino_multisig_alias_tx_test.go index 3721f9f8c068..f5bfce4ba160 100644 --- a/vms/platformvm/txs/camino_multisig_alias_tx_test.go +++ b/vms/platformvm/txs/camino_multisig_alias_tx_test.go @@ -11,13 +11,14 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/multisig" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestMultisigAliasTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) memo := []byte("memo") bigMemo := make([]byte, 257) diff --git a/vms/platformvm/txs/camino_owner_id.go b/vms/platformvm/txs/camino_owner_id.go index 676b0f7226c4..04906b8a3a10 100644 --- a/vms/platformvm/txs/camino_owner_id.go +++ b/vms/platformvm/txs/camino_owner_id.go @@ -16,7 +16,7 @@ var errOutNotOwned = errors.New("out doesn't implement fx.Owned interface") // Returns hash of marshalled bytes of owner, which can be treated as owner ID. func GetOwnerID(owner interface{}) (ids.ID, error) { - ownerBytes, err := Codec.Marshal(Version, owner) + ownerBytes, err := Codec.Marshal(CodecVersion, owner) if err != nil { return ids.Empty, fmt.Errorf("couldn't marshal owner: %w", err) } diff --git a/vms/platformvm/txs/camino_register_node_tx_test.go b/vms/platformvm/txs/camino_register_node_tx_test.go index 073c9cfdbea6..9082904bfdfa 100644 --- a/vms/platformvm/txs/camino_register_node_tx_test.go +++ b/vms/platformvm/txs/camino_register_node_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" @@ -16,7 +17,7 @@ import ( ) func TestRegisterNodeTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) owner1 := secp256k1fx.OutputOwners{Threshold: 1, Addrs: []ids.ShortID{{0, 1}}} depositTxID := ids.ID{1} diff --git a/vms/platformvm/txs/camino_rewards_import_tx_test.go b/vms/platformvm/txs/camino_rewards_import_tx_test.go index 5a80e5bf5ebe..bfba38f23af2 100644 --- a/vms/platformvm/txs/camino_rewards_import_tx_test.go +++ b/vms/platformvm/txs/camino_rewards_import_tx_test.go @@ -9,13 +9,14 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" ) func TestRewardsImportTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) tests := map[string]struct { tx *RewardsImportTx diff --git a/vms/platformvm/txs/camino_unlock_deposit_tx_test.go b/vms/platformvm/txs/camino_unlock_deposit_tx_test.go index 9e7ad8da5be0..52fb3ba51e3b 100644 --- a/vms/platformvm/txs/camino_unlock_deposit_tx_test.go +++ b/vms/platformvm/txs/camino_unlock_deposit_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/avax" 
"github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/test/generate" @@ -16,7 +17,7 @@ import ( ) func TestUnlockDepositTxSyntacticVerify(t *testing.T) { - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) tests := map[string]struct { tx *UnlockDepositTx diff --git a/vms/platformvm/txs/codec.go b/vms/platformvm/txs/codec.go index 8ab7da693a22..59ac2d252cc3 100644 --- a/vms/platformvm/txs/codec.go +++ b/vms/platformvm/txs/codec.go @@ -8,13 +8,14 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -27,8 +28,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -// Version is the current default codec version -const Version = 0 +const CodecVersion = 0 var ( Codec codec.Manager @@ -40,11 +40,13 @@ var ( GenesisCodec codec.Manager ) -func init() { - c := linearcodec.NewCaminoDefault() - Codec = codec.NewDefaultManager() - gc := linearcodec.NewCaminoCustomMaxLength(math.MaxInt32) - GenesisCodec = codec.NewManager(math.MaxInt32) +// TODO: Remove after v1.11.x has activated +// +// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed +// concurrently +func InitCodec(durangoTime time.Time) error { + c := linearcodec.NewCaminoDefault(durangoTime) + gc := linearcodec.NewCaminoCustomMaxLength(time.Time{}, math.MaxInt32) errs := wrappers.Errs{} for _, c := range []linearcodec.CaminoCodec{c, gc} { @@ -59,12 +61,25 @@ func init() { errs.Add(RegisterDUnsignedTxsTypes(c)) } + + newCodec := codec.NewDefaultManager() + newGenesisCodec := codec.NewManager(math.MaxInt32) errs.Add( - Codec.RegisterCodec(Version, c), - GenesisCodec.RegisterCodec(Version, gc), + newCodec.RegisterCodec(CodecVersion, c), + newGenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - panic(errs.Err) + return errs.Err + } + + Codec = newCodec + GenesisCodec = newGenesisCodec + return nil +} + +func init() { + if err := InitCodec(time.Time{}); err != nil { + panic(err) } } diff --git a/vms/platformvm/txs/create_chain_test.go b/vms/platformvm/txs/create_chain_test.go index 7154072bd8a9..787aaa2a7ccb 100644 --- a/vms/platformvm/txs/create_chain_test.go +++ b/vms/platformvm/txs/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -17,7 +17,7 @@ import ( ) func TestUnsignedCreateChainTxVerify(t *testing.T) { - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) testSubnet1ID := ids.GenerateTestID() testSubnet1ControlKeys := []*secp256k1.PrivateKey{ preFundedKeys[0], diff --git a/vms/platformvm/txs/create_chain_tx.go b/vms/platformvm/txs/create_chain_tx.go index 37166e91ff98..84a9c72f43b3 100644 --- a/vms/platformvm/txs/create_chain_tx.go +++ b/vms/platformvm/txs/create_chain_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/create_subnet_tx.go b/vms/platformvm/txs/create_subnet_tx.go index 02f41faefe4c..e560c9dd5f94 100644 --- a/vms/platformvm/txs/create_subnet_tx.go +++ b/vms/platformvm/txs/create_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 9bf5aafed7ac..b7b636ad6dbe 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
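create_chain_test.go (like the Camino tx tests earlier in this diff) drops the hand-rolled defaultContext/snow.DefaultContextTest helpers in favor of snowtest.Context, which returns a snow.Context pre-populated for the requested chain. A minimal, hypothetical test showing the pattern:

```go
func TestSnowtestContextSketch(t *testing.T) {
	// Pre-populated P-Chain context (chain, network, and asset IDs set),
	// as consumed by the SyntacticVerify tests above.
	ctx := snowtest.Context(t, snowtest.PChainID)
	require.NotNil(t, ctx.Log)
}
```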
package executor @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -20,26 +21,39 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +func newAdvanceTimeTx(t testing.TB, timestamp time.Time) (*txs.Tx, error) { + utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(snowtest.Context(t, snowtest.PChainID)) +} + // Ensure semantic verification updates the current and pending staker set // for the primary network func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() - addPendingValidatorTx, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) + addPendingValidatorTx, err := addPendingValidator( + env, + pendingValidatorStartTime, + pendingValidatorEndTime, + nodeID, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -83,12 +97,9 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { // Ensure semantic verification fails when proposed timestamp is at or before current timestamp func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) + tx, err := newAdvanceTimeTx(t, env.state.GetTimestamp()) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -110,19 +121,20 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { // Ensure semantic verification fails when proposed timestamp is after next validator set change time func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) 
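The newAdvanceTimeTx helper above replaces env.txBuilder.NewAdvanceTimeTx throughout these tests; the surrounding execution pattern is unchanged. A fragment of that recurring shape, assuming the test-local require := require.New(t), env, and lastAcceptedID used above:

```go
tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime)
require.NoError(err)

// Proposal txs are verified against separate commit and abort diffs.
onCommitState, err := state.NewDiff(lastAcceptedID, env)
require.NoError(err)
onAbortState, err := state.NewDiff(lastAcceptedID, env)
require.NoError(err)

executor := ProposalTxExecutor{
	OnCommitState: onCommitState,
	OnAbortState:  onAbortState,
	Backend:       &env.backend,
	Tx:            tx,
}
require.NoError(tx.Unsigned.Visit(&executor))
```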
pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) { - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -141,21 +153,17 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require.ErrorIs(err, ErrChildBlockAfterStakerChangeTime) } - require.NoError(shutdownEnvironment(env)) - // Case: Timestamp is after next validator end time - env = newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env = newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() // fast forward clock to 10 seconds before genesis validators stop validating env.clk.Set(defaultValidateEndTime.Add(-10 * time.Second)) { // Proposes advancing timestamp to 1 second after genesis validators stop validating - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultValidateEndTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, defaultValidateEndTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -207,8 +215,8 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // Staker5: |--------------------| staker1 := staker{ nodeID: ids.GenerateTestNodeID(), - startTime: defaultGenesisTime.Add(1 * time.Minute), - endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), + startTime: defaultValidateStartTime.Add(1 * time.Minute), + endTime: defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } staker2 := staker{ nodeID: ids.GenerateTestNodeID(), @@ -346,11 +354,9 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) @@ -394,7 +400,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, newTime := range test.advanceTimeTo { env.clk.Set(newTime) - tx, err := env.txBuilder.NewAdvanceTimeTx(newTime) + tx, err := newAdvanceTimeTx(t, newTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -451,19 +457,16 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // is after the new timestamp func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) dummyHeight := uint64(1) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + 
subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -477,9 +480,11 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -492,7 +497,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -519,7 +524,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // Advance time to the first staker's end time. env.clk.Set(subnetVdr1EndTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1EndTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1EndTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -554,11 +559,9 @@ func TestTrackedSubnet(t *testing.T) { for _, tracked := range []bool{true, false} { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) subnetID := testSubnet1.ID() @@ -567,15 +570,15 @@ func TestTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - subnetValidatorNodeID := preFundedKeys[0].PublicKey().Address() + subnetValidatorNodeID := genesisNodeIDs[0] - subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) - subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) + subnetVdr1StartTime := defaultValidateStartTime.Add(1 * time.Minute) + subnetVdr1EndTime := defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time - ids.NodeID(subnetValidatorNodeID), // Node ID + subnetValidatorNodeID, // Node ID subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, @@ -595,7 +598,7 @@ func TestTrackedSubnet(t *testing.T) { // Advance time to the staker's start time. 
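As shown just above, state.NewCurrentStaker now takes the staker's start time as an explicit argument rather than deriving it from the staker tx. The updated call, mirroring the fragment above with comments added:

```go
addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx)
staker, err := state.NewCurrentStaker(
	tx.ID(),
	addSubnetValTx,
	addSubnetValTx.StartTime(), // start time is now passed explicitly
	0,                          // potential reward
)
require.NoError(err)
```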
env.clk.Set(subnetVdr1StartTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1StartTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1StartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -616,24 +619,22 @@ func TestTrackedSubnet(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - _, ok := env.config.Validators.GetValidator(subnetID, ids.NodeID(subnetValidatorNodeID)) - require.Equal(tracked, ok) + _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.True(ok) }) } } func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMaxStakingDuration) nodeID := ids.GenerateTestNodeID() _, err := addPendingValidator( @@ -645,7 +646,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -702,7 +703,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -731,22 +732,20 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -798,7 +797,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -825,49 +824,19 
@@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } -// Test method InitiallyPrefersCommit -func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { - require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time - - // Proposed advancing timestamp to 1 second after sync bound - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) - require.NoError(err) - - onCommitState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - onAbortState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: &env.backend, - Tx: tx, - } - require.NoError(tx.Unsigned.Visit(&executor)) - - require.True(executor.PrefersCommit, "should prefer to commit this tx because its proposed timestamp it's within sync bound") -} - func TestAdvanceTimeTxAfterBanff(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time - env.config.BanffTime = defaultGenesisTime.Add(SyncBound) + upgradeTime := env.clk.Time().Add(SyncBound) + env.config.BanffTime = upgradeTime + env.config.CortinaTime = upgradeTime + env.config.DurangoTime = upgradeTime // Proposed advancing timestamp to the banff timestamp - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) + tx, err := newAdvanceTimeTx(t, upgradeTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -889,16 +858,15 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { // Ensure marshaling/unmarshaling works func TestAdvanceTimeTxUnmarshal(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) + chainTime := env.state.GetTimestamp() + tx, err := newAdvanceTimeTx(t, chainTime.Add(time.Second)) require.NoError(err) - bytes, err := txs.Codec.Marshal(txs.Version, tx) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) require.NoError(err) var unmarshaledTx txs.Tx @@ -923,7 +891,7 @@ func addPendingValidator( uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, keys, ids.ShortEmpty, diff --git a/vms/platformvm/txs/executor/atomic_tx_executor.go b/vms/platformvm/txs/executor/atomic_tx_executor.go index 447d7c140cbe..d378a3c94ae0 100644 --- a/vms/platformvm/txs/executor/atomic_tx_executor.go +++ b/vms/platformvm/txs/executor/atomic_tx_executor.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
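The change repeated across these tests: newEnvironment gains a postDurango flag, and the explicit shutdownEnvironment calls disappear because teardown is now registered via t.Cleanup inside newEnvironment (see the helpers_test.go hunk later in this diff). Tests only manage the context lock they take:

```go
env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/)
env.ctx.Lock.Lock()
defer env.ctx.Lock.Unlock()
```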
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/platformvm/txs/executor/backend.go b/vms/platformvm/txs/executor/backend.go index f043521a56bc..847aefc16499 100644 --- a/vms/platformvm/txs/executor/backend.go +++ b/vms/platformvm/txs/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -20,7 +20,7 @@ type Backend struct { Clk *mockable.Clock Fx fx.Fx FlowChecker utxo.Verifier - Uptimes uptime.Manager + Uptimes uptime.Calculator Rewards reward.Calculator Bootstrapped *utils.Atomic[bool] } diff --git a/vms/platformvm/txs/executor/camino_advance_time_test.go b/vms/platformvm/txs/executor/camino_advance_time_test.go index 7cc33c8a71f3..f4b07760619f 100644 --- a/vms/platformvm/txs/executor/camino_advance_time_test.go +++ b/vms/platformvm/txs/executor/camino_advance_time_test.go @@ -249,7 +249,7 @@ func TestDeferredStakers(t *testing.T) { for _, newTime := range tt.advanceTimeTo { env.clk.Set(newTime) - tx, err := env.txBuilder.NewAdvanceTimeTx(newTime) + tx, err := newAdvanceTimeTx(t, newTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) diff --git a/vms/platformvm/txs/executor/camino_state_changes.go b/vms/platformvm/txs/executor/camino_state_changes.go deleted file mode 100644 index 96ae55640799..000000000000 --- a/vms/platformvm/txs/executor/camino_state_changes.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// See the file LICENSE for licensing terms. - -package executor - -import ( - "time" - - "github.com/ava-labs/avalanchego/vms/platformvm/state" -) - -type caminoStateChanges struct{} - -func (*caminoStateChanges) Apply(_ state.Diff) { -} - -func (*caminoStateChanges) Len() int { - return 0 -} - -func caminoAdvanceTimeTo( - _ *Backend, - _ state.Chain, - _ time.Time, - _ *stateChanges, -) error { - return nil -} diff --git a/vms/platformvm/txs/executor/camino_tx_executor.go b/vms/platformvm/txs/executor/camino_tx_executor.go index 086e873ee8ba..c3530e1e8795 100644 --- a/vms/platformvm/txs/executor/camino_tx_executor.go +++ b/vms/platformvm/txs/executor/camino_tx_executor.go @@ -22,7 +22,9 @@ import ( "github.com/ava-labs/avalanchego/vms/components/multisig" "github.com/ava-labs/avalanchego/vms/components/verify" as "github.com/ava-labs/avalanchego/vms/platformvm/addrstate" + "github.com/ava-labs/avalanchego/vms/platformvm/config" dacProposals "github.com/ava-labs/avalanchego/vms/platformvm/dac" + deposits "github.com/ava-labs/avalanchego/vms/platformvm/deposit" "github.com/ava-labs/avalanchego/vms/platformvm/locked" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/treasury" @@ -30,8 +32,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor/dac" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - deposits "github.com/ava-labs/avalanchego/vms/platformvm/deposit" ) // Max number of items allowed in a page @@ -91,7 +91,6 @@ var ( errNestedMsigAlias = errors.New("nested msig aliases are not allowed") errProposalStartToEarly = errors.New("proposal start time is to early") errProposalToFarInFuture = fmt.Errorf("proposal start time is more than %s ahead of the current chain time", MaxFutureStartTime) - 
ErrProposalInactive = errors.New("proposal is inactive") errProposerCredentialMismatch = errors.New("proposer credential isn't matching") errWrongProposalBondAmount = errors.New("wrong proposal bond amount") errVoterCredentialMismatch = errors.New("voter credential isn't matching") @@ -205,9 +204,15 @@ func (e *CaminoStandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error return fmt.Errorf("%w: %w", errSignatureMissing, err) } + currentTimestamp := e.State.GetTimestamp() + // verify validator - duration := tx.Validator.Duration() + startTime := currentTimestamp + if !e.Backend.Config.IsDurangoActivated(currentTimestamp) { + startTime = tx.StartTime() + } + duration := tx.EndTime().Sub(startTime) switch { case tx.Validator.Wght < e.Backend.Config.MinValidatorStake: @@ -225,7 +230,6 @@ func (e *CaminoStandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error } if e.Backend.Bootstrapped.Get() { - currentTimestamp := e.State.GetTimestamp() // Ensure the proposed validator starts after the current time startTime := tx.StartTime() if !currentTimestamp.Before(startTime) { @@ -449,7 +453,7 @@ func (e *CaminoStandardTxExecutor) wrapAtomicElementsForMultisig(tx *txs.ExportT UTXO: utxo, Aliases: aliases, } - bytes, err := txs.Codec.Marshal(txs.Version, wrappedUtxo) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, wrappedUtxo) if err != nil { return err } @@ -757,7 +761,7 @@ func (e *CaminoStandardTxExecutor) DepositTx(tx *txs.DepositTx) error { return err } - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -888,7 +892,7 @@ func (e *CaminoStandardTxExecutor) UnlockDepositTx(tx *txs.UnlockDepositTx) erro amountToBurn := uint64(0) if !hasExpiredDeposits { - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1137,7 +1141,7 @@ func (e *CaminoStandardTxExecutor) ClaimTx(tx *txs.ClaimTx) error { } // BaseTx check (fee, reward outs) - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1244,7 +1248,7 @@ func (e *CaminoStandardTxExecutor) RegisterNodeTx(tx *txs.RegisterNodeTx) error } // verify the flowcheck - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1502,7 +1506,7 @@ func (e *CaminoStandardTxExecutor) BaseTx(tx *txs.BaseTx) error { return err } - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1593,7 +1597,7 @@ func (e *CaminoStandardTxExecutor) MultisigAliasTx(tx *txs.MultisigAliasTx) erro } // verify the flowcheck - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1654,7 +1658,7 @@ func (e *CaminoStandardTxExecutor) AddDepositOfferTx(tx *txs.AddDepositOfferTx) } // verify the flowcheck - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1812,7 +1816,7 @@ func (e *CaminoStandardTxExecutor) AddProposalTx(tx *txs.AddProposalTx) error { // verify the flowcheck lockState := locked.StateBonded - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -1913,8 +1917,8 @@ func (e *CaminoStandardTxExecutor) AddVoteTx(tx *txs.AddVoteTx) error { return err } - if !proposal.IsActiveAt(chainTime) { - 
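The AddValidatorTx hunk above changes how the staking duration is measured once Durango is active: the current chain time, not the transaction's stated start time, anchors the duration. A hypothetical standalone helper (not part of the diff) capturing that rule; only the IsDurangoActivated, StartTime, and EndTime calls are taken from the hunk:

```go
func stakingDurationSketch(cfg *config.Config, chainTime time.Time, tx *txs.AddValidatorTx) time.Duration {
	start := chainTime
	if !cfg.IsDurangoActivated(chainTime) {
		// Pre-Durango: the duration still runs from the validator's stated start time.
		start = tx.StartTime()
	}
	return tx.EndTime().Sub(start)
}
```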
return ErrProposalInactive // should never happen, cause inactive proposals are removed from state + if err := proposal.VerifyActive(chainTime); err != nil { + return err // could happen, if proposal didn't start yet } // verify voter credential and address state (role) @@ -1947,7 +1951,7 @@ func (e *CaminoStandardTxExecutor) AddVoteTx(tx *txs.AddVoteTx) error { // verify the flowcheck - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -2274,7 +2278,7 @@ func (e *CaminoStandardTxExecutor) AddressStateTx(tx *txs.AddressStateTx) error } // Verify the flowcheck - baseFee, err := e.State.GetBaseFee() + baseFee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -2399,3 +2403,28 @@ func outputsAreEqual(outs1, outs2 []*avax.TransferableOutput) bool { return ok && out1.Asset == out2.Asset && outEq1.Equal(out2.Out) }) } + +func getBaseFee(s state.Chain, cfg *config.Config) (uint64, error) { + fee, err := s.GetBaseFee() + switch err { + case database.ErrNotFound: + return cfg.TxFee, nil + case nil: + return fee, nil + } + return 0, err +} + +// TODO @evlekht remove nolint, when this func will be used. +// Currently its not used, cause we didn't implement P->C transport of proposal outcomes +// like new base fee or new fee distribution or at least api that will provide this info. +func getFeeDistribution(s state.Chain, cfg *config.Config) ([dacProposals.FeeDistributionFractionsCount]uint64, error) { //nolint:unused + feeDistribution, err := s.GetFeeDistribution() + switch err { + case database.ErrNotFound: + return cfg.CaminoConfig.FeeDistribution, nil + case nil: + return feeDistribution, nil + } + return [dacProposals.FeeDistributionFractionsCount]uint64{}, err +} diff --git a/vms/platformvm/txs/executor/camino_tx_executor_test.go b/vms/platformvm/txs/executor/camino_tx_executor_test.go index 3e20435adbc2..01e44dbec8f5 100644 --- a/vms/platformvm/txs/executor/camino_tx_executor_test.go +++ b/vms/platformvm/txs/executor/camino_tx_executor_test.go @@ -170,6 +170,7 @@ func TestCaminoStandardTxExecutorAddValidatorTx(t *testing.T) { staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.CaminoAddValidatorTx), + tx.Unsigned.(*txs.CaminoAddValidatorTx).StartTime(), 0, ) require.NoError(t, err) @@ -200,6 +201,7 @@ func TestCaminoStandardTxExecutorAddValidatorTx(t *testing.T) { staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.CaminoAddValidatorTx), + tx.Unsigned.(*txs.CaminoAddValidatorTx).StartTime(), 0, ) require.NoError(t, err) @@ -351,6 +353,7 @@ func TestCaminoStandardTxExecutorAddSubnetValidatorTx(t *testing.T) { staker, err := state.NewCurrentStaker( addDSTx.ID(), addDSTx.Unsigned.(*txs.CaminoAddValidatorTx), + addDSTx.Unsigned.(*txs.CaminoAddValidatorTx).StartTime(), 0, ) require.NoError(t, err) @@ -374,6 +377,7 @@ func TestCaminoStandardTxExecutorAddSubnetValidatorTx(t *testing.T) { staker, err = state.NewCurrentStaker( subnetTx.ID(), subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + subnetTx.Unsigned.(*txs.AddSubnetValidatorTx).StartTime(), 0, ) require.NoError(t, err) @@ -1442,7 +1446,7 @@ func TestCaminoRewardValidatorTx(t *testing.T) { generateUTXOsAfterReward: func(txID ids.ID) []*avax.UTXO { return []*avax.UTXO{ generate.UTXO(txID, env.ctx.AVAXAssetID, test.ValidatorWeight, stakeOwners, ids.Empty, ids.Empty, true), - generate.UTXOWithIndex(unlockedUTXOTxID, 2, env.ctx.AVAXAssetID, test.PreFundedBalance, stakeOwners, ids.Empty, ids.Empty, true), + 
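getBaseFee above centralizes a fallback now used by every fee-charging Camino executor: prefer the base fee recorded in state (e.g. the outcome of an accepted BaseFeeProposal), fall back to the statically configured TxFee when database.ErrNotFound signals that nothing has been written yet, and surface any other error. The typical call site, repeated throughout the hunk:

```go
baseFee, err := getBaseFee(e.State, e.Backend.Config)
if err != nil {
	return err
}
// baseFee then feeds the flow check where e.State.GetBaseFee() was used directly before.
```

Keeping the ErrNotFound branch in one helper avoids repeating the config fallback at each of these call sites.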
generate.UTXOWithIndex(unlockedUTXOTxID, 3, env.ctx.AVAXAssetID, test.PreFundedBalance, stakeOwners, ids.Empty, ids.Empty, true), } }, expectedErr: nil, @@ -3304,7 +3308,7 @@ func TestCaminoStandardTxExecutorDepositTx(t *testing.T) { // creating offer permission cred if tt.offerPermissionCred != nil { tx.Creds = append(tx.Creds, tt.offerPermissionCred(t)) - signedBytes, err := txs.Codec.Marshal(txs.Version, tx) + signedBytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) require.NoError(t, err) tx.SetBytes(tx.Unsigned.Bytes(), signedBytes) } @@ -4876,7 +4880,7 @@ func TestCaminoStandardTxExecutorRewardsImportTx(t *testing.T) { } utxoID := utxo.InputID() utxoIDs[i] = utxoID[:] - utxoBytes, err := txs.Codec.Marshal(txs.Version, toMarshal) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, toMarshal) require.NoError(t, err) utxosBytes[i] = utxoBytes } @@ -5982,7 +5986,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { proposalWrapper := &txs.ProposalWrapper{Proposal: &dac.GeneralProposal{ Start: 100, End: 100 + dac.GeneralProposalMinDuration, Options: [][]byte{{}}, }} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposalWrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposalWrapper) require.NoError(t, err) baseTxWithBondAmt := func(bondAmt uint64) *txs.BaseTx { @@ -6092,7 +6096,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { return s }, utx: func(cfg *config.Config) *txs.AddProposalTx { - proposalBytes, err := txs.Codec.Marshal(txs.Version, &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{ + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{ Start: uint64(cfg.BerlinPhaseTime.Unix()) - 1, End: uint64(cfg.BerlinPhaseTime.Unix()) + 1, Options: []uint64{1}, @@ -6119,7 +6123,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { }, utx: func(cfg *config.Config) *txs.AddProposalTx { startTime := uint64(cfg.BerlinPhaseTime.Add(MaxFutureStartTime).Unix() + 1) - proposalBytes, err := txs.Codec.Marshal(txs.Version, &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{ + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{ Start: startTime, End: startTime + 1, Options: []uint64{1}, @@ -6150,7 +6154,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { Start: 100, End: 100 + dac.AddMemberProposalDuration, Options: []uint64{1}, }, }} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposalWrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposalWrapper) require.NoError(t, err) return &txs.AddProposalTx{ BaseTx: *baseTxWithBondAmt(cfg.CaminoConfig.DACProposalBondAmount), @@ -6178,7 +6182,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { Start: 100, End: 100 + dac.AddMemberProposalDuration, ApplicantAddress: applicantAddress, }, }} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposalWrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposalWrapper) require.NoError(t, err) return &txs.AddProposalTx{ BaseTx: *baseTxWithBondAmt(cfg.CaminoConfig.DACProposalBondAmount), @@ -6339,7 +6343,7 @@ func TestCaminoStandardTxExecutorAddProposalTx(t *testing.T) { Start: 100, End: 100 + dac.AddMemberProposalDuration, ApplicantAddress: applicantAddress, }, }} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposalWrapper) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposalWrapper) require.NoError(t, err) 
return &txs.AddProposalTx{ BaseTx: *baseTxWithBondAmt(cfg.CaminoConfig.DACProposalBondAmount), @@ -6398,7 +6402,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { feeUTXO := generate.UTXO(ids.ID{1, 2, 3, 4, 5}, ctx.AVAXAssetID, test.TxFee, feeOwner, ids.Empty, ids.Empty, true) simpleVote := &txs.VoteWrapper{Vote: &dac.SimpleVote{OptionIndex: 0}} - voteBytes, err := txs.Codec.Marshal(txs.Version, simpleVote) + voteBytes, err := txs.Codec.Marshal(txs.CodecVersion, simpleVote) require.NoError(t, err) baseTx := txs.BaseTx{BaseTx: avax.BaseTx{ @@ -6511,7 +6515,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { signers: [][]*secp256k1.PrivateKey{ {feeOwnerKey}, {voterKey1}, }, - expectedErr: ErrProposalInactive, + expectedErr: dac.ErrNotActive, }, "Proposal is not active yet": { state: func(t *testing.T, c *gomock.Controller, utx *txs.AddVoteTx, cfg *config.Config) *state.MockDiff { @@ -6533,7 +6537,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { signers: [][]*secp256k1.PrivateKey{ {feeOwnerKey}, {voterKey1}, }, - expectedErr: ErrProposalInactive, + expectedErr: dac.ErrNotYetActive, }, "Voter isn't consortium member": { state: func(t *testing.T, c *gomock.Controller, utx *txs.AddVoteTx, cfg *config.Config) *state.MockDiff { @@ -6596,7 +6600,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { }, utx: func(cfg *config.Config) *txs.AddVoteTx { vote := &txs.VoteWrapper{Vote: &dac.DummyVote{}} // not SimpleVote - voteBytes, err := txs.Codec.Marshal(txs.Version, vote) + voteBytes, err := txs.Codec.Marshal(txs.CodecVersion, vote) require.NoError(t, err) return &txs.AddVoteTx{ BaseTx: baseTx, @@ -6625,7 +6629,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { }, utx: func(cfg *config.Config) *txs.AddVoteTx { simpleVote := &txs.VoteWrapper{Vote: &dac.SimpleVote{OptionIndex: 5}} // just 3 options in proposal - voteBytes, err := txs.Codec.Marshal(txs.Version, simpleVote) + voteBytes, err := txs.Codec.Marshal(txs.CodecVersion, simpleVote) require.NoError(t, err) return &txs.AddVoteTx{ BaseTx: baseTx, @@ -6750,7 +6754,7 @@ func TestCaminoStandardTxExecutorAddVoteTx(t *testing.T) { }, utx: func(cfg *config.Config) *txs.AddVoteTx { simpleVote := &txs.VoteWrapper{Vote: &dac.SimpleVote{OptionIndex: 1}} - voteBytes, err := txs.Codec.Marshal(txs.Version, simpleVote) + voteBytes, err := txs.Codec.Marshal(txs.CodecVersion, simpleVote) require.NoError(t, err) return &txs.AddVoteTx{ BaseTx: baseTx, diff --git a/vms/platformvm/txs/executor/camino_visitor.go b/vms/platformvm/txs/executor/camino_visitor.go index 981a9121b13f..54c7da26e74d 100644 --- a/vms/platformvm/txs/executor/camino_visitor.go +++ b/vms/platformvm/txs/executor/camino_visitor.go @@ -144,49 +144,3 @@ func (*AtomicTxExecutor) AddVoteTx(*txs.AddVoteTx) error { func (*AtomicTxExecutor) FinishProposalsTx(*txs.FinishProposalsTx) error { return ErrWrongTxType } - -// MemPool - -func (v *MempoolTxVerifier) AddressStateTx(tx *txs.AddressStateTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) DepositTx(tx *txs.DepositTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) UnlockDepositTx(tx *txs.UnlockDepositTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ClaimTx(tx *txs.ClaimTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RegisterNodeTx(tx *txs.RegisterNodeTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RewardsImportTx(tx *txs.RewardsImportTx) error { - return v.standardTx(tx) -} - -func 
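The AddVoteTx tests above now distinguish the dac package's own errors instead of the removed ErrProposalInactive: dac.ErrNotActive for a proposal that has already finished and dac.ErrNotYetActive for one that has not started. The executor-side check (per the camino_tx_executor.go hunk earlier in this diff) becomes:

```go
if err := proposal.VerifyActive(chainTime); err != nil {
	// Typically dac.ErrNotYetActive (proposal not started) or
	// dac.ErrNotActive (proposal already ended).
	return err
}
```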
(v *MempoolTxVerifier) MultisigAliasTx(tx *txs.MultisigAliasTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddDepositOfferTx(tx *txs.AddDepositOfferTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddProposalTx(tx *txs.AddProposalTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddVoteTx(tx *txs.AddVoteTx) error { - return v.standardTx(tx) -} - -func (*MempoolTxVerifier) FinishProposalsTx(*txs.FinishProposalsTx) error { - return ErrWrongTxType -} diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 72315d3c4dd5..0342c8dc1cfe 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -25,11 +25,9 @@ import ( // Ensure Execute fails when there are not enough control sigs func TestCreateChainTxInsufficientControlSigs(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -60,11 +58,9 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { // Ensure Execute fails when an incorrect control signature is given func TestCreateChainTxWrongControlSig(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -102,11 +98,9 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { // its validator set doesn't exist func TestCreateChainTxNoSuchSubnet(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -136,11 +130,9 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { // Ensure valid tx passes semanticVerify func TestCreateChainTxValid(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -195,12 +187,9 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.config.ApricotPhase3Time = ap3Time - defer func() { - 
require.NoError(shutdownEnvironment(env)) - }() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index 182e28ae83c9..6d968daa4df0 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -49,12 +49,10 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.config.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) diff --git a/vms/platformvm/txs/executor/dac/camino_dac_test.go b/vms/platformvm/txs/executor/dac/camino_dac_test.go index a9424c776755..693a6e3da0c0 100644 --- a/vms/platformvm/txs/executor/dac/camino_dac_test.go +++ b/vms/platformvm/txs/executor/dac/camino_dac_test.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -27,7 +26,7 @@ import ( ) func TestProposalVerifierBaseFeeProposal(t *testing.T) { - ctx := snow.DefaultContextTest() + ctx := test.Context(t) // TODO @evlekht replace with test.PhaseLast when cairo phase will be added as last defaultConfig := test.Config(t, test.PhaseCairo) @@ -40,7 +39,7 @@ func TestProposalVerifierBaseFeeProposal(t *testing.T) { bondUTXO := generate.UTXO(ids.ID{1, 2, 3, 4, 6}, ctx.AVAXAssetID, proposalBondAmt, bondOwner, ids.Empty, ids.Empty, true) proposal := &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{End: 1, Options: []uint64{1}}} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposal) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposal) require.NoError(t, err) baseTx := txs.BaseTx{BaseTx: avax.BaseTx{ @@ -207,7 +206,7 @@ func TestProposalExecutorBaseFeeProposal(t *testing.T) { } func TestProposalVerifierAddMemberProposal(t *testing.T) { - ctx := snow.DefaultContextTest() + ctx := test.Context(t) defaultConfig := test.Config(t, test.PhaseLast) feeOwnerKey, _, feeOwner := generate.KeyAndOwner(t, test.Keys[0]) @@ -220,7 +219,7 @@ func TestProposalVerifierAddMemberProposal(t *testing.T) { bondUTXO := generate.UTXO(ids.ID{1, 2, 3, 4, 6}, ctx.AVAXAssetID, proposalBondAmt, bondOwner, ids.Empty, ids.Empty, true) proposal := &txs.ProposalWrapper{Proposal: &dac.AddMemberProposal{End: 1, ApplicantAddress: applicantAddress}} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposal) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposal) require.NoError(t, err) baseTx := txs.BaseTx{BaseTx: avax.BaseTx{ @@ -411,7 +410,7 @@ func TestProposalExecutorAddMemberProposal(t *testing.T) { } func TestProposalVerifierExcludeMemberProposal(t 
*testing.T) { - ctx := snow.DefaultContextTest() + ctx := test.Context(t) defaultConfig := test.Config(t, test.PhaseLast) feeOwnerKey, _, feeOwner := generate.KeyAndOwner(t, test.Keys[0]) @@ -427,7 +426,7 @@ func TestProposalVerifierExcludeMemberProposal(t *testing.T) { bondUTXO := generate.UTXO(ids.ID{1, 2, 3, 4, 6}, ctx.AVAXAssetID, proposalBondAmt, bondOwner, ids.Empty, ids.Empty, true) proposal := &txs.ProposalWrapper{Proposal: &dac.ExcludeMemberProposal{End: 1, MemberAddress: memberAddress}} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposal) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposal) require.NoError(t, err) baseTx := txs.BaseTx{BaseTx: avax.BaseTx{ @@ -891,7 +890,7 @@ func TestGetBondTxIDs(t *testing.T) { } func TestProposalVerifierFeeDistributionProposal(t *testing.T) { - ctx := snow.DefaultContextTest() + ctx := test.Context(t) defaultConfig := test.Config(t, test.PhaseLast) feeOwnerKey, _, feeOwner := generate.KeyAndOwner(t, test.Keys[0]) @@ -903,7 +902,7 @@ func TestProposalVerifierFeeDistributionProposal(t *testing.T) { bondUTXO := generate.UTXO(ids.ID{1, 2, 3, 4, 6}, ctx.AVAXAssetID, proposalBondAmt, bondOwner, ids.Empty, ids.Empty, true) proposal := &txs.ProposalWrapper{Proposal: &dac.FeeDistributionProposal{End: 1, Options: [][dac.FeeDistributionFractionsCount]uint64{{1}}}} - proposalBytes, err := txs.Codec.Marshal(txs.Version, proposal) + proposalBytes, err := txs.Codec.Marshal(txs.CodecVersion, proposal) require.NoError(t, err) baseTx := txs.BaseTx{BaseTx: avax.BaseTx{ diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 380b3dd5a489..d9e0ce071008 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
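Two substitutions recur throughout the DAC verifier tests above: the snow context now comes from the Camino test package's test.Context(t) instead of snow.DefaultContextTest(), and proposal payloads are serialized with the renamed txs.CodecVersion. A small, hypothetical test combining both:

```go
func TestProposalMarshalSketch(t *testing.T) {
	ctx := test.Context(t)
	require.NotNil(t, ctx)

	proposal := &txs.ProposalWrapper{Proposal: &dac.BaseFeeProposal{End: 1, Options: []uint64{1}}}
	_, err := txs.Codec.Marshal(txs.CodecVersion, proposal)
	require.NoError(t, err)
}
```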
package executor @@ -15,11 +15,9 @@ import ( ) func TestNewExportTx(t *testing.T) { - env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() type test struct { description string @@ -33,13 +31,13 @@ func TestNewExportTx(t *testing.T) { tests := []test{ { description: "P->X export", - destinationChainID: xChainID, + destinationChainID: env.ctx.XChainID, sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: defaultValidateStartTime, }, { description: "P->C export", - destinationChainID: cChainID, + destinationChainID: env.ctx.CChainID, sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.ApricotPhase5Time, }, @@ -59,19 +57,15 @@ func TestNewExportTx(t *testing.T) { ) require.NoError(err) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) + stateDiff.SetTimestamp(tt.timestamp) - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) - - verifier := MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } require.NoError(tx.Unsigned.Visit(&verifier)) }) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index aee0d184d5f8..b2654ec7c8c9 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( - "context" - "errors" "fmt" "math" "testing" @@ -21,10 +19,10 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -64,18 +62,23 @@ var ( defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake preFundedKeys = secp256k1.TestKeys() - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) lastAcceptedID = ids.GenerateTestID() testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] - errMissing = errors.New("missing") + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type mutableSharedMemory struct { atomic.SharedMemory } @@ -109,20 +112,25 @@ func (e *environment) SetState(blkID ids.ID, chainState state.Chain) { e.states[blkID] = chainState } -func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { +func newEnvironment(t *testing.T, postBanff, postCortina, postDurango bool) *environment { var isBootstrapped utils.Atomic[bool] isBootstrapped.Set(true) - config := defaultConfig(postBanff, postCortina) - clk := defaultClock(postBanff || postCortina) + config := defaultConfig(postBanff, postCortina, postDurango) + clk := defaultClock(postBanff || postCortina || postDurango) baseDB := versiondb.New(memdb.New()) - ctx, msm := defaultCtx(baseDB) + ctx := snowtest.Context(t, snowtest.PChainID) + m := atomic.NewMemory(baseDB) + msm := &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(ctx.ChainID), + } + ctx.SharedMemory = msm fx := defaultFx(clk, ctx.Log, isBootstrapped.Get()) rewards := reward.NewCalculator(config.RewardConfig) - baseState := defaultState(&config, ctx, baseDB, rewards) + baseState := defaultState(config, ctx, baseDB, rewards) atomicUTXOs := avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) uptimes := uptime.NewManager(baseState, clk) @@ -130,7 +138,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { txBuilder := builder.New( ctx, - &config, + config, clk, fx, baseState, @@ -139,7 +147,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { ) backend := Backend{ - Config: &config, + Config: config, Ctx: ctx, Clk: clk, Bootstrapped: &isBootstrapped, @@ -151,7 +159,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { env := &environment{ isBootstrapped: &isBootstrapped, - config: &config, + config: config, clk: clk, baseDB: baseDB, ctx: ctx, @@ -168,6 +176,30 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { addSubnet(t, env, txBuilder) + t.Cleanup(func() { + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + require := require.New(t) + + if env.isBootstrapped.Get() { + validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + for subnetID := range env.config.TrackedSubnets { + validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, subnetID)) + } + env.state.SetHeight(math.MaxUint64) + require.NoError(env.state.Commit()) + } + + require.NoError(env.state.Close()) + require.NoError(env.baseDB.Close()) + }) + return env } @@ -205,6 +237,7 @@ func addSubnet( stateDiff.AddTx(testSubnet1, status.Committed) require.NoError(stateDiff.Apply(env.state)) + require.NoError(env.state.Commit()) } func defaultState( @@ -224,7 +257,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -239,39 +271,7 @@ func defaultState( return state } -func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) - - msm := 
&mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), - } - ctx.SharedMemory = msm - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - - return ctx, msm -} - -func defaultConfig(postBanff, postCortina bool) config.Config { +func defaultConfig(postBanff, postCortina, postDurango bool) *config.Config { banffTime := mockable.MaxTime if postBanff { banffTime = defaultValidateEndTime.Add(-2 * time.Second) @@ -280,8 +280,12 @@ func defaultConfig(postBanff, postCortina bool) config.Config { if postCortina { cortinaTime = defaultValidateStartTime.Add(-2 * time.Second) } + durangoTime := mockable.MaxTime + if postDurango { + durangoTime = defaultValidateStartTime.Add(-2 * time.Second) + } - return config.Config{ + return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), Validators: validators.NewManager(), @@ -303,6 +307,7 @@ func defaultConfig(postBanff, postCortina bool) config.Config { ApricotPhase5Time: defaultValidateEndTime, BanffTime: banffTime, CortinaTime: cortinaTime, + DurangoTime: durangoTime, } } @@ -337,7 +342,7 @@ func (fvi *fxVMInt) Logger() logging.Logger { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } @@ -367,15 +372,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -416,30 +420,3 @@ func buildGenesisTest(ctx *snow.Context) []byte { return genesisBytes } - -func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.Get() { - validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - - for subnetID := range env.config.TrackedSubnets { - validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) - - if err := env.uptimes.StopTracking(validatorIDs, subnetID); err != nil { - return err - } - } - env.state.SetHeight( /*height*/ math.MaxUint64) - if err := env.state.Commit(); err != nil { - return err - } - } - - return utils.Err( - env.state.Close(), - env.baseDB.Close(), - ) -} diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 3d78429cf906..fed09bd882ce 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, 
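helpers_test.go now threads the postDurango flag into the config (returned as a pointer) and passes an activation time to the linear codec constructor: tests use a zero time, while production code supplies the Durango time via InitCodec as shown in the codec.go hunk earlier. Fragment of the updated setup:

```go
cfg := defaultConfig(postBanff, postCortina, postDurango) // *config.Config, DurangoTime included
clk := defaultClock(postBanff || postCortina || postDurango)

registry := linearcodec.NewDefault(time.Time{}) // activation time is now a required argument
```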
Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -22,10 +22,7 @@ import ( ) func TestNewImportTx(t *testing.T) { - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) type test struct { description string @@ -67,7 +64,7 @@ func TestNewImportTx(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(t, err) inputID := utxo.InputID() @@ -118,9 +115,9 @@ func TestNewImportTx(t *testing.T) { }, { description: "attempting to import from C-chain", - sourceChainID: cChainID, + sourceChainID: env.ctx.CChainID, sharedMemory: fundedSharedMemory( - cChainID, + env.ctx.CChainID, map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, }, @@ -182,19 +179,15 @@ func TestNewImportTx(t *testing.T) { require.Equal(env.config.TxFee, totalIn-totalOut) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) + stateDiff.SetTimestamp(tt.timestamp) - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) - - verifier := MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } require.NoError(tx.Unsigned.Visit(&verifier)) }) diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index bd329b3f2576..0f082cb8b296 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -57,12 +56,6 @@ type ProposalTxExecutor struct { // [OnAbortState] is modified by this struct's methods to // reflect changes made to the state if the proposal is aborted. OnAbortState state.Diff - - // outputs populated by this struct's methods: - // - // [PrefersCommit] is true iff this node initially prefers to - // commit this block transaction. 
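With the MempoolTxVerifier path going away (its Camino methods are deleted in the camino_visitor.go hunk above, and these tests no longer construct it), TestNewImportTx and TestNewExportTx verify transactions by executing them directly against a state diff:

```go
stateDiff, err := state.NewDiff(lastAcceptedID, env)
require.NoError(err)
stateDiff.SetTimestamp(tt.timestamp)

verifier := StandardTxExecutor{
	Backend: &env.backend,
	State:   stateDiff,
	Tx:      tx,
}
require.NoError(tx.Unsigned.Visit(&verifier))
```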
- PrefersCommit bool } func (*ProposalTxExecutor) CreateChainTx(*txs.CreateChainTx) error { @@ -149,8 +142,6 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -197,8 +188,6 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, tx.Outs) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -246,8 +235,6 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -296,19 +283,9 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { return err } - changes, err := AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) - if err != nil { - return err - } - - // Update the state if this tx is committed - e.OnCommitState.SetTimestamp(newChainTime) - changes.Apply(e.OnCommitState) - - e.PrefersCommit = !newChainTime.After(now.Add(SyncBound)) - // Note that state doesn't change if this proposal is aborted - return nil + _, err = AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) + return err } func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error { @@ -352,17 +329,6 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error ) } - // retrieve primaryNetworkValidator before possibly removing it. - primaryNetworkValidator, err := e.OnCommitState.GetCurrentValidator( - constants.PrimaryNetworkID, - stakerToReward.NodeID, - ) - if err != nil { - // This should never error because the staker set is in memory and - // primary network validators are removed last. 
- return err - } - stakerTx, _, err := e.OnCommitState.GetTx(stakerToReward.TxID) if err != nil { return fmt.Errorf("failed to get next removed staker tx: %w", err) @@ -405,10 +371,7 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error return err } e.OnAbortState.SetCurrentSupply(stakerToReward.SubnetID, newSupply) - - // handle option preference - e.PrefersCommit, err = e.shouldBeRewarded(stakerToReward, primaryNetworkValidator) - return err + return nil } func (e *ProposalTxExecutor) rewardValidatorTx(uValidatorTx txs.ValidatorTx, validator *state.Staker) error { @@ -645,26 +608,3 @@ func (e *ProposalTxExecutor) rewardDelegatorTx(uDelegatorTx txs.DelegatorTx, del } return nil } - -func (e *ProposalTxExecutor) shouldBeRewarded(stakerToReward, primaryNetworkValidator *state.Staker) (bool, error) { - expectedUptimePercentage := e.Config.UptimePercentage - if stakerToReward.SubnetID != constants.PrimaryNetworkID { - transformSubnet, err := GetTransformSubnetTx(e.OnCommitState, stakerToReward.SubnetID) - if err != nil { - return false, fmt.Errorf("failed to calculate uptime: %w", err) - } - - expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator - } - - // TODO: calculate subnet uptimes - uptime, err := e.Uptimes.CalculateUptimePercentFrom( - primaryNetworkValidator.NodeID, - constants.PrimaryNetworkID, - primaryNetworkValidator.StartTime, - ) - if err != nil { - return false, fmt.Errorf("failed to calculate uptime: %w", err) - } - return uptime >= expectedUptimePercentage, nil -} diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 0044d27a32ea..e01bd267a024 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -25,7 +25,7 @@ import ( func TestProposalTxExecuteAddDelegator(t *testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) @@ -46,9 +46,11 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(t, err) @@ -74,9 +76,11 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(t, err) @@ -87,7 +91,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { @@ -241,11 +245,8 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, @@ -282,14 +283,11 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - nodeID := preFundedKeys[0].PublicKey().Address() + defer env.ctx.Lock.Unlock() + nodeID := genesisNodeIDs[0] { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network @@ -298,7 +296,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix())+1, - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -330,7 +328,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -353,12 +351,9 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { } // Add a validator to pending validator set of primary network - key, err := secp256k1.NewPrivateKey() - require.NoError(err) - pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis - 
dsStartTime := defaultGenesisTime.Add(10 * time.Second) + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() + dsStartTime := defaultValidateStartTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) addDSTx, err := env.txBuilder.NewAddValidatorTx( @@ -366,7 +361,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, @@ -402,9 +397,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require.ErrorIs(err, ErrNotValidator) } + addValTx := addDSTx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addDSTx.ID(), - addDSTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -508,7 +505,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator start validating at/before current timestamp // First, advance the timestamp - newTimestamp := defaultGenesisTime.Add(2 * time.Second) + newTimestamp := defaultValidateStartTime.Add(2 * time.Second) env.state.SetTimestamp(newTimestamp) { @@ -516,8 +513,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) @@ -540,7 +537,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { } // reset the timestamp - env.state.SetTimestamp(defaultGenesisTime) + env.state.SetTimestamp(defaultValidateStartTime) // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet @@ -548,16 +545,18 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -573,7 +572,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix())+1, // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -603,11 +602,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Too few signatures tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - 
ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -639,11 +638,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Control Signature from invalid key (keys[3] is not a control key) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr ) @@ -674,19 +673,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -715,19 +716,18 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { func TestProposalTxExecuteAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() + chainTime := env.state.GetTimestamp() { // Case: Validator's start time too early tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix()), + uint64(chainTime.Unix()), uint64(defaultValidateEndTime.Unix()), nodeID, ids.ShortEmpty, @@ -784,12 +784,14 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { } { + nodeID := genesisNodeIDs[0] + // Case: Validator already validating primary network tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - ids.NodeID(preFundedKeys[0].Address()), + nodeID, ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, @@ -815,7 +817,7 @@ func 
TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator in pending validator set of primary network - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount uint64(startTime.Unix()), // start time @@ -828,9 +830,12 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { ) require.NoError(err) - staker, err := state.NewPendingStaker( + addValTx := tx.Unsigned.(*txs.AddValidatorTx) + staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), + 0, ) require.NoError(err) diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index 5871b9eef531..3ee34aeb672d 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -25,10 +25,7 @@ import ( func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -128,10 +125,7 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -225,10 +219,7 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -264,16 +255,20 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + addDelTx.StartTime(), 1000000, ) require.NoError(err) @@ -345,10 +340,7 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, true /*=postBanff*/, true 
/*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -384,18 +376,22 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + time.Unix(int64(vdrStartTime), 0), vdrRewardAmt, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delRewardAmt := uint64(1000000) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + time.Unix(int64(delStartTime), 0), delRewardAmt, ) require.NoError(err) @@ -560,10 +556,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -599,18 +592,22 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), vdrRewardAmt, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delRewardAmt := uint64(1000000) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + time.Unix(int64(delStartTime), 0), delRewardAmt, ) require.NoError(err) @@ -718,10 +715,7 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) dummyHeight := uint64(1) initialSupply, err := env.state.GetCurrentSupply(constants.PrimaryNetworkID) @@ -759,16 +753,20 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + addDelTx.StartTime(), 1000000, ) require.NoError(err) diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index baa539410d75..dff31a1f30b2 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package executor @@ -17,6 +17,7 @@ import ( "errors" "fmt" "math" + "time" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" @@ -47,13 +48,17 @@ var ( ErrDuplicateValidator = errors.New("duplicate validator") ErrDelegateToPermissionedValidator = errors.New("delegation to permissioned validator") ErrWrongStakedAssetID = errors.New("incorrect staked assetID") - ErrDUpgradeNotActive = errors.New("attempting to use a D-upgrade feature prior to activation") + ErrDurangoUpgradeNotActive = errors.New("attempting to use a Durango-upgrade feature prior to activation") ) // verifySubnetValidatorPrimaryNetworkRequirements verifies the primary // network requirements for [subnetValidator]. An error is returned if they // are not fulfilled. -func verifySubnetValidatorPrimaryNetworkRequirements(chainState state.Chain, subnetValidator txs.Validator) error { +func verifySubnetValidatorPrimaryNetworkRequirements( + isDurangoActive bool, + chainState state.Chain, + subnetValidator txs.Validator, +) error { primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, subnetValidator.NodeID) if err == database.ErrNotFound { return fmt.Errorf( @@ -72,8 +77,12 @@ func verifySubnetValidatorPrimaryNetworkRequirements(chainState state.Chain, sub // Ensure that the period this validator validates the specified subnet // is a subset of the time they validate the primary network. + startTime := chainState.GetTimestamp() + if !isDurangoActive { + startTime = subnetValidator.StartTime() + } if !txs.BoundedBy( - subnetValidator.StartTime(), + startTime, subnetValidator.EndTime(), primaryNetworkValidator.StartTime, primaryNetworkValidator.EndTime, @@ -101,8 +110,20 @@ func verifyAddValidatorTx( return nil, err } - duration := tx.Validator.Duration() + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, err + } + + startTime := currentTimestamp + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := tx.EndTime().Sub(startTime) switch { case tx.Validator.Wght < backend.Config.MinValidatorStake: // Ensure validator is staking at least the minimum amount @@ -133,16 +154,8 @@ func verifyAddValidatorTx( return outs, nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return nil, err } _, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) @@ -175,14 +188,9 @@ func verifyAddValidatorTx( return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return nil, ErrFutureStakeTime - } - - return outs, nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // verifyAddSubnetValidatorTx carries out the validation for an @@ -198,7 +206,20 @@ func verifyAddSubnetValidatorTx( return err } - duration := tx.Validator.Duration() + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + + startTime := currentTimestamp + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := tx.EndTime().Sub(startTime) + switch { case duration < backend.Config.MinStakeDuration: // Ensure staking length is not too short @@ -213,16 +234,8 @@ func verifyAddSubnetValidatorTx( return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return err } _, err := GetValidator(chainState, tx.SubnetValidator.Subnet, tx.Validator.NodeID) @@ -242,7 +255,7 @@ func verifyAddSubnetValidatorTx( ) } - if err := verifySubnetValidatorPrimaryNetworkRequirements(chainState, tx.Validator); err != nil { + if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { return err } @@ -265,14 +278,9 @@ func verifyAddSubnetValidatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // Returns the representation of [tx.NodeID] validating [tx.Subnet]. 
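A minimal sketch (not part of this patch) of the start-time selection that the validator and subnet-validator hunks above now share: once Durango is active the current chain time replaces the transaction's StartTime, and the stake duration is measured from that effective start. The helper name below is an assumption for illustration only.

    func effectiveStartAndDuration(isDurangoActive bool, chainTime, txStart, txEnd time.Time) (time.Time, time.Duration) {
    	// Post-Durango the staker starts at the current chain time.
    	start := chainTime
    	if !isDurangoActive {
    		// Pre-Durango the transaction-supplied StartTime is still honored.
    		start = txStart
    	}
    	return start, txEnd.Sub(start)
    }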
@@ -294,6 +302,14 @@ func verifyRemoveSubnetValidatorTx( return nil, false, err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, false, err + } + isCurrentValidator := true vdr, err := chainState.GetCurrentValidator(tx.Subnet, tx.NodeID) if err == database.ErrNotFound { @@ -325,7 +341,7 @@ func verifyRemoveSubnetValidatorTx( return nil, false, err } - fee, err := chainState.GetBaseFee() + fee, err := getBaseFee(chainState, backend.Config) if err != nil { return nil, false, err } @@ -364,7 +380,23 @@ func verifyAddDelegatorTx( return nil, err } - duration := tx.Validator.Duration() + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, err + } + + var ( + endTime = tx.EndTime() + startTime = currentTimestamp + ) + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := endTime.Sub(startTime) + switch { case duration < backend.Config.MinStakeDuration: // Ensure staking length is not too short @@ -387,16 +419,8 @@ func verifyAddDelegatorTx( return outs, nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return nil, err } primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) @@ -417,21 +441,22 @@ func verifyAddDelegatorTx( maximumWeight = safemath.Min(maximumWeight, backend.Config.MaxValidatorStake) } - txID := sTx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { - return nil, err - } - if !txs.BoundedBy( - newStaker.StartTime, - newStaker.EndTime, + startTime, + endTime, primaryNetworkValidator.StartTime, primaryNetworkValidator.EndTime, ) { return nil, ErrPeriodMismatch } - overDelegated, err := overDelegated(chainState, primaryNetworkValidator, maximumWeight, newStaker) + overDelegated, err := overDelegated( + chainState, + primaryNetworkValidator, + maximumWeight, + tx.Validator.Wght, + startTime, + endTime, + ) if err != nil { return nil, err } @@ -453,14 +478,9 @@ func verifyAddDelegatorTx( return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return nil, ErrFutureStakeTime - } - - return outs, nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // verifyAddPermissionlessValidatorTx carries out the validation for an @@ -476,20 +496,26 @@ func verifyAddPermissionlessValidatorTx( return err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + if !backend.Bootstrapped.Get() { return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + startTime := currentTimestamp + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := tx.EndTime().Sub(startTime) + + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return err } validatorRules, err := getValidatorRules(backend, chainState, tx.Subnet) @@ -497,7 +523,6 @@ func verifyAddPermissionlessValidatorTx( return err } - duration := tx.Validator.Duration() stakedAssetID := tx.StakeOuts[0].AssetID() switch { case tx.Validator.Wght < validatorRules.minValidatorStake: @@ -550,7 +575,7 @@ func verifyAddPermissionlessValidatorTx( var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { - if err := verifySubnetValidatorPrimaryNetworkRequirements(chainState, tx.Validator); err != nil { + if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { return err } @@ -577,14 +602,9 @@ func verifyAddPermissionlessValidatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // verifyAddPermissionlessDelegatorTx carries out the validation for an @@ -600,19 +620,29 @@ func verifyAddPermissionlessDelegatorTx( return err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + if !backend.Bootstrapped.Get() { return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "chain timestamp (%s) not before validator's start time (%s)", - currentTimestamp, - startTime, - ) + var ( + endTime = tx.EndTime() + startTime = currentTimestamp + ) + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := endTime.Sub(startTime) + + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return err } delegatorRules, err := getDelegatorRules(backend, chainState, tx.Subnet) @@ -620,7 +650,6 @@ func verifyAddPermissionlessDelegatorTx( return err } - duration := tx.Validator.Duration() stakedAssetID := tx.StakeOuts[0].AssetID() switch { case tx.Validator.Wght < delegatorRules.minDelegatorStake: @@ -664,21 +693,22 @@ func verifyAddPermissionlessDelegatorTx( } maximumWeight = safemath.Min(maximumWeight, delegatorRules.maxValidatorStake) - txID := sTx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { - return err - } - if !txs.BoundedBy( - newStaker.StartTime, - newStaker.EndTime, + startTime, + endTime, validator.StartTime, validator.EndTime, ) { return ErrPeriodMismatch } - overDelegated, err := overDelegated(chainState, validator, maximumWeight, newStaker) + overDelegated, err := overDelegated( + chainState, + validator, + maximumWeight, + tx.Validator.Wght, + startTime, + endTime, + ) if err != nil { return err } @@ -721,14 +751,9 @@ func verifyAddPermissionlessDelegatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // Returns an error if the given tx is invalid. @@ -742,8 +767,8 @@ func verifyTransferSubnetOwnershipTx( sTx *txs.Tx, tx *txs.TransferSubnetOwnershipTx, ) error { - if !backend.Config.IsDActivated(chainState.GetTimestamp()) { - return ErrDUpgradeNotActive + if !backend.Config.IsDurangoActivated(chainState.GetTimestamp()) { + return ErrDurangoUpgradeNotActive } // Verify the tx is well-formed @@ -751,6 +776,10 @@ func verifyTransferSubnetOwnershipTx( return err } + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + if !backend.Bootstrapped.Get() { // Not bootstrapped yet -- don't need to do full verification. 
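The Durango gate used throughout these verifiers (and by the renamed ErrDurangoUpgradeNotActive check above) is expected to reduce to a single comparison against the configured activation time; the body below is an assumption sketched for illustration, not the upstream implementation.

    func isDurangoActivated(durangoTime, chainTime time.Time) bool {
    	// Active once the chain time reaches the configured DurangoTime.
    	return !chainTime.Before(durangoTime)
    }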
return nil @@ -777,3 +806,36 @@ func verifyTransferSubnetOwnershipTx( return nil } + +// Ensure the proposed validator starts after the current time +func verifyStakerStartTime(isDurangoActive bool, chainTime, stakerTime time.Time) error { + // Pre Durango activation, start time must be after current chain time. + // Post Durango activation, start time is not validated + if isDurangoActive { + return nil + } + + if !chainTime.Before(stakerTime) { + return fmt.Errorf( + "%w: %s >= %s", + ErrTimestampNotBeforeStartTime, + chainTime, + stakerTime, + ) + } + return nil +} + +func verifyStakerStartsSoon(isDurangoActive bool, chainTime, stakerStartTime time.Time) error { + if isDurangoActive { + return nil + } + + // Make sure the tx doesn't start too far in the future. This is done last + // to allow the verifier visitor to explicitly check for this error. + maxStartTime := chainTime.Add(MaxFutureStartTime) + if stakerStartTime.After(maxStartTime) { + return ErrFutureStakeTime + } + return nil +} diff --git a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go index 867e4c18e03c..0f0852ebca1c 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -161,13 +161,15 @@ func overDelegated( state state.Chain, validator *state.Staker, weightLimit uint64, - delegator *state.Staker, + delegatorWeight uint64, + delegatorStartTime time.Time, + delegatorEndTime time.Time, ) (bool, error) { - maxWeight, err := GetMaxWeight(state, validator, delegator.StartTime, delegator.EndTime) + maxWeight, err := GetMaxWeight(state, validator, delegatorStartTime, delegatorEndTime) if err != nil { return true, err } - newMaxWeight, err := math.Add64(maxWeight, delegator.Weight) + newMaxWeight, err := math.Add64(maxWeight, delegatorWeight) if err != nil { return true, err } diff --git a/vms/platformvm/txs/executor/staker_tx_verification_test.go b/vms/platformvm/txs/executor/staker_tx_verification_test.go index aa8953c8f22e..1431f32e56f5 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
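A hypothetical in-package test sketch (not part of this patch) exercising the two helpers added above, mirroring the require-based style of the surrounding tests; the test name and timestamps are illustrative assumptions.

    func TestStakerStartTimeChecksSketch(t *testing.T) {
    	chainTime := time.Unix(1_000_000, 0)

    	// Post-Durango both checks are no-ops: the chain time is used as the start time.
    	require.NoError(t, verifyStakerStartTime(true /*=isDurangoActive*/, chainTime, chainTime.Add(-time.Hour)))
    	require.NoError(t, verifyStakerStartsSoon(true /*=isDurangoActive*/, chainTime, chainTime.Add(24*365*time.Hour)))

    	// Pre-Durango the staker must start strictly after the chain time ...
    	err := verifyStakerStartTime(false /*=isDurangoActive*/, chainTime, chainTime)
    	require.ErrorIs(t, err, ErrTimestampNotBeforeStartTime)

    	// ... and no further out than MaxFutureStartTime.
    	err = verifyStakerStartsSoon(false /*=isDurangoActive*/, chainTime, chainTime.Add(MaxFutureStartTime+time.Second))
    	require.ErrorIs(t, err, ErrFutureStakeTime)
    }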
package executor @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -27,6 +28,8 @@ import ( ) func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { + ctx := snowtest.Context(t, snowtest.PChainID) + type test struct { name string backendF func(*gomock.Controller) *Backend @@ -37,6 +40,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { } var ( + // in the following tests we set the fork time for forks we want active + // to activeForkTime, which is ensured to be before any other time related + // quantity (based on now) + activeForkTime = time.Unix(0, 0) + now = time.Now().Truncate(time.Second) // after activeForkTime + subnetID = ids.GenerateTestID() customAssetID = ids.GenerateTestID() unsignedTransformTx = &txs.TransformSubnetTx{ @@ -52,21 +61,24 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { Creds: []verify.Verifiable{}, } // This tx already passed syntactic verification. + startTime = now.Add(time.Second) + endTime = startTime.Add(time.Second * time.Duration(unsignedTransformTx.MinStakeDuration)) verifiedTx = txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{ SyntacticallyVerified: true, BaseTx: avax.BaseTx{ - NetworkID: 1, - BlockchainID: ids.GenerateTestID(), + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: []*avax.TransferableOutput{}, Ins: []*avax.TransferableInput{}, }, }, Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), - Start: 1, - End: 1 + uint64(unsignedTransformTx.MinStakeDuration), - Wght: unsignedTransformTx.MinValidatorStake, + // Note: [Start] is not set here as it will be ignored + // Post-Durango in favor of the current chain time + End: uint64(endTime.Unix()), + Wght: unsignedTransformTx.MinValidatorStake, }, Subnet: subnetID, StakeOuts: []*avax.TransferableOutput{ @@ -98,7 +110,10 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "fail syntactic verification", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, } }, stateF: func(*gomock.Controller) state.Chain { @@ -116,18 +131,23 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "not bootstrapped", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: &utils.Atomic[bool]{}, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - return nil + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after Durango fork activation since now.After(activeForkTime) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx }, txF: func() *txs.AddPermissionlessValidatorTx { - return nil + return &txs.AddPermissionlessValidatorTx{} }, expectedErr: nil, }, @@ -137,7 +157,11 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + CortinaTime: activeForkTime, + DurangoTime: mockable.MaxTime, + }, Bootstrapped: bootstrapped, } }, @@ 
-160,13 +184,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -186,13 +213,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -212,13 +242,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -239,13 +272,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -256,9 +292,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake tx.DelegationShares = unsignedTransformTx.MinDelegationFee + // Note the duration is 1 less than the minimum - tx.Validator.Start = 1 - tx.Validator.End = uint64(unsignedTransformTx.MinStakeDuration) + tx.Validator.End = tx.Validator.Start + uint64(unsignedTransformTx.MinStakeDuration) - 1 return &tx }, expectedErr: ErrStakeTooShort, @@ -269,13 +305,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: 
bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(time.Unix(1, 0)) // chain time is after fork activation since time.Unix(1, 0).After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -286,9 +325,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake tx.DelegationShares = unsignedTransformTx.MinDelegationFee + // Note the duration is more than the maximum - tx.Validator.Start = 1 - tx.Validator.End = 2 + uint64(unsignedTransformTx.MaxStakeDuration) + tx.Validator.End = uint64(unsignedTransformTx.MaxStakeDuration) + 2 return &tx }, expectedErr: ErrStakeTooLong, @@ -299,15 +338,18 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) - state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) - return state + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) + mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx @@ -331,17 +373,20 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) - state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) + mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) // State says validator exists - state.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, nil) - return state + mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, nil) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx @@ -357,20 +402,22 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is after latest fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) 
mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) // Validator time isn't subset of primary network validator time primaryNetworkVdr := &state.Staker{ - StartTime: verifiedTx.StartTime().Add(time.Second), - EndTime: verifiedTx.EndTime(), + EndTime: verifiedTx.EndTime().Add(-1 * time.Second), } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState @@ -403,20 +450,20 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { FlowChecker: flowChecker, Config: &config.Config{ AddSubnetValidatorFee: 1, + DurangoTime: activeForkTime, // activate latest fork, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is after latest fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) primaryNetworkVdr := &state.Staker{ - StartTime: verifiedTx.StartTime(), - EndTime: verifiedTx.EndTime(), + EndTime: mockable.MaxTime, } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState @@ -448,15 +495,17 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { return &Backend{ FlowChecker: flowChecker, Config: &config.Config{ + CortinaTime: activeForkTime, + DurangoTime: mockable.MaxTime, AddSubnetValidatorFee: 1, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is Cortina fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) @@ -473,7 +522,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { // Note this copies [verifiedTx] tx := verifiedTx - tx.Validator.Start = uint64(MaxFutureStartTime.Seconds()) + 1 + tx.Validator.Start = uint64(now.Add(MaxFutureStartTime).Add(time.Second).Unix()) tx.Validator.End = tx.Validator.Start + uint64(unsignedTransformTx.MinStakeDuration) return &tx }, @@ -499,20 +548,20 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { FlowChecker: flowChecker, Config: &config.Config{ AddSubnetValidatorFee: 1, + DurangoTime: activeForkTime, // activate latest fork, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain 
time is after Durango fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) primaryNetworkVdr := &state.Staker{ - StartTime: time.Unix(0, 0), - EndTime: mockable.MaxTime, + EndTime: mockable.MaxTime, } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 85cab6d192ac..16d8746f6d2e 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -34,8 +34,9 @@ import ( var ( _ txs.Visitor = (*StandardTxExecutor)(nil) - errEmptyNodeID = errors.New("validator nodeID cannot be empty") - errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") + errEmptyNodeID = errors.New("validator nodeID cannot be empty") + errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") + errMissingStartTimePreDurango = errors.New("staker transactions must have a StartTime pre-Durango") ) type StandardTxExecutor struct { @@ -63,14 +64,21 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + baseTxCreds, err := verifyPoASubnetAuthorization(e.Backend, e.State, e.Tx, tx.SubnetID, tx.SubnetAuth) if err != nil { return err } // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(timestamp) + createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -107,9 +115,16 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createSubnetTxFee := e.Config.GetCreateSubnetTxFee(timestamp) + createSubnetTxFee := e.Config.GetCreateSubnetTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -140,6 +155,14 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + e.Inputs = set.NewSet[ids.ID](len(tx.ImportedInputs)) utxoIDs := make([][]byte, len(tx.ImportedInputs)) for i, in := range 
tx.ImportedInputs { @@ -181,7 +204,7 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { copy(ins, tx.Ins) copy(ins[len(tx.Ins):], tx.ImportedInputs) - fee, err := e.State.GetBaseFee() + fee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -224,6 +247,14 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.ExportedOutputs)) copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.ExportedOutputs) @@ -234,7 +265,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { } } - fee, err := e.State.GetBaseFee() + fee, err := getBaseFee(e.State, e.Backend.Config) if err != nil { return err } @@ -274,7 +305,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { Out: out.Out, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("failed to marshal UTXO: %w", err) } @@ -311,13 +342,11 @@ func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) @@ -342,16 +371,13 @@ func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -365,16 +391,13 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingDelegator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -414,6 +437,14 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Note: math.MaxInt32 * time.Second < math.MaxInt64 - so this can never // overflow. 
if time.Duration(tx.MaxStakeDuration)*time.Second > e.Backend.Config.MaxStakeDuration { @@ -465,13 +496,11 @@ func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionl return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) @@ -499,16 +528,13 @@ func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionl return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingDelegator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -532,13 +558,12 @@ func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwn txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { - if !e.Backend.Config.IsDActivated(e.State.GetTimestamp()) { - return ErrDUpgradeNotActive + if !e.Backend.Config.IsDurangoActivated(e.State.GetTimestamp()) { + return ErrDurangoUpgradeNotActive } // Verify the tx is well-formed @@ -546,6 +571,10 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { return err } + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + // Verify the flowcheck if err := e.FlowChecker.VerifySpend( tx, @@ -560,9 +589,78 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { return err } + txID := e.Tx.ID() // Consume the UTXOS avax.Consume(e.State, tx.Ins) // Produce the UTXOS - avax.Produce(e.State, e.Tx.ID(), tx.Outs) + avax.Produce(e.State, txID, tx.Outs) + return nil +} + +// Creates the staker as defined in [stakerTx] and adds it to [e.State]. +func (e *StandardTxExecutor) putStaker(stakerTx txs.Staker) error { + var ( + chainTime = e.State.GetTimestamp() + txID = e.Tx.ID() + staker *state.Staker + err error + ) + + if !e.Config.IsDurangoActivated(chainTime) { + // Pre-Durango, stakers set a future [StartTime] and are added to the + // pending staker set. They are promoted to the current staker set once + // the chain time reaches [StartTime]. + scheduledStakerTx, ok := stakerTx.(txs.ScheduledStaker) + if !ok { + return fmt.Errorf("%w: %T", errMissingStartTimePreDurango, stakerTx) + } + staker, err = state.NewPendingStaker(txID, scheduledStakerTx) + } else { + // Only calculate the potentialReward for permissionless stakers. + // Recall that we only need to check if this is a permissioned + // validator as there are no permissioned delegators + var potentialReward uint64 + if !stakerTx.CurrentPriority().IsPermissionedValidator() { + subnetID := stakerTx.SubnetID() + currentSupply, err := e.State.GetCurrentSupply(subnetID) + if err != nil { + return err + } + + rewards, err := GetRewardsCalculator(e.Backend, e.State, subnetID) + if err != nil { + return err + } + + // Post-Durango, stakers are immediately added to the current staker + // set. Their [StartTime] is the current chain time. 
+ stakeDuration := stakerTx.EndTime().Sub(chainTime) + potentialReward = rewards.Calculate( + stakeDuration, + stakerTx.Weight(), + currentSupply, + ) + + e.State.SetCurrentSupply(subnetID, currentSupply+potentialReward) + } + + staker, err = state.NewCurrentStaker(txID, stakerTx, chainTime, potentialReward) + } + if err != nil { + return err + } + + switch priority := staker.Priority; { + case priority.IsCurrentValidator(): + e.State.PutCurrentValidator(staker) + case priority.IsCurrentDelegator(): + e.State.PutCurrentDelegator(staker) + case priority.IsPendingValidator(): + e.State.PutPendingValidator(staker) + case priority.IsPendingDelegator(): + e.State.PutPendingDelegator(staker) + default: + return fmt.Errorf("staker %s, unexpected priority %d", staker.TxID, priority) + } return nil } diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 7a4d6227519b..e8e285cc606c 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -29,18 +29,22 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) // This tests that the math performed during TransformSubnetTx execution can @@ -51,14 +55,12 @@ var errTest = errors.New("non-nil error") func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() chainTime := env.state.GetTimestamp() - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tests := []struct { banffTime time.Time @@ -109,30 +111,32 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { func TestStandardTxExecutorAddDelegator(t *testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() - newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * 
time.Second).Unix()) - newValidatorEndTime := uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()) + newValidatorStartTime := defaultValidateStartTime.Add(5 * time.Second) + newValidatorEndTime := defaultValidateEndTime.Add(-5 * time.Second) // [addMinStakeValidator] adds a new validator to the primary network's // pending validator set with the minimum staking amount addMinStakeValidator := func(target *environment) { tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MinValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shares + target.config.MinValidatorStake, // stake amount + uint64(newValidatorStartTime.Unix()), // start time + uint64(newValidatorEndTime.Unix()), // end time + newValidatorID, // node ID + rewardAddress, // Reward Address + reward.PercentDenominator, // Shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + newValidatorStartTime, 0, ) require.NoError(t, err) @@ -147,20 +151,22 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { // pending validator set with the maximum staking amount addMaxStakeValidator := func(target *environment) { tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MaxValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shared + target.config.MaxValidatorStake, // stake amount + uint64(newValidatorStartTime.Unix()), // start time + uint64(newValidatorEndTime.Unix()), // end time + newValidatorID, // node ID + rewardAddress, // Reward Address + reward.PercentDenominator, // Shared []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + newValidatorStartTime, 0, ) require.NoError(t, err) @@ -171,67 +177,63 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { description string stakeAmount uint64 - startTime uint64 - endTime uint64 + startTime time.Time + endTime time.Time nodeID ids.NodeID rewardAddress ids.ShortID feeKeys []*secp256k1.PrivateKey setup func(*environment) AP3Time time.Time expectedExecutionErr error - expectedMempoolErr error } tests := []test{ { description: "validator stops validating earlier than delegator", stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()) + 1, - endTime: uint64(defaultValidateEndTime.Unix()) + 1, + startTime: defaultValidateStartTime.Add(time.Second), + endTime: defaultValidateEndTime.Add(time.Second), nodeID: nodeID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: fmt.Sprintf("delegator should not be added more than (%s) 
in the future", MaxFutureStartTime), stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(currentTimestamp.Add(MaxFutureStartTime + time.Second).Unix()), - endTime: uint64(currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + time.Second).Unix()), + startTime: currentTimestamp.Add(MaxFutureStartTime + time.Second), + endTime: currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + time.Second), nodeID: nodeID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFutureStakeTime, - expectedMempoolErr: nil, }, { description: "validator not in the current or pending validator sets", stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()), - endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), + startTime: defaultValidateStartTime.Add(5 * time.Second), + endTime: defaultValidateEndTime.Add(-5 * time.Second), nodeID: newValidatorID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: database.ErrNotFound, - expectedMempoolErr: database.ErrNotFound, }, { description: "delegator starts before validator", stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime - 1, // start validating subnet before primary network + startTime: newValidatorStartTime.Add(-1 * time.Second), // start validating subnet before primary network endTime: newValidatorEndTime, nodeID: newValidatorID, rewardAddress: rewardAddress, @@ -239,20 +241,18 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMinStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: "delegator stops before validator", stakeAmount: dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime, - endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network + endTime: newValidatorEndTime.Add(time.Second), // stop validating subnet after stopping validating primary network nodeID: newValidatorID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: "valid", @@ -265,29 +265,27 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMinStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: nil, - expectedMempoolErr: nil, }, { description: "starts delegating at current timestamp", stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(currentTimestamp.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time + startTime: currentTimestamp, // start time + endTime: defaultValidateEndTime, // end time nodeID: nodeID, // node ID rewardAddress: rewardAddress, // Reward Address feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrTimestampNotBeforeStartTime, - expectedMempoolErr: ErrTimestampNotBeforeStartTime, }, { description: "tx fee paying key has no funds", - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()) + 1, // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time 
- nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: defaultValidateStartTime.Add(time.Second), // start time + endTime: defaultValidateEndTime, // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer setup: func(target *environment) { // Remove all UTXOs owned by keys[1] utxoIDs, err := target.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), @@ -303,7 +301,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFlowCheckFailed, - expectedMempoolErr: ErrFlowCheckFailed, }, { description: "over delegation before AP3", @@ -316,7 +313,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMaxStakeValidator, AP3Time: defaultValidateEndTime, expectedExecutionErr: nil, - expectedMempoolErr: nil, }, { description: "over delegation after AP3", @@ -329,23 +325,19 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrOverDelegated, - expectedMempoolErr: ErrOverDelegated, }, } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, - tt.startTime, - tt.endTime, + uint64(tt.startTime.Unix()), + uint64(tt.endTime.Unix()), tt.nodeID, tt.rewardAddress, tt.feeKeys, @@ -369,29 +361,17 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { } err = tx.Unsigned.Visit(&executor) require.ErrorIs(err, tt.expectedExecutionErr) - - mempoolExecutor := MempoolTxVerifier{ - Backend: &freshTH.backend, - ParentID: lastAcceptedID, - StateVersions: freshTH, - Tx: tx, - } - err = tx.Unsigned.Visit(&mempoolExecutor) - require.ErrorIs(err, tt.expectedMempoolErr) }) } } -func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { +func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() - nodeID := preFundedKeys[0].PublicKey().Address() - env.config.BanffTime = env.state.GetTimestamp() + nodeID := genesisNodeIDs[0] { // Case: Proposed validator currently validating primary network @@ -402,7 +382,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, uint64(startTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -430,7 +410,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix()+1), uint64(defaultValidateEndTime.Unix()), - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), 
[]*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -449,12 +429,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { } // Add a validator to pending validator set of primary network - key, err := secp256k1.NewPrivateKey() - require.NoError(err) - - pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) @@ -463,7 +439,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, @@ -495,9 +471,11 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { require.ErrorIs(err, ErrNotValidator) } + addValTx := addDSTx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addDSTx.ID(), - addDSTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + dsStartTime, 0, ) require.NoError(err) @@ -596,8 +574,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) @@ -624,16 +602,18 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + defaultValidateStartTime, 0, ) require.NoError(err) @@ -650,7 +630,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -680,8 +660,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -713,8 +693,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time 
uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -746,8 +726,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr ) @@ -778,16 +758,18 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix())+1, // start time uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + defaultValidateStartTime, 0, ) require.NoError(err) @@ -810,18 +792,14 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { } } -func TestStandardTxExecutorAddValidator(t *testing.T) { +func TestBanffStandardTxExecutorAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() - env.config.BanffTime = env.state.GetTimestamp() - { // Case: Validator's start time too early tx, err := env.txBuilder.NewAddValidatorTx( @@ -876,7 +854,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator in current validator set of primary network - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount uint64(startTime.Unix()), // start time @@ -889,9 +867,11 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { ) require.NoError(err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + startTime, 0, ) require.NoError(err) @@ -913,7 +893,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator in pending validator set of primary network - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount uint64(startTime.Unix()), // start time @@ -949,7 +929,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator doesn't have enough tokens to cover stake amount - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( // create the tx 
env.config.MinValidatorStake, uint64(startTime.Unix()), @@ -983,7 +963,640 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { } } +// Verifies that the Memo field is required to be empty post-Durango +func TestDurangoMemoField(t *testing.T) { + type test struct { + name string + setupTest func(*environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) + } + + tests := []test{ + { + name: "AddValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkValidatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + Start: 0, + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + StakeOuts: stakedOuts, + RewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + DelegationShares: reward.PercentDenominator, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddSubnetValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddSubnetValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + SubnetValidator: txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + Subnet: testSubnet1.TxID, + }, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddDelegatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + 
defaultMinValidatorStake, + env.config.AddPrimaryNetworkDelegatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddDelegatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + StakeOuts: stakedOuts, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "CreateChainTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + chainTime := env.state.GetTimestamp() + createBlockchainTxFee := env.config.GetCreateBlockchainTxFee(chainTime) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + createBlockchainTxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.CreateChainTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + SubnetID: testSubnet1.TxID, + ChainName: "aaa", + VMID: ids.GenerateTestID(), + FxIDs: []ids.ID{}, + GenesisData: []byte{}, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "CreateSubnetTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + chainTime := env.state.GetTimestamp() + createSubnetTxFee := env.config.GetCreateSubnetTxFee(chainTime) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + createSubnetTxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Owner: &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "ImportTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + // Skip shared memory checks + env.backend.Bootstrapped.Set(false) + + utxoID := avax.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + } + amount := uint64(50000) + recipientKey := preFundedKeys[1] + + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, + }, + }, + } + + signers := [][]*secp256k1.PrivateKey{{recipientKey}} + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := 
&txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + }}, + SourceChain: env.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{ + { + UTXOID: utxo.UTXOID, + Asset: utxo.Asset, + In: &secp256k1fx.TransferInput{ + Amt: env.config.TxFee, + }, + }, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "ExportTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + amount := units.Avax + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + amount, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.ExportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + DestinationChain: env.ctx.XChainID, + ExportedOutputs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }}, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "RemoveSubnetValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + endTime := primaryValidator.EndTime + subnetValTx, err := env.txBuilder.NewAddSubnetValidatorTx( + defaultWeight, + 0, + uint64(endTime.Unix()), + primaryValidator.NodeID, + testSubnet1.ID(), + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + require.NoError(t, subnetValTx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: subnetValTx, + })) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + tx := &txs.RemoveSubnetValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.ID(), + NodeID: primaryValidator.NodeID, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "TransformSubnetTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + 
require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.TransformSubnetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.TxID, + AssetID: ids.GenerateTestID(), + InitialSupply: 10, + MaximumSupply: 10, + MinConsumptionRate: 0, + MaxConsumptionRate: reward.PercentDenominator, + MinValidatorStake: 2, + MaxValidatorStake: 10, + MinStakeDuration: 1, + MaxStakeDuration: 2, + MinDelegationFee: reward.PercentDenominator, + MinDelegatorStake: 1, + MaxValidatorWeightFactor: 1, + UptimeRequirement: reward.PercentDenominator, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddPermissionlessValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkValidatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddPermissionlessValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + Signer: signer.NewProofOfPossession(sk), + StakeOuts: stakedOuts, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + DelegationShares: reward.PercentDenominator, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddPermissionlessDelegatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkDelegatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddPermissionlessDelegatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + StakeOuts: stakedOuts, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + 
Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "TransferSubnetOwnershipTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.TransferSubnetOwnershipTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.TxID, + SubnetAuth: subnetAuth, + Owner: &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "BaseTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, true /*=postDurango*/) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + utx, signers, onAcceptState, memo := tt.setupTest(env) + + // Populated memo field should error + *memo = []byte{'m', 'e', 'm', 'o'} + tx, err := txs.NewSigned(utx, txs.Codec, signers) + require.NoError(err) + + err = tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + }) + require.ErrorIs(err, avax.ErrMemoTooLarge) + + // Empty memo field should not error + *memo = []byte{} + tx, err = txs.NewSigned(utx, txs.Codec, signers) + require.NoError(err) + + require.NoError(tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + })) + }) + } +} + // Returns a RemoveSubnetValidatorTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *txs.Tx) { t.Helper() @@ -1026,7 +1639,6 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1046,13 +1658,13 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx // mock implementations that can be used in tests // for verifying RemoveSubnetValidatorTx. 
type removeSubnetValidatorTxVerifyEnv struct { - banffTime time.Time - fx *fx.MockFx - flowChecker *utxo.MockVerifier - unsignedTx *txs.RemoveSubnetValidatorTx - tx *txs.Tx - state *state.MockDiff - staker *state.Staker + latestForkTime time.Time + fx *fx.MockFx + flowChecker *utxo.MockVerifier + unsignedTx *txs.RemoveSubnetValidatorTx + tx *txs.Tx + state *state.MockDiff + staker *state.Staker } // Returns mock implementations that can be used in tests @@ -1066,12 +1678,12 @@ func newValidRemoveSubnetValidatorTxVerifyEnv(t *testing.T, ctrl *gomock.Control unsignedTx, tx := newRemoveSubnetValidatorTx(t) mockState := state.NewMockDiff(ctrl) return removeSubnetValidatorTxVerifyEnv{ - banffTime: now, - fx: mockFx, - flowChecker: mockFlowChecker, - unsignedTx: unsignedTx, - tx: tx, - state: mockState, + latestForkTime: now, + fx: mockFx, + flowChecker: mockFlowChecker, + unsignedTx: unsignedTx, + tx: tx, + state: mockState, staker: &state.Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), @@ -1094,6 +1706,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) @@ -1108,7 +1721,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1133,7 +1748,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1153,12 +1770,15 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) env.state.EXPECT().GetPendingValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1182,11 +1802,14 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { staker.Priority = txs.SubnetPermissionlessValidatorCurrentPriority // Set dependency expectations. 
+ env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(&staker, nil).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1208,11 +1831,14 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1232,12 +1858,15 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1257,6 +1886,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) @@ -1264,7 +1894,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1284,6 +1916,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) @@ -1295,7 +1928,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + 
DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1325,6 +1960,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { } // Returns a TransformSubnetTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { t.Helper() @@ -1367,7 +2003,6 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1399,13 +2034,13 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { // mock implementations that can be used in tests // for verifying TransformSubnetTx. type transformSubnetTxVerifyEnv struct { - banffTime time.Time - fx *fx.MockFx - flowChecker *utxo.MockVerifier - unsignedTx *txs.TransformSubnetTx - tx *txs.Tx - state *state.MockDiff - staker *state.Staker + latestForkTime time.Time + fx *fx.MockFx + flowChecker *utxo.MockVerifier + unsignedTx *txs.TransformSubnetTx + tx *txs.Tx + state *state.MockDiff + staker *state.Staker } // Returns mock implementations that can be used in tests @@ -1419,12 +2054,12 @@ func newValidTransformSubnetTxVerifyEnv(t *testing.T, ctrl *gomock.Controller) t unsignedTx, tx := newTransformSubnetTx(t) mockState := state.NewMockDiff(ctrl) return transformSubnetTxVerifyEnv{ - banffTime: now, - fx: mockFx, - flowChecker: mockFlowChecker, - unsignedTx: unsignedTx, - tx: tx, - state: mockState, + latestForkTime: now, + fx: mockFx, + flowChecker: mockFlowChecker, + unsignedTx: unsignedTx, + tx: tx, + state: mockState, staker: &state.Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), @@ -1450,7 +2085,9 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1471,10 +2108,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.unsignedTx.MaxStakeDuration = math.MaxUint32 env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1496,10 +2136,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, @@ -1521,6 +2164,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) subnetOwner := fx.NewMockOwner(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) 
env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) @@ -1530,7 +2174,9 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, @@ -1553,6 +2199,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Set dependency expectations. subnetOwner := fx.NewMockOwner(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) @@ -1566,7 +2213,9 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 3d7145f4f4df..96d8bb3bc6dc 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -67,109 +68,87 @@ func VerifyNewChainTime( return nil } -type StateChanges interface { - Apply(onAccept state.Diff) - Len() int -} - -type stateChanges struct { - updatedSupplies map[ids.ID]uint64 - currentValidatorsToAdd []*state.Staker - currentDelegatorsToAdd []*state.Staker - pendingValidatorsToRemove []*state.Staker - pendingDelegatorsToRemove []*state.Staker - currentValidatorsToRemove []*state.Staker - - caminoStateChanges -} - -func (s *stateChanges) Apply(stateDiff state.Diff) { - for subnetID, supply := range s.updatedSupplies { - stateDiff.SetCurrentSupply(subnetID, supply) +func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { + var ( + timestamp = clk.Time() + parentTime = state.GetTimestamp() + ) + if parentTime.After(timestamp) { + timestamp = parentTime } + // [timestamp] = max(now, parentTime) - for _, currentValidatorToAdd := range s.currentValidatorsToAdd { - stateDiff.PutCurrentValidator(currentValidatorToAdd) - } - for _, pendingValidatorToRemove := range s.pendingValidatorsToRemove { - stateDiff.DeletePendingValidator(pendingValidatorToRemove) - } - for _, currentDelegatorToAdd := range s.currentDelegatorsToAdd { - stateDiff.PutCurrentDelegator(currentDelegatorToAdd) - } - for _, pendingDelegatorToRemove := range s.pendingDelegatorsToRemove { - stateDiff.DeletePendingDelegator(pendingDelegatorToRemove) - } - for _, currentValidatorToRemove := range s.currentValidatorsToRemove { - stateDiff.DeleteCurrentValidator(currentValidatorToRemove) + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) } - s.caminoStateChanges.Apply(stateDiff) -} - -func (s *stateChanges) Len() int { - return len(s.currentValidatorsToAdd) + len(s.currentDelegatorsToAdd) + - len(s.pendingValidatorsToRemove) + len(s.pendingDelegatorsToRemove) + - len(s.currentValidatorsToRemove) + s.caminoStateChanges.Len() + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime + } + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, timeWasCapped, nil } -// AdvanceTimeTo does not modify [parentState]. -// Instead it returns all the StateChanges caused by advancing the chain time to -// the [newChainTime]. +// AdvanceTimeTo applies all state changes to [parentState] resulting from +// advancing the chain time to [newChainTime]. +// Returns true iff the validator set changed. func AdvanceTimeTo( backend *Backend, parentState state.Chain, newChainTime time.Time, -) (StateChanges, error) { - pendingStakerIterator, err := parentState.GetPendingStakerIterator() +) (bool, error) { + // We promote pending stakers to current stakers first and remove + // completed stakers from the current staker set. We assume that any + // promoted staker will not immediately be removed from the current staker + // set. This is guaranteed by the following invariants. 
+ // + // Invariant: MinStakeDuration > 0 => guarantees [StartTime] != [EndTime] + // Invariant: [newChainTime] <= nextStakerChangeTime. + + changes, err := state.NewDiffOn(parentState) if err != nil { - return nil, err + return false, err } - defer pendingStakerIterator.Release() - changes := &stateChanges{ - updatedSupplies: make(map[ids.ID]uint64), + pendingStakerIterator, err := parentState.GetPendingStakerIterator() + if err != nil { + return false, err } + defer pendingStakerIterator.Release() - // Add to the staker set any pending stakers whose start time is at or - // before the new timestamp - - // Note: we process pending stakers ready to be promoted to current ones and - // then we process current stakers to be demoted out of stakers set. It is - // guaranteed that no promoted stakers would be demoted immediately. A - // failure of this invariant would cause a staker to be added to - // StateChanges and be persisted among current stakers even if it already - // expired. The following invariants ensure this does not happens: - // Invariant: minimum stake duration is > 0, so staker.StartTime != staker.EndTime. - // Invariant: [newChainTime] does not skip stakers set change times. - + var changed bool + // Promote any pending stakers to current if [StartTime] <= [newChainTime]. for pendingStakerIterator.Next() { stakerToRemove := pendingStakerIterator.Value() if stakerToRemove.StartTime.After(newChainTime) { break } + if stakerToRemove.EndTime.Equal(stakerToRemove.StartTime) { + continue + } stakerToAdd := *stakerToRemove stakerToAdd.NextTime = stakerToRemove.EndTime stakerToAdd.Priority = txs.PendingToCurrentPriorities[stakerToRemove.Priority] if stakerToRemove.Priority == txs.SubnetPermissionedValidatorPendingPriority { - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) + changed = true continue } - supply, ok := changes.updatedSupplies[stakerToRemove.SubnetID] - if !ok { - supply, err = parentState.GetCurrentSupply(stakerToRemove.SubnetID) - if err != nil { - return nil, err - } + supply, err := changes.GetCurrentSupply(stakerToRemove.SubnetID) + if err != nil { + return false, err } rewards, err := GetRewardsCalculator(backend, parentState, stakerToRemove.SubnetID) if err != nil { - return nil, err + return false, err } potentialReward := rewards.Calculate( @@ -181,25 +160,28 @@ func AdvanceTimeTo( // Invariant: [rewards.Calculate] can never return a [potentialReward] // such that [supply + potentialReward > maximumSupply]. 
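// --- illustrative sketch, not part of this patch ---
// Worked example of the timestamp clamp applied by NextBlockTime above, written as a
// standalone helper so the rule is visible without the state/clock plumbing.
// clampBlockTime and the sample times are hypothetical; the logic mirrors the function
// above and assumes the surrounding package's imports.
//
//	parentTime = 12:00:00, local clock = 12:00:30, nextStakerChangeTime = 12:00:10
//	=> block time 12:00:10, timeWasCapped = true
func clampBlockTime(now, parentTime, nextStakerChangeTime time.Time) (time.Time, bool) {
	timestamp := now
	if parentTime.After(timestamp) {
		timestamp = parentTime // timestamp = max(now, parentTime)
	}
	timeWasCapped := !timestamp.Before(nextStakerChangeTime)
	if timeWasCapped {
		timestamp = nextStakerChangeTime // timestamp = min(max(now, parentTime), nextStakerChangeTime)
	}
	return timestamp, timeWasCapped
}
// --- end of illustrative sketch ---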
- changes.updatedSupplies[stakerToRemove.SubnetID] = supply + potentialReward + changes.SetCurrentSupply(stakerToRemove.SubnetID, supply+potentialReward) switch stakerToRemove.Priority { case txs.PrimaryNetworkValidatorPendingPriority, txs.SubnetPermissionlessValidatorPendingPriority: - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) case txs.PrimaryNetworkDelegatorApricotPendingPriority, txs.PrimaryNetworkDelegatorBanffPendingPriority, txs.SubnetPermissionlessDelegatorPendingPriority: - changes.currentDelegatorsToAdd = append(changes.currentDelegatorsToAdd, &stakerToAdd) - changes.pendingDelegatorsToRemove = append(changes.pendingDelegatorsToRemove, stakerToRemove) + changes.PutCurrentDelegator(&stakerToAdd) + changes.DeletePendingDelegator(stakerToRemove) default: - return nil, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) + return false, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) } + + changed = true } + // Remove any current stakers whose [EndTime] <= [newChainTime]. currentStakerIterator, err := parentState.GetCurrentStakerIterator() if err != nil { - return nil, err + return false, err } defer currentStakerIterator.Release() @@ -217,14 +199,16 @@ func AdvanceTimeTo( break } - changes.currentValidatorsToRemove = append(changes.currentValidatorsToRemove, stakerToRemove) + changes.DeleteCurrentValidator(stakerToRemove) + changed = true } - if err := caminoAdvanceTimeTo(backend, parentState, newChainTime, changes); err != nil { - return nil, err + if err := changes.Apply(parentState); err != nil { + return false, err } - return changes, nil + parentState.SetTimestamp(newChainTime) + return changed, nil } func GetRewardsCalculator( diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index bf384be9fa89..f1a75f6f2f3f 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/vms/platformvm/txs/executor/tx_mempool_verifier.go b/vms/platformvm/txs/executor/tx_mempool_verifier.go deleted file mode 100644 index 49be6d382bfd..000000000000 --- a/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// -// This file is a derived work, based on ava-labs code whose -// original notices appear below. -// -// It is distributed under the same license conditions as the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
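// --- illustrative sketch, not part of this patch ---
// How the two new helpers in state_changes.go are meant to be combined by a caller
// that needs a "next block" view of the chain, the role the deleted
// MempoolTxVerifier.standardBaseState below used to play. nextBlockState is a
// hypothetical name; Backend, state.NewDiff, NextBlockTime and AdvanceTimeTo are the
// types and functions from this diff, and the snippet assumes the executor package's imports.
func nextBlockState(backend *Backend, parentID ids.ID, stateVersions state.Versions, clk *mockable.Clock) (state.Diff, error) {
	stateDiff, err := state.NewDiff(parentID, stateVersions)
	if err != nil {
		return nil, err
	}
	// Clamp the candidate timestamp to the next staker change time...
	nextBlkTime, _, err := NextBlockTime(stateDiff, clk)
	if err != nil {
		return nil, err
	}
	// ...then apply staker promotions/evictions and the timestamp directly onto the
	// diff. The bool result only reports whether the validator set changed.
	if _, err := AdvanceTimeTo(backend, stateDiff, nextBlkTime); err != nil {
		return nil, err
	}
	return stateDiff, nil
}
// --- end of illustrative sketch ---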
- -package executor - -import ( - "errors" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ txs.Visitor = (*MempoolTxVerifier)(nil) - -type MempoolTxVerifier struct { - *Backend - ParentID ids.ID - StateVersions state.Versions - Tx *txs.Tx -} - -func (*MempoolTxVerifier) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return ErrWrongTxType -} - -func (*MempoolTxVerifier) RewardValidatorTx(*txs.RewardValidatorTx) error { - return ErrWrongTxType -} - -func (v *MempoolTxVerifier) AddValidatorTx(tx *txs.AddValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddDelegatorTx(tx *txs.AddDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateChainTx(tx *txs.CreateChainTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateSubnetTx(tx *txs.CreateSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ImportTx(tx *txs.ImportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ExportTx(tx *txs.ExportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) TransformSubnetTx(tx *txs.TransformSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) BaseTx(tx *txs.BaseTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { - baseState, err := v.standardBaseState() - if err != nil { - return err - } - - executor := CaminoStandardTxExecutor{ - StandardTxExecutor{ - Backend: v.Backend, - State: baseState, - Tx: v.Tx, - }, - } - err = tx.Visit(&executor) - // We ignore [errFutureStakeTime] here because the time will be advanced - // when this transaction is issued. - if errors.Is(err, ErrFutureStakeTime) { - return nil - } - return err -} - -// Upon Banff activation, txs are not verified against current chain time -// but against the block timestamp. 
[baseTime] calculates -// the right timestamp to be used to mempool tx verification -func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { - state, err := state.NewDiff(v.ParentID, v.StateVersions) - if err != nil { - return nil, err - } - - nextBlkTime, err := v.nextBlockTime(state) - if err != nil { - return nil, err - } - - if !v.Backend.Config.IsBanffActivated(nextBlkTime) { - // next tx would be included into an Apricot block - // so we verify it against current chain state - return state, nil - } - - // next tx would be included into a Banff block - // so we verify it against duly updated chain state - changes, err := AdvanceTimeTo(v.Backend, state, nextBlkTime) - if err != nil { - return nil, err - } - changes.Apply(state) - state.SetTimestamp(nextBlkTime) - - return state, nil -} - -func (v *MempoolTxVerifier) nextBlockTime(state state.Diff) (time.Time, error) { - var ( - parentTime = state.GetTimestamp() - nextBlkTime = v.Clk.Time() - ) - if parentTime.After(nextBlkTime) { - nextBlkTime = parentTime - } - nextStakerChangeTime, err := GetNextStakerChangeTime(state) - if err != nil { - return time.Time{}, fmt.Errorf("could not calculate next staker change time: %w", err) - } - if !nextBlkTime.Before(nextStakerChangeTime) { - nextBlkTime = nextStakerChangeTime - } - return nextBlkTime, nil -} diff --git a/vms/platformvm/txs/export_tx.go b/vms/platformvm/txs/export_tx.go index b124263aae12..19dc3a076f7a 100644 --- a/vms/platformvm/txs/export_tx.go +++ b/vms/platformvm/txs/export_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/import_tx.go b/vms/platformvm/txs/import_tx.go index 121986991012..563242dad34a 100644 --- a/vms/platformvm/txs/import_tx.go +++ b/vms/platformvm/txs/import_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/mempool/camino_visitor.go b/vms/platformvm/txs/mempool/camino_visitor.go deleted file mode 100644 index 5e87ab9ed296..000000000000 --- a/vms/platformvm/txs/mempool/camino_visitor.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2022-2024, Chain4Travel AG. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mempool - -import ( - "errors" - - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var errUnsupportedTxType = errors.New("unsupported tx type") - -// Issuer - -func (i *issuer) AddressStateTx(*txs.AddressStateTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) DepositTx(*txs.DepositTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) UnlockDepositTx(*txs.UnlockDepositTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) ClaimTx(*txs.ClaimTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) RegisterNodeTx(*txs.RegisterNodeTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) RewardsImportTx(*txs.RewardsImportTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) MultisigAliasTx(*txs.MultisigAliasTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) AddDepositOfferTx(*txs.AddDepositOfferTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) AddProposalTx(*txs.AddProposalTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) AddVoteTx(*txs.AddVoteTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (*issuer) FinishProposalsTx(*txs.FinishProposalsTx) error { - return errUnsupportedTxType -} - -// Remover - -func (r *remover) AddressStateTx(*txs.AddressStateTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) DepositTx(*txs.DepositTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) UnlockDepositTx(*txs.UnlockDepositTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) ClaimTx(*txs.ClaimTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) RegisterNodeTx(*txs.RegisterNodeTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) RewardsImportTx(*txs.RewardsImportTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) MultisigAliasTx(*txs.MultisigAliasTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) AddDepositOfferTx(*txs.AddDepositOfferTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) AddProposalTx(*txs.AddProposalTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) AddVoteTx(*txs.AddVoteTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (*remover) FinishProposalsTx(*txs.FinishProposalsTx) error { - // this tx is never in mempool - return nil -} diff --git a/vms/platformvm/txs/mempool/issuer.go b/vms/platformvm/txs/mempool/issuer.go deleted file mode 100644 index b56c10190cf8..000000000000 --- a/vms/platformvm/txs/mempool/issuer.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
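// --- illustrative sketch, not part of this patch ---
// issuer.go (deleted below) routed decision and staker txs into separate heaps and
// refused consensus-issued txs. With the single FIFO mempool introduced later in this
// diff, the only part that survives is the guard on tx types, now inlined in Add as a
// plain type switch. rejectConsensusIssuedTx is a hypothetical name; the error values
// are the ones exported by the new mempool.go.
func rejectConsensusIssuedTx(tx *txs.Tx) error {
	switch tx.Unsigned.(type) {
	case *txs.AdvanceTimeTx:
		return ErrCantIssueAdvanceTimeTx
	case *txs.RewardValidatorTx:
		return ErrCantIssueRewardValidatorTx
	default:
		return nil
	}
}
// --- end of illustrative sketch ---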
- -package mempool - -import ( - "errors" - - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var ( - _ txs.Visitor = (*issuer)(nil) - - errCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") - errCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") -) - -type issuer struct { - m *mempool - tx *txs.Tx -} - -func (*issuer) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errCantIssueAdvanceTimeTx -} - -func (*issuer) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errCantIssueRewardValidatorTx -} - -func (i *issuer) AddValidatorTx(*txs.AddValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddDelegatorTx(*txs.AddDelegatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) CreateChainTx(*txs.CreateChainTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) CreateSubnetTx(*txs.CreateSubnetTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) ImportTx(*txs.ImportTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) ExportTx(*txs.ExportTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) TransformSubnetTx(*txs.TransformSubnetTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) BaseTx(*txs.BaseTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} diff --git a/vms/platformvm/txs/mempool/mempool.go b/vms/platformvm/txs/mempool/mempool.go index 7d1ba9b609bd..34ee9c283745 100644 --- a/vms/platformvm/txs/mempool/mempool.go +++ b/vms/platformvm/txs/mempool/mempool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mempool @@ -6,27 +6,28 @@ package mempool import ( "errors" "fmt" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/setmap" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/txheap" ) const ( - // targetTxSize is the maximum number of bytes a transaction can use to be + // MaxTxSize is the maximum number of bytes a transaction can use to be // allowed into the mempool. 
- targetTxSize = 64 * units.KiB + MaxTxSize = 64 * units.KiB // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 - initialConsumedUTXOsSize = 512 - // maxMempoolSize is the maximum number of bytes allowed in the mempool maxMempoolSize = 64 * units.MiB ) @@ -34,247 +35,202 @@ const ( var ( _ Mempool = (*mempool)(nil) - errMempoolFull = errors.New("mempool is full") + ErrDuplicateTx = errors.New("duplicate tx") + ErrTxTooLarge = errors.New("tx too large") + ErrMempoolFull = errors.New("mempool is full") + ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") + ErrCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") + ErrCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") ) -type BlockTimer interface { - // ResetBlockTimer schedules a timer to notify the consensus engine once - // there is a block ready to be built. If a block is ready to be built when - // this function is called, the engine will be notified directly. - ResetBlockTimer() -} - type Mempool interface { - // we may want to be able to stop valid transactions - // from entering the mempool, e.g. during blocks creation - EnableAdding() - DisableAdding() - Add(tx *txs.Tx) error - Has(txID ids.ID) bool - Get(txID ids.ID) *txs.Tx - Remove(txs []*txs.Tx) - - // Following Banff activation, all mempool transactions, - // (both decision and staker) are included into Standard blocks. - // HasTxs allow to check for availability of any mempool transaction. - HasTxs() bool - // PeekTxs returns the next txs for Banff blocks - // up to maxTxsBytes without removing them from the mempool. - PeekTxs(maxTxsBytes int) []*txs.Tx - - HasStakerTx() bool - // PeekStakerTx returns the next stakerTx without removing it from mempool. - // It returns nil if !HasStakerTx(). - // It's guaranteed that the returned tx, if not nil, is a StakerTx. - PeekStakerTx() *txs.Tx + Get(txID ids.ID) (*txs.Tx, bool) + // Remove [txs] and any conflicts of [txs] from the mempool. + Remove(txs ...*txs.Tx) + + // Peek returns the oldest tx in the mempool. + Peek() (tx *txs.Tx, exists bool) + + // Iterate iterates over the txs until f returns false + Iterate(f func(tx *txs.Tx) bool) + + // RequestBuildBlock notifies the consensus engine that a block should be + // built. If [emptyBlockPermitted] is true, the notification will be sent + // regardless of whether there are no transactions in the mempool. If not, + // a notification will only be sent if there is at least one transaction in + // the mempool. + RequestBuildBlock(emptyBlockPermitted bool) // Note: dropped txs are added to droppedTxIDs but are not evicted from // unissued decision/staker txs. This allows previously dropped txs to be // possibly reissued. MarkDropped(txID ids.ID, reason error) GetDropReason(txID ids.ID) error + + // Len returns the number of txs in the mempool. + Len() int } // Transactions from clients that have not yet been put into blocks and added to // consensus type mempool struct { - // If true, drop transactions added to the mempool via Add. 
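// --- illustrative sketch, not part of this patch ---
// A consumer of the reworked Mempool interface above: collect txs in FIFO order, up to
// a byte budget, without mutating the pool. collectTxs and maxBlockBytes are
// hypothetical names; Iterate and tx.Bytes() are the methods from this diff.
func collectTxs(m Mempool, maxBlockBytes int) []*txs.Tx {
	var (
		picked    []*txs.Tx
		bytesUsed int
	)
	m.Iterate(func(tx *txs.Tx) bool {
		txBytes := len(tx.Bytes())
		if bytesUsed+txBytes > maxBlockBytes {
			return false // returning false stops the iteration once the budget is hit
		}
		bytesUsed += txBytes
		picked = append(picked, tx)
		return true
	})
	return picked
}
// --- end of illustrative sketch ---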
- dropIncoming bool + lock sync.RWMutex + unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs + bytesAvailable int + droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> verification error - bytesAvailableMetric prometheus.Gauge - bytesAvailable int - - unissuedDecisionTxs txheap.Heap - unissuedStakerTxs txheap.Heap - - // Key: Tx ID - // Value: Verification error - droppedTxIDs *cache.LRU[ids.ID, error] - - consumedUTXOs set.Set[ids.ID] + toEngine chan<- common.Message - blkTimer BlockTimer + numTxs prometheus.Gauge + bytesAvailableMetric prometheus.Gauge } -func NewMempool( +func New( namespace string, registerer prometheus.Registerer, - blkTimer BlockTimer, + toEngine chan<- common.Message, ) (Mempool, error) { - bytesAvailableMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }) - if err := registerer.Register(bytesAvailableMetric); err != nil { - return nil, err + m := &mempool{ + unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + consumedUTXOs: setmap.New[ids.ID, ids.ID](), + bytesAvailable: maxMempoolSize, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + toEngine: toEngine, + numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "txs", + Help: "Number of decision/staker transactions in the mempool", + }), + bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: "Number of bytes of space currently available in the mempool", + }), } + m.bytesAvailableMetric.Set(maxMempoolSize) - unissuedDecisionTxs, err := txheap.NewWithMetrics( - txheap.NewByAge(), - fmt.Sprintf("%s_decision_txs", namespace), - registerer, + err := utils.Err( + registerer.Register(m.numTxs), + registerer.Register(m.bytesAvailableMetric), ) - if err != nil { - return nil, err - } - - unissuedStakerTxs, err := txheap.NewWithMetrics( - txheap.NewByStartTime(), - fmt.Sprintf("%s_staker_txs", namespace), - registerer, - ) - if err != nil { - return nil, err - } - - bytesAvailableMetric.Set(maxMempoolSize) - return &mempool{ - bytesAvailableMetric: bytesAvailableMetric, - bytesAvailable: maxMempoolSize, - unissuedDecisionTxs: unissuedDecisionTxs, - unissuedStakerTxs: unissuedStakerTxs, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), - dropIncoming: false, // enable tx adding by default - blkTimer: blkTimer, - }, nil -} - -func (m *mempool) EnableAdding() { - m.dropIncoming = false -} - -func (m *mempool) DisableAdding() { - m.dropIncoming = true + return m, err } func (m *mempool) Add(tx *txs.Tx) error { - if m.dropIncoming { - return fmt.Errorf("tx %s not added because mempool is closed", tx.ID()) + m.lock.Lock() + defer m.lock.Unlock() + + switch tx.Unsigned.(type) { + case *txs.AdvanceTimeTx: + return ErrCantIssueAdvanceTimeTx + case *txs.RewardValidatorTx: + return ErrCantIssueRewardValidatorTx + default: } // Note: a previously dropped tx can be re-added txID := tx.ID() - if m.Has(txID) { - return fmt.Errorf("duplicate tx %s", txID) + if _, ok := m.unissuedTxs.Get(txID); ok { + return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) } - txBytes := tx.Bytes() - if len(txBytes) > targetTxSize { - return fmt.Errorf("tx %s size (%d) > target size (%d)", txID, len(txBytes), targetTxSize) + txSize := len(tx.Bytes()) + if txSize 
> MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > max size (%d)", + ErrTxTooLarge, + txID, + txSize, + MaxTxSize, + ) } - if len(txBytes) > m.bytesAvailable { - return fmt.Errorf("%w, tx %s size (%d) exceeds available space (%d)", - errMempoolFull, + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", + ErrMempoolFull, txID, - len(txBytes), + txSize, m.bytesAvailable, ) } inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("tx %s conflicts with a transaction in the mempool", txID) + if m.consumedUTXOs.HasOverlap(inputs) { + return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) } - if err := tx.Unsigned.Visit(&issuer{ - m: m, - tx: tx, - }); err != nil { - return err - } + m.unissuedTxs.Put(txID, tx) + m.numTxs.Inc() + m.bytesAvailable -= txSize + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) // Mark these UTXOs as consumed in the mempool - m.consumedUTXOs.Union(inputs) + m.consumedUTXOs.Put(txID, inputs) // An explicitly added tx must not be marked as dropped. m.droppedTxIDs.Evict(txID) - m.blkTimer.ResetBlockTimer() return nil } -func (m *mempool) Has(txID ids.ID) bool { - return m.Get(txID) != nil +func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { + return m.unissuedTxs.Get(txID) } -func (m *mempool) Get(txID ids.ID) *txs.Tx { - if tx := m.unissuedDecisionTxs.Get(txID); tx != nil { - return tx - } - return m.unissuedStakerTxs.Get(txID) -} +func (m *mempool) Remove(txs ...*txs.Tx) { + m.lock.Lock() + defer m.lock.Unlock() -func (m *mempool) Remove(txsToRemove []*txs.Tx) { - remover := &remover{ - m: m, - } - - for _, tx := range txsToRemove { - remover.tx = tx - _ = tx.Unsigned.Visit(remover) - } -} - -func (m *mempool) HasTxs() bool { - return m.unissuedDecisionTxs.Len() > 0 || m.unissuedStakerTxs.Len() > 0 -} - -func (m *mempool) PeekTxs(maxTxsBytes int) []*txs.Tx { - txs := m.unissuedDecisionTxs.List() - txs = append(txs, m.unissuedStakerTxs.List()...) + for _, tx := range txs { + txID := tx.ID() + // If the transaction is in the mempool, remove it. + if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { + m.unissuedTxs.Delete(txID) + m.bytesAvailable += len(tx.Bytes()) + continue + } - size := 0 - for i, tx := range txs { - size += len(tx.Bytes()) - if size > maxTxsBytes { - return txs[:i] + // If the transaction isn't in the mempool, remove any conflicts it has. 
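// --- illustrative sketch, not part of this patch ---
// The branch below relies on consumedUTXOs being a setmap keyed by txID whose values
// are the UTXO IDs that tx spends, so a tx that never entered the mempool can still
// evict whatever currently spends the same inputs. A minimal sketch of that lookup;
// sketchConflictEviction is a hypothetical name.
func sketchConflictEviction(consumedUTXOs *setmap.SetMap[ids.ID, ids.ID], acceptedTx *txs.Tx) []ids.ID {
	inputs := acceptedTx.Unsigned.InputIDs()
	conflictingTxIDs := make([]ids.ID, 0)
	// DeleteOverlapping removes and returns every entry sharing at least one UTXO
	// with [inputs]; each entry's Key is the conflicting txID.
	for _, removed := range consumedUTXOs.DeleteOverlapping(inputs) {
		conflictingTxIDs = append(conflictingTxIDs, removed.Key)
	}
	return conflictingTxIDs
}
// --- end of illustrative sketch ---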
+ inputs := tx.Unsigned.InputIDs() + for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { + tx, _ := m.unissuedTxs.Get(removed.Key) + m.unissuedTxs.Delete(removed.Key) + m.bytesAvailable += len(tx.Bytes()) } } - return txs -} - -func (m *mempool) addDecisionTx(tx *txs.Tx) { - m.unissuedDecisionTxs.Add(tx) - m.register(tx) + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + m.numTxs.Set(float64(m.unissuedTxs.Len())) } -func (m *mempool) addStakerTx(tx *txs.Tx) { - m.unissuedStakerTxs.Add(tx) - m.register(tx) +func (m *mempool) Peek() (*txs.Tx, bool) { + _, tx, exists := m.unissuedTxs.Oldest() + return tx, exists } -func (m *mempool) HasStakerTx() bool { - return m.unissuedStakerTxs.Len() > 0 -} +func (m *mempool) Iterate(f func(tx *txs.Tx) bool) { + m.lock.RLock() + defer m.lock.RUnlock() -func (m *mempool) removeDecisionTxs(txs []*txs.Tx) { - for _, tx := range txs { - txID := tx.ID() - if m.unissuedDecisionTxs.Remove(txID) != nil { - m.deregister(tx) + itr := m.unissuedTxs.NewIterator() + for itr.Next() { + if !f(itr.Value()) { + return } } } -func (m *mempool) removeStakerTx(tx *txs.Tx) { - txID := tx.ID() - if m.unissuedStakerTxs.Remove(txID) != nil { - m.deregister(tx) +func (m *mempool) MarkDropped(txID ids.ID, reason error) { + if errors.Is(reason, ErrMempoolFull) { + return } -} -func (m *mempool) PeekStakerTx() *txs.Tx { - if m.unissuedStakerTxs.Len() == 0 { - return nil - } + m.lock.RLock() + defer m.lock.RUnlock() - return m.unissuedStakerTxs.Peek() -} + if _, ok := m.unissuedTxs.Get(txID); ok { + return + } -func (m *mempool) MarkDropped(txID ids.ID, reason error) { m.droppedTxIDs.Put(txID, reason) } @@ -283,17 +239,20 @@ func (m *mempool) GetDropReason(txID ids.ID) error { return err } -func (m *mempool) register(tx *txs.Tx) { - txBytes := tx.Bytes() - m.bytesAvailable -= len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) +func (m *mempool) RequestBuildBlock(emptyBlockPermitted bool) { + if !emptyBlockPermitted && m.unissuedTxs.Len() == 0 { + return + } + + select { + case m.toEngine <- common.PendingTxs: + default: + } } -func (m *mempool) deregister(tx *txs.Tx) { - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) +func (m *mempool) Len() int { + m.lock.RLock() + defer m.lock.RUnlock() - inputs := tx.Unsigned.InputIDs() - m.consumedUTXOs.Difference(inputs) + return m.unissuedTxs.Len() } diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go index bdcd3101233f..6d569b50c6b2 100644 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ b/vms/platformvm/txs/mempool/mempool_test.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
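// --- illustrative sketch, not part of this patch ---
// Intended call pattern for the new Add / MarkDropped / RequestBuildBlock trio above,
// e.g. when a tx arrives from gossip. addAndNotify and pool are hypothetical names;
// the semantics (MarkDropped ignoring ErrMempoolFull, the non-blocking engine
// notification) are the ones implemented above.
func addAndNotify(pool Mempool, tx *txs.Tx) error {
	if err := pool.Add(tx); err != nil {
		// MarkDropped deliberately skips ErrMempoolFull, so a tx rejected only for
		// lack of space is not blacklisted and can be reissued later.
		pool.MarkDropped(tx.ID(), err)
		return err
	}
	// Wake the engine only if there is something to build with; passing true would
	// request a block even with an empty mempool (e.g. to advance chain time).
	pool.RequestBuildBlock(false)
	return nil
}
// --- end of illustrative sketch ---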
package mempool import ( - "errors" - "math" "testing" "time" @@ -14,18 +12,14 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ BlockTimer = (*noopBlkTimer)(nil) - -type noopBlkTimer struct{} - -func (*noopBlkTimer) ResetBlockTimer() {} - var preFundedKeys = secp256k1.TestKeys() // shows that valid tx is not added to mempool if this would exceed its maximum @@ -34,7 +28,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) decisionTxs, err := createTestDecisionTxs(1) @@ -45,7 +39,12 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { mpool.(*mempool).bytesAvailable = len(tx.Bytes()) - 1 err = mpool.Add(tx) - require.True(errors.Is(err, errMempoolFull), err, "max mempool size breached") + require.ErrorIs(err, ErrMempoolFull) + + // tx should not be marked as dropped if the mempool is full + txID := tx.ID() + mpool.MarkDropped(txID, err) + require.NoError(mpool.GetDropReason(txID)) // shortcut to simulated almost filled mempool mpool.(*mempool).bytesAvailable = len(tx.Bytes()) @@ -58,48 +57,30 @@ func TestDecisionTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) decisionTxs, err := createTestDecisionTxs(2) require.NoError(err) - // txs must not already there before we start - require.False(mpool.HasTxs()) - for _, tx := range decisionTxs { // tx not already there - require.False(mpool.Has(tx.ID())) + _, ok := mpool.Get(tx.ID()) + require.False(ok) // we can insert require.NoError(mpool.Add(tx)) // we can get it - require.True(mpool.Has(tx.ID())) - - retrieved := mpool.Get(tx.ID()) - require.NotNil(retrieved) - require.Equal(tx, retrieved) - - // we can peek it - peeked := mpool.PeekTxs(math.MaxInt) - - // tx will be among those peeked, - // in NO PARTICULAR ORDER - found := false - for _, pk := range peeked { - if pk.ID() == tx.ID() { - found = true - break - } - } - require.True(found) + got, ok := mpool.Get(tx.ID()) + require.True(ok) + require.Equal(tx, got) // once removed it cannot be there - mpool.Remove([]*txs.Tx{tx}) + mpool.Remove(tx) - require.False(mpool.Has(tx.ID())) - require.Equal((*txs.Tx)(nil), mpool.Get(tx.ID())) + _, ok = mpool.Get(tx.ID()) + require.False(ok) // we can reinsert it again to grow the mempool require.NoError(mpool.Add(tx)) @@ -110,7 +91,7 @@ func TestProposalTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) // The proposal txs are ordered by decreasing start time. 
This means after @@ -119,52 +100,23 @@ func TestProposalTxsInMempool(t *testing.T) { proposalTxs, err := createTestProposalTxs(2) require.NoError(err) - // txs should not be already there - require.False(mpool.HasStakerTx()) - - for i, tx := range proposalTxs { - require.False(mpool.Has(tx.ID())) + for _, tx := range proposalTxs { + _, ok := mpool.Get(tx.ID()) + require.False(ok) // we can insert require.NoError(mpool.Add(tx)) // we can get it - require.True(mpool.HasStakerTx()) - require.True(mpool.Has(tx.ID())) - - retrieved := mpool.Get(tx.ID()) - require.NotNil(retrieved) - require.Equal(tx, retrieved) - - { - // we can peek it - peeked := mpool.PeekStakerTx() - require.NotNil(peeked) - require.Equal(tx, peeked) - } - - { - // we can peek it - peeked := mpool.PeekTxs(math.MaxInt) - require.Len(peeked, i+1) - - // tx will be among those peeked, - // in NO PARTICULAR ORDER - found := false - for _, pk := range peeked { - if pk.ID() == tx.ID() { - found = true - break - } - } - require.True(found) - } + got, ok := mpool.Get(tx.ID()) + require.Equal(tx, got) + require.True(ok) // once removed it cannot be there - mpool.Remove([]*txs.Tx{tx}) + mpool.Remove(tx) - require.False(mpool.Has(tx.ID())) - require.Equal((*txs.Tx)(nil), mpool.Get(tx.ID())) + _, ok = mpool.Get(tx.ID()) + require.False(ok) // we can reinsert it again to grow the mempool require.NoError(mpool.Add(tx)) @@ -222,17 +174,10 @@ func createTestProposalTxs(count int) ([]*txs.Tx, error) { now := time.Now() proposalTxs := make([]*txs.Tx, 0, count) for i := 0; i < count; i++ { - utx := &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{}, - Validator: txs.Validator{ - Start: uint64(now.Add(time.Duration(count-i) * time.Second).Unix()), - }, - StakeOuts: nil, - RewardsOwner: &secp256k1fx.OutputOwners{}, - DelegationShares: 100, - } - - tx, err := txs.NewSigned(utx, txs.Codec, nil) + tx, err := generateAddValidatorTx( + uint64(now.Add(time.Duration(count-i)*time.Second).Unix()), // startTime + 0, // endTime + ) if err != nil { return nil, err } @@ -240,3 +185,116 @@ func createTestProposalTxs(count int) ([]*txs.Tx, error) { } return proposalTxs, nil } + +func generateAddValidatorTx(startTime uint64, endTime uint64) (*txs.Tx, error) { + utx := &txs.AddValidatorTx{ + BaseTx: txs.BaseTx{}, + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: startTime, + End: endTime, + }, + StakeOuts: nil, + RewardsOwner: &secp256k1fx.OutputOwners{}, + DelegationShares: 100, + } + + return txs.NewSigned(utx, txs.Codec, nil) +} + +func TestPeekTxs(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + testDecisionTxs, err := createTestDecisionTxs(1) + require.NoError(err) + testProposalTxs, err := createTestProposalTxs(1) + require.NoError(err) + + tx, exists := mempool.Peek() + require.False(exists) + require.Nil(tx) + + require.NoError(mempool.Add(testDecisionTxs[0])) + require.NoError(mempool.Add(testProposalTxs[0])) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, testDecisionTxs[0]) + require.NotEqual(tx, testProposalTxs[0]) + + mempool.Remove(testDecisionTxs[0]) + + tx, exists = mempool.Peek() + require.True(exists) + require.NotEqual(tx, testDecisionTxs[0]) + require.Equal(tx, testProposalTxs[0]) + + mempool.Remove(testProposalTxs[0]) + + tx, exists = mempool.Peek() + require.False(exists) + require.Nil(tx) +} + +func TestRemoveConflicts(t *testing.T) 
{ + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + txs, err := createTestDecisionTxs(1) + require.NoError(err) + conflictTxs, err := createTestDecisionTxs(1) + require.NoError(err) + + require.NoError(mempool.Add(txs[0])) + + tx, exists := mempool.Peek() + require.True(exists) + require.Equal(tx, txs[0]) + + mempool.Remove(conflictTxs[0]) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestIterate(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + testDecisionTxs, err := createTestDecisionTxs(1) + require.NoError(err) + decisionTx := testDecisionTxs[0] + + testProposalTxs, err := createTestProposalTxs(1) + require.NoError(err) + proposalTx := testProposalTxs[0] + + require.NoError(mempool.Add(decisionTx)) + require.NoError(mempool.Add(proposalTx)) + + expectedSet := set.Of( + decisionTx.ID(), + proposalTx.ID(), + ) + + set := set.NewSet[ids.ID](2) + mempool.Iterate(func(tx *txs.Tx) bool { + set.Add(tx.ID()) + return true + }) + + require.Equal(expectedSet, set) +} diff --git a/vms/platformvm/txs/mempool/mock_mempool.go b/vms/platformvm/txs/mempool/mock_mempool.go index a4baccd405e9..c47f42e92718 100644 --- a/vms/platformvm/txs/mempool/mock_mempool.go +++ b/vms/platformvm/txs/mempool/mock_mempool.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool (interfaces: Mempool) +// +// Generated by this command: +// +// mockgen -package=mempool -destination=vms/platformvm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool Mempool +// // Package mempool is a generated GoMock package. package mempool @@ -47,45 +49,22 @@ func (m *MockMempool) Add(arg0 *txs.Tx) error { } // Add indicates an expected call of Add. -func (mr *MockMempoolMockRecorder) Add(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Add(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockMempool)(nil).Add), arg0) } -// DisableAdding mocks base method. -func (m *MockMempool) DisableAdding() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DisableAdding") -} - -// DisableAdding indicates an expected call of DisableAdding. -func (mr *MockMempoolMockRecorder) DisableAdding() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAdding", reflect.TypeOf((*MockMempool)(nil).DisableAdding)) -} - -// EnableAdding mocks base method. -func (m *MockMempool) EnableAdding() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "EnableAdding") -} - -// EnableAdding indicates an expected call of EnableAdding. -func (mr *MockMempoolMockRecorder) EnableAdding() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAdding", reflect.TypeOf((*MockMempool)(nil).EnableAdding)) -} - // Get mocks base method. 
-func (m *MockMempool) Get(arg0 ids.ID) *txs.Tx { +func (m *MockMempool) Get(arg0 ids.ID) (*txs.Tx, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMempool)(nil).Get), arg0) } @@ -99,51 +78,35 @@ func (m *MockMempool) GetDropReason(arg0 ids.ID) error { } // GetDropReason indicates an expected call of GetDropReason. -func (mr *MockMempoolMockRecorder) GetDropReason(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) GetDropReason(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDropReason", reflect.TypeOf((*MockMempool)(nil).GetDropReason), arg0) } -// Has mocks base method. -func (m *MockMempool) Has(arg0 ids.ID) bool { +// Iterate mocks base method. +func (m *MockMempool) Iterate(arg0 func(*txs.Tx) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Has indicates an expected call of Has. -func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) -} - -// HasStakerTx mocks base method. -func (m *MockMempool) HasStakerTx() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasStakerTx") - ret0, _ := ret[0].(bool) - return ret0 + m.ctrl.Call(m, "Iterate", arg0) } -// HasStakerTx indicates an expected call of HasStakerTx. -func (mr *MockMempoolMockRecorder) HasStakerTx() *gomock.Call { +// Iterate indicates an expected call of Iterate. +func (mr *MockMempoolMockRecorder) Iterate(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasStakerTx", reflect.TypeOf((*MockMempool)(nil).HasStakerTx)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockMempool)(nil).Iterate), arg0) } -// HasTxs mocks base method. -func (m *MockMempool) HasTxs() bool { +// Len mocks base method. +func (m *MockMempool) Len() int { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasTxs") - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) return ret0 } -// HasTxs indicates an expected call of HasTxs. -func (mr *MockMempoolMockRecorder) HasTxs() *gomock.Call { +// Len indicates an expected call of Len. +func (mr *MockMempoolMockRecorder) Len() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasTxs", reflect.TypeOf((*MockMempool)(nil).HasTxs)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockMempool)(nil).Len)) } // MarkDropped mocks base method. @@ -153,47 +116,50 @@ func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { } // MarkDropped indicates an expected call of MarkDropped. -func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) } -// PeekStakerTx mocks base method. 
-func (m *MockMempool) PeekStakerTx() *txs.Tx { +// Peek mocks base method. +func (m *MockMempool) Peek() (*txs.Tx, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeekStakerTx") + ret := m.ctrl.Call(m, "Peek") ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } -// PeekStakerTx indicates an expected call of PeekStakerTx. -func (mr *MockMempoolMockRecorder) PeekStakerTx() *gomock.Call { +// Peek indicates an expected call of Peek. +func (mr *MockMempoolMockRecorder) Peek() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeekStakerTx", reflect.TypeOf((*MockMempool)(nil).PeekStakerTx)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek)) } -// PeekTxs mocks base method. -func (m *MockMempool) PeekTxs(arg0 int) []*txs.Tx { +// Remove mocks base method. +func (m *MockMempool) Remove(arg0 ...*txs.Tx) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeekTxs", arg0) - ret0, _ := ret[0].([]*txs.Tx) - return ret0 + varargs := []any{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Remove", varargs...) } -// PeekTxs indicates an expected call of PeekTxs. -func (mr *MockMempoolMockRecorder) PeekTxs(arg0 interface{}) *gomock.Call { +// Remove indicates an expected call of Remove. +func (mr *MockMempoolMockRecorder) Remove(arg0 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeekTxs", reflect.TypeOf((*MockMempool)(nil).PeekTxs), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0...) } -// Remove mocks base method. -func (m *MockMempool) Remove(arg0 []*txs.Tx) { +// RequestBuildBlock mocks base method. +func (m *MockMempool) RequestBuildBlock(arg0 bool) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Remove", arg0) + m.ctrl.Call(m, "RequestBuildBlock", arg0) } -// Remove indicates an expected call of Remove. -func (mr *MockMempoolMockRecorder) Remove(arg0 interface{}) *gomock.Call { +// RequestBuildBlock indicates an expected call of RequestBuildBlock. +func (mr *MockMempoolMockRecorder) RequestBuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestBuildBlock", reflect.TypeOf((*MockMempool)(nil).RequestBuildBlock), arg0) } diff --git a/vms/platformvm/txs/mempool/remover.go b/vms/platformvm/txs/mempool/remover.go deleted file mode 100644 index b21071b16465..000000000000 --- a/vms/platformvm/txs/mempool/remover.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mempool - -import "github.com/ava-labs/avalanchego/vms/platformvm/txs" - -var _ txs.Visitor = (*remover)(nil) - -type remover struct { - m *mempool - tx *txs.Tx -} - -func (r *remover) AddValidatorTx(*txs.AddValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddDelegatorTx(*txs.AddDelegatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) CreateChainTx(*txs.CreateChainTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) CreateSubnetTx(*txs.CreateSubnetTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) ImportTx(*txs.ImportTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) ExportTx(*txs.ExportTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) TransformSubnetTx(*txs.TransformSubnetTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) BaseTx(*txs.BaseTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (*remover) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - // this tx is never in mempool - return nil -} - -func (*remover) RewardValidatorTx(*txs.RewardValidatorTx) error { - // this tx is never in mempool - return nil -} diff --git a/vms/platformvm/txs/mock_staker.go b/vms/platformvm/txs/mock_staker.go deleted file mode 100644 index e01ca66cf9e3..000000000000 --- a/vms/platformvm/txs/mock_staker.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: Staker) - -// Package txs is a generated GoMock package. -package txs - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - bls "github.com/ava-labs/avalanchego/utils/crypto/bls" - gomock "go.uber.org/mock/gomock" -) - -// MockStaker is a mock of Staker interface. -type MockStaker struct { - ctrl *gomock.Controller - recorder *MockStakerMockRecorder -} - -// MockStakerMockRecorder is the mock recorder for MockStaker. -type MockStakerMockRecorder struct { - mock *MockStaker -} - -// NewMockStaker creates a new mock instance. -func NewMockStaker(ctrl *gomock.Controller) *MockStaker { - mock := &MockStaker{ctrl: ctrl} - mock.recorder = &MockStakerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStaker) EXPECT() *MockStakerMockRecorder { - return m.recorder -} - -// CurrentPriority mocks base method. 
-func (m *MockStaker) CurrentPriority() Priority { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CurrentPriority") - ret0, _ := ret[0].(Priority) - return ret0 -} - -// CurrentPriority indicates an expected call of CurrentPriority. -func (mr *MockStakerMockRecorder) CurrentPriority() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockStaker)(nil).CurrentPriority)) -} - -// EndTime mocks base method. -func (m *MockStaker) EndTime() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EndTime") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// EndTime indicates an expected call of EndTime. -func (mr *MockStakerMockRecorder) EndTime() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockStaker)(nil).EndTime)) -} - -// NodeID mocks base method. -func (m *MockStaker) NodeID() ids.NodeID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NodeID") - ret0, _ := ret[0].(ids.NodeID) - return ret0 -} - -// NodeID indicates an expected call of NodeID. -func (mr *MockStakerMockRecorder) NodeID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockStaker)(nil).NodeID)) -} - -// PendingPriority mocks base method. -func (m *MockStaker) PendingPriority() Priority { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PendingPriority") - ret0, _ := ret[0].(Priority) - return ret0 -} - -// PendingPriority indicates an expected call of PendingPriority. -func (mr *MockStakerMockRecorder) PendingPriority() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockStaker)(nil).PendingPriority)) -} - -// PublicKey mocks base method. -func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PublicKey") - ret0, _ := ret[0].(*bls.PublicKey) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// PublicKey indicates an expected call of PublicKey. -func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) -} - -// StartTime mocks base method. -func (m *MockStaker) StartTime() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartTime") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// StartTime indicates an expected call of StartTime. -func (mr *MockStakerMockRecorder) StartTime() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockStaker)(nil).StartTime)) -} - -// SubnetID mocks base method. -func (m *MockStaker) SubnetID() ids.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubnetID") - ret0, _ := ret[0].(ids.ID) - return ret0 -} - -// SubnetID indicates an expected call of SubnetID. -func (mr *MockStakerMockRecorder) SubnetID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockStaker)(nil).SubnetID)) -} - -// Weight mocks base method. -func (m *MockStaker) Weight() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Weight") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// Weight indicates an expected call of Weight. 
-func (mr *MockStakerMockRecorder) Weight() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockStaker)(nil).Weight)) -} diff --git a/vms/platformvm/txs/mock_staker_tx.go b/vms/platformvm/txs/mock_staker_tx.go new file mode 100644 index 000000000000..2e01b15b3813 --- /dev/null +++ b/vms/platformvm/txs/mock_staker_tx.go @@ -0,0 +1,265 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: vms/platformvm/txs/staker_tx.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/txs/staker_tx.go -destination=vms/platformvm/txs/mock_staker_tx.go -package=txs -exclude_interfaces=ValidatorTx,DelegatorTx,StakerTx,PermissionlessStaker +// + +// Package txs is a generated GoMock package. +package txs + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + gomock "go.uber.org/mock/gomock" +) + +// MockStaker is a mock of Staker interface. +type MockStaker struct { + ctrl *gomock.Controller + recorder *MockStakerMockRecorder +} + +// MockStakerMockRecorder is the mock recorder for MockStaker. +type MockStakerMockRecorder struct { + mock *MockStaker +} + +// NewMockStaker creates a new mock instance. +func NewMockStaker(ctrl *gomock.Controller) *MockStaker { + mock := &MockStaker{ctrl: ctrl} + mock.recorder = &MockStakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStaker) EXPECT() *MockStakerMockRecorder { + return m.recorder +} + +// CurrentPriority mocks base method. +func (m *MockStaker) CurrentPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// CurrentPriority indicates an expected call of CurrentPriority. +func (mr *MockStakerMockRecorder) CurrentPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockStaker)(nil).CurrentPriority)) +} + +// EndTime mocks base method. +func (m *MockStaker) EndTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EndTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// EndTime indicates an expected call of EndTime. +func (mr *MockStakerMockRecorder) EndTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockStaker)(nil).EndTime)) +} + +// NodeID mocks base method. +func (m *MockStaker) NodeID() ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeID") + ret0, _ := ret[0].(ids.NodeID) + return ret0 +} + +// NodeID indicates an expected call of NodeID. +func (mr *MockStakerMockRecorder) NodeID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockStaker)(nil).NodeID)) +} + +// PublicKey mocks base method. +func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) +} + +// SubnetID mocks base method. 
+func (m *MockStaker) SubnetID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// SubnetID indicates an expected call of SubnetID. +func (mr *MockStakerMockRecorder) SubnetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockStaker)(nil).SubnetID)) +} + +// Weight mocks base method. +func (m *MockStaker) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. +func (mr *MockStakerMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockStaker)(nil).Weight)) +} + +// MockScheduledStaker is a mock of ScheduledStaker interface. +type MockScheduledStaker struct { + ctrl *gomock.Controller + recorder *MockScheduledStakerMockRecorder +} + +// MockScheduledStakerMockRecorder is the mock recorder for MockScheduledStaker. +type MockScheduledStakerMockRecorder struct { + mock *MockScheduledStaker +} + +// NewMockScheduledStaker creates a new mock instance. +func NewMockScheduledStaker(ctrl *gomock.Controller) *MockScheduledStaker { + mock := &MockScheduledStaker{ctrl: ctrl} + mock.recorder = &MockScheduledStakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduledStaker) EXPECT() *MockScheduledStakerMockRecorder { + return m.recorder +} + +// CurrentPriority mocks base method. +func (m *MockScheduledStaker) CurrentPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// CurrentPriority indicates an expected call of CurrentPriority. +func (mr *MockScheduledStakerMockRecorder) CurrentPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockScheduledStaker)(nil).CurrentPriority)) +} + +// EndTime mocks base method. +func (m *MockScheduledStaker) EndTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EndTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// EndTime indicates an expected call of EndTime. +func (mr *MockScheduledStakerMockRecorder) EndTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockScheduledStaker)(nil).EndTime)) +} + +// NodeID mocks base method. +func (m *MockScheduledStaker) NodeID() ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeID") + ret0, _ := ret[0].(ids.NodeID) + return ret0 +} + +// NodeID indicates an expected call of NodeID. +func (mr *MockScheduledStakerMockRecorder) NodeID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockScheduledStaker)(nil).NodeID)) +} + +// PendingPriority mocks base method. +func (m *MockScheduledStaker) PendingPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PendingPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// PendingPriority indicates an expected call of PendingPriority. 
+func (mr *MockScheduledStakerMockRecorder) PendingPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockScheduledStaker)(nil).PendingPriority)) +} + +// PublicKey mocks base method. +func (m *MockScheduledStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockScheduledStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockScheduledStaker)(nil).PublicKey)) +} + +// StartTime mocks base method. +func (m *MockScheduledStaker) StartTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// StartTime indicates an expected call of StartTime. +func (mr *MockScheduledStakerMockRecorder) StartTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockScheduledStaker)(nil).StartTime)) +} + +// SubnetID mocks base method. +func (m *MockScheduledStaker) SubnetID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// SubnetID indicates an expected call of SubnetID. +func (mr *MockScheduledStakerMockRecorder) SubnetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockScheduledStaker)(nil).SubnetID)) +} + +// Weight mocks base method. +func (m *MockScheduledStaker) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. +func (mr *MockScheduledStakerMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockScheduledStaker)(nil).Weight)) +} diff --git a/vms/platformvm/txs/mock_unsigned_tx.go b/vms/platformvm/txs/mock_unsigned_tx.go index 9d9ec6c94dd4..f775c5203a4f 100644 --- a/vms/platformvm/txs/mock_unsigned_tx.go +++ b/vms/platformvm/txs/mock_unsigned_tx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: UnsignedTx) +// Source: vms/platformvm/txs/unsigned_tx.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/txs/unsigned_tx.go -destination=vms/platformvm/txs/mock_unsigned_tx.go -package=txs -exclude_interfaces= +// // Package txs is a generated GoMock package. package txs @@ -55,15 +57,15 @@ func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { } // InitCtx mocks base method. -func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { +func (m *MockUnsignedTx) InitCtx(ctx *snow.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "InitCtx", arg0) + m.ctrl.Call(m, "InitCtx", ctx) } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) InitCtx(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), ctx) } // InputIDs mocks base method. @@ -95,41 +97,41 @@ func (mr *MockUnsignedTxMockRecorder) Outputs() *gomock.Call { } // SetBytes mocks base method. -func (m *MockUnsignedTx) SetBytes(arg0 []byte) { +func (m *MockUnsignedTx) SetBytes(unsignedBytes []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBytes", arg0) + m.ctrl.Call(m, "SetBytes", unsignedBytes) } // SetBytes indicates an expected call of SetBytes. -func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SetBytes(unsignedBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), unsignedBytes) } // SyntacticVerify mocks base method. -func (m *MockUnsignedTx) SyntacticVerify(arg0 *snow.Context) error { +func (m *MockUnsignedTx) SyntacticVerify(ctx *snow.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyntacticVerify", arg0) + ret := m.ctrl.Call(m, "SyntacticVerify", ctx) ret0, _ := ret[0].(error) return ret0 } // SyntacticVerify indicates an expected call of SyntacticVerify. -func (mr *MockUnsignedTxMockRecorder) SyntacticVerify(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SyntacticVerify(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyntacticVerify", reflect.TypeOf((*MockUnsignedTx)(nil).SyntacticVerify), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyntacticVerify", reflect.TypeOf((*MockUnsignedTx)(nil).SyntacticVerify), ctx) } // Visit mocks base method. -func (m *MockUnsignedTx) Visit(arg0 Visitor) error { +func (m *MockUnsignedTx) Visit(visitor Visitor) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Visit", arg0) + ret := m.ctrl.Call(m, "Visit", visitor) ret0, _ := ret[0].(error) return ret0 } // Visit indicates an expected call of Visit. -func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) Visit(visitor any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), visitor) } diff --git a/vms/platformvm/txs/priorities.go b/vms/platformvm/txs/priorities.go index 6a4fb4dc10a9..a324bdae8e1c 100644 --- a/vms/platformvm/txs/priorities.go +++ b/vms/platformvm/txs/priorities.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/priorities_test.go b/vms/platformvm/txs/priorities_test.go index ce266d5d7adb..5e629a853ca1 100644 --- a/vms/platformvm/txs/priorities_test.go +++ b/vms/platformvm/txs/priorities_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/remove_subnet_validator_tx.go b/vms/platformvm/txs/remove_subnet_validator_tx.go index 2221c2f345ca..ef55cccea290 100644 --- a/vms/platformvm/txs/remove_subnet_validator_tx.go +++ b/vms/platformvm/txs/remove_subnet_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 73f2df3cdcf2..2890b6d8d103 100644 --- a/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -51,11 +51,11 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -157,7 +157,7 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x03, } var unsignedSimpleRemoveValidatorTx UnsignedTx = simpleRemoveValidatorTx - unsignedSimpleRemoveValidatorTxBytes, err := Codec.Marshal(Version, &unsignedSimpleRemoveValidatorTx) + unsignedSimpleRemoveValidatorTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleRemoveValidatorTx) require.NoError(err) require.Equal(expectedUnsignedSimpleRemoveValidatorTxBytes, unsignedSimpleRemoveValidatorTxBytes) @@ -417,7 +417,7 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } var unsignedComplexRemoveValidatorTx UnsignedTx = complexRemoveValidatorTx - unsignedComplexRemoveValidatorTxBytes, err := Codec.Marshal(Version, &unsignedComplexRemoveValidatorTx) + unsignedComplexRemoveValidatorTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexRemoveValidatorTx) require.NoError(err) require.Equal(expectedUnsignedComplexRemoveValidatorTxBytes, unsignedComplexRemoveValidatorTxBytes) diff --git a/vms/platformvm/txs/reward_validator_tx.go b/vms/platformvm/txs/reward_validator_tx.go index d4b579f1b95b..85129af4695c 100644 --- a/vms/platformvm/txs/reward_validator_tx.go +++ b/vms/platformvm/txs/reward_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -26,9 +26,6 @@ type RewardValidatorTx struct { // ID of the tx that created the delegator/validator being removed/rewarded TxID ids.ID `serialize:"true" json:"txID"` - // Marks if this validator should be rewarded according to this node. 
- ShouldPreferCommit bool `json:"-"` - unsignedBytes []byte // Unsigned byte representation of this data } diff --git a/vms/platformvm/txs/staker_tx.go b/vms/platformvm/txs/staker_tx.go index 049d3519375f..8adb1ac23f7d 100644 --- a/vms/platformvm/txs/staker_tx.go +++ b/vms/platformvm/txs/staker_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -48,9 +48,13 @@ type Staker interface { // PublicKey returns the BLS public key registered by this transaction. If // there was no key registered by this transaction, it will return false. PublicKey() (*bls.PublicKey, bool, error) - StartTime() time.Time EndTime() time.Time Weight() uint64 - PendingPriority() Priority CurrentPriority() Priority } + +type ScheduledStaker interface { + Staker + StartTime() time.Time + PendingPriority() Priority +} diff --git a/vms/platformvm/txs/subnet_validator.go b/vms/platformvm/txs/subnet_validator.go index d9da9d31b739..a7c683f35a8f 100644 --- a/vms/platformvm/txs/subnet_validator.go +++ b/vms/platformvm/txs/subnet_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/subnet_validator_test.go b/vms/platformvm/txs/subnet_validator_test.go index 7fcbf3e44e4e..cdfbeaf159a5 100644 --- a/vms/platformvm/txs/subnet_validator_test.go +++ b/vms/platformvm/txs/subnet_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/transfer_subnet_ownership_tx.go b/vms/platformvm/txs/transfer_subnet_ownership_tx.go index 78dbf28b48b4..4fa2807809ce 100644 --- a/vms/platformvm/txs/transfer_subnet_ownership_tx.go +++ b/vms/platformvm/txs/transfer_subnet_ownership_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go b/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go index e8cddeb3e1d0..39866c138e2b 100644 --- a/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go +++ b/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -164,7 +164,7 @@ func TestTransferSubnetOwnershipTxSerialization(t *testing.T) { 0x44, 0x55, 0x66, 0x77, } var unsignedSimpleTransferSubnetOwnershipTx UnsignedTx = simpleTransferSubnetOwnershipTx - unsignedSimpleTransferSubnetOwnershipTxBytes, err := Codec.Marshal(Version, &unsignedSimpleTransferSubnetOwnershipTx) + unsignedSimpleTransferSubnetOwnershipTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleTransferSubnetOwnershipTx) require.NoError(err) require.Equal(expectedUnsignedSimpleTransferSubnetOwnershipTxBytes, unsignedSimpleTransferSubnetOwnershipTxBytes) @@ -438,7 +438,7 @@ func TestTransferSubnetOwnershipTxSerialization(t *testing.T) { 0x44, 0x55, 0x66, 0x77, } var unsignedComplexTransferSubnetOwnershipTx UnsignedTx = complexTransferSubnetOwnershipTx - unsignedComplexTransferSubnetOwnershipTxBytes, err := Codec.Marshal(Version, &unsignedComplexTransferSubnetOwnershipTx) + unsignedComplexTransferSubnetOwnershipTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexTransferSubnetOwnershipTx) require.NoError(err) require.Equal(expectedUnsignedComplexTransferSubnetOwnershipTxBytes, unsignedComplexTransferSubnetOwnershipTxBytes) diff --git a/vms/platformvm/txs/transform_subnet_tx.go b/vms/platformvm/txs/transform_subnet_tx.go index f540ea674f4c..1ba543e1aa05 100644 --- a/vms/platformvm/txs/transform_subnet_tx.go +++ b/vms/platformvm/txs/transform_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/transform_subnet_tx_test.go b/vms/platformvm/txs/transform_subnet_tx_test.go index 4b88fd60ef3a..d5a237667ded 100644 --- a/vms/platformvm/txs/transform_subnet_tx_test.go +++ b/vms/platformvm/txs/transform_subnet_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -223,7 +223,7 @@ func TestTransformSubnetTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x03, } var unsignedSimpleTransformTx UnsignedTx = simpleTransformTx - unsignedSimpleTransformTxBytes, err := Codec.Marshal(Version, &unsignedSimpleTransformTx) + unsignedSimpleTransformTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleTransformTx) require.NoError(err) require.Equal(expectedUnsignedSimpleTransformTxBytes, unsignedSimpleTransformTxBytes) @@ -520,7 +520,7 @@ func TestTransformSubnetTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } var unsignedComplexTransformTx UnsignedTx = complexTransformTx - unsignedComplexTransformTxBytes, err := Codec.Marshal(Version, &unsignedComplexTransformTx) + unsignedComplexTransformTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexTransformTx) require.NoError(err) require.Equal(expectedUnsignedComplexTransformTxBytes, unsignedComplexTransformTxBytes) diff --git a/vms/platformvm/txs/tx.go b/vms/platformvm/txs/tx.go index 27cc812e5a79..9874f66e0468 100644 --- a/vms/platformvm/txs/tx.go +++ b/vms/platformvm/txs/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" @@ -18,6 +19,8 @@ import ( ) var ( + _ gossip.Gossipable = (*Tx)(nil) + ErrNilSignedTx = errors.New("nil signed tx is not valid") errSignedTxNotInitialized = errors.New("signed tx was never initialized and is not valid") @@ -45,12 +48,12 @@ func NewSigned( } func (tx *Tx) Initialize(c codec.Manager) error { - signedBytes, err := c.Marshal(Version, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal ProposalTx: %w", err) } - unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) + unsignedBytesLen, err := c.Size(CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } @@ -75,7 +78,7 @@ func Parse(c codec.Manager, signedBytes []byte) (*Tx, error) { return nil, fmt.Errorf("couldn't parse tx: %w", err) } - unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) + unsignedBytesLen, err := c.Size(CodecVersion, &tx.Unsigned) if err != nil { return nil, fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } @@ -93,6 +96,10 @@ func (tx *Tx) ID() ids.ID { return tx.TxID } +func (tx *Tx) GossipID() ids.ID { + return tx.TxID +} + // UTXOs returns the UTXOs transaction is producing. func (tx *Tx) UTXOs() []*avax.UTXO { outs := tx.Unsigned.Outputs() @@ -125,7 +132,7 @@ func (tx *Tx) SyntacticVerify(ctx *snow.Context) error { // Note: We explicitly pass the codec in Sign since we may need to sign P-Chain // genesis txs whose length exceed the max length of txs.Codec. func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { - unsignedBytes, err := c.Marshal(Version, &tx.Unsigned) + unsignedBytes, err := c.Marshal(CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal UnsignedTx: %w", err) } @@ -146,7 +153,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { tx.Creds = append(tx.Creds, cred) // Attach credential } - signedBytes, err := c.Marshal(Version, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal ProposalTx: %w", err) } diff --git a/vms/platformvm/txs/txheap/by_age.go b/vms/platformvm/txs/txheap/by_age.go deleted file mode 100644 index be888c437a0f..000000000000 --- a/vms/platformvm/txs/txheap/by_age.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package txheap - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/heap" -) - -func NewByAge() Heap { - return &txHeap{ - heap: heap.NewMap[ids.ID, heapTx](func(a, b heapTx) bool { - return a.age < b.age - }), - } -} diff --git a/vms/platformvm/txs/txheap/by_end_time.go b/vms/platformvm/txs/txheap/by_end_time.go index ba144448919d..9cbba82cef07 100644 --- a/vms/platformvm/txs/txheap/by_end_time.go +++ b/vms/platformvm/txs/txheap/by_end_time.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -13,6 +13,12 @@ import ( var _ TimedHeap = (*byEndTime)(nil) +type TimedHeap interface { + Heap + + Timestamp() time.Time +} + type byEndTime struct { txHeap } diff --git a/vms/platformvm/txs/txheap/by_end_time_test.go b/vms/platformvm/txs/txheap/by_end_time_test.go index 8ea152d27e02..a629b7b1c3bd 100644 --- a/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/vms/platformvm/txs/txheap/by_end_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txheap @@ -23,7 +23,7 @@ func TestByEndTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, }, @@ -34,7 +34,7 @@ func TestByEndTime(t *testing.T) { utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, }, @@ -45,7 +45,7 @@ func TestByEndTime(t *testing.T) { utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, }, diff --git a/vms/platformvm/txs/txheap/by_start_time.go b/vms/platformvm/txs/txheap/by_start_time.go deleted file mode 100644 index f19c28d76436..000000000000 --- a/vms/platformvm/txs/txheap/by_start_time.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package txheap - -import ( - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/heap" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ TimedHeap = (*byStartTime)(nil) - -type TimedHeap interface { - Heap - - Timestamp() time.Time -} - -type byStartTime struct { - txHeap -} - -func NewByStartTime() TimedHeap { - return &byStartTime{ - txHeap: txHeap{ - heap: heap.NewMap[ids.ID, heapTx](func(a, b heapTx) bool { - aTime := a.tx.Unsigned.(txs.Staker).StartTime() - bTime := b.tx.Unsigned.(txs.Staker).StartTime() - return aTime.Before(bTime) - }), - }, - } -} - -func (h *byStartTime) Timestamp() time.Time { - return h.Peek().Unsigned.(txs.Staker).StartTime() -} diff --git a/vms/platformvm/txs/txheap/by_start_time_test.go b/vms/platformvm/txs/txheap/by_start_time_test.go deleted file mode 100644 index 164e2ec35e59..000000000000 --- a/vms/platformvm/txs/txheap/by_start_time_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package txheap - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -func TestByStartTime(t *testing.T) { - require := require.New(t) - - txHeap := NewByStartTime() - - baseTime := time.Now() - - utx0 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{0}, - Start: uint64(baseTime.Unix()) + 1, - End: uint64(baseTime.Unix()) + 1, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx0 := &txs.Tx{Unsigned: utx0} - require.NoError(tx0.Initialize(txs.Codec)) - - utx1 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{1}, - Start: uint64(baseTime.Unix()) + 2, - End: uint64(baseTime.Unix()) + 2, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx1 := &txs.Tx{Unsigned: utx1} - require.NoError(tx1.Initialize(txs.Codec)) - - utx2 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{1}, - Start: uint64(baseTime.Unix()) + 3, - End: uint64(baseTime.Unix()) + 3, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx2 := &txs.Tx{Unsigned: utx2} - require.NoError(tx2.Initialize(txs.Codec)) - - txHeap.Add(tx2) - require.Equal(utx2.EndTime(), txHeap.Timestamp()) - - txHeap.Add(tx1) - require.Equal(utx1.EndTime(), txHeap.Timestamp()) - - txHeap.Add(tx0) - require.Equal(utx0.EndTime(), txHeap.Timestamp()) - require.Equal(tx0, txHeap.Peek()) -} diff --git a/vms/platformvm/txs/txheap/heap.go b/vms/platformvm/txs/txheap/heap.go index 3727bb891d92..7c9e33d3f989 100644 --- a/vms/platformvm/txs/txheap/heap.go +++ b/vms/platformvm/txs/txheap/heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txheap diff --git a/vms/platformvm/txs/txheap/with_metrics.go b/vms/platformvm/txs/txheap/with_metrics.go deleted file mode 100644 index 60ab4f93244d..000000000000 --- a/vms/platformvm/txs/txheap/with_metrics.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package txheap - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ Heap = (*withMetrics)(nil) - -type withMetrics struct { - Heap - - numTxs prometheus.Gauge -} - -func NewWithMetrics( - txHeap Heap, - namespace string, - registerer prometheus.Registerer, -) (Heap, error) { - h := &withMetrics{ - Heap: txHeap, - numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "count", - Help: "Number of transactions in the heap", - }), - } - return h, registerer.Register(h.numTxs) -} - -func (h *withMetrics) Add(tx *txs.Tx) { - h.Heap.Add(tx) - h.numTxs.Set(float64(h.Heap.Len())) -} - -func (h *withMetrics) Remove(txID ids.ID) *txs.Tx { - tx := h.Heap.Remove(txID) - h.numTxs.Set(float64(h.Heap.Len())) - return tx -} - -func (h *withMetrics) RemoveTop() *txs.Tx { - tx := h.Heap.RemoveTop() - h.numTxs.Set(float64(h.Heap.Len())) - return tx -} diff --git a/vms/platformvm/txs/unsigned_tx.go b/vms/platformvm/txs/unsigned_tx.go index 7fe1702b0197..5b3e62dd1c33 100644 --- a/vms/platformvm/txs/unsigned_tx.go +++ b/vms/platformvm/txs/unsigned_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/vms/platformvm/txs/validator.go b/vms/platformvm/txs/validator.go index ea7d048f5074..726ba23b1c5d 100644 --- a/vms/platformvm/txs/validator.go +++ b/vms/platformvm/txs/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -40,11 +40,6 @@ func (v *Validator) EndTime() time.Time { return time.Unix(int64(v.End), 0) } -// Duration is the amount of time that this validator will be in the validator set -func (v *Validator) Duration() time.Duration { - return v.EndTime().Sub(v.StartTime()) -} - // Weight is this validator's weight when sampling func (v *Validator) Weight() uint64 { return v.Wght diff --git a/vms/platformvm/txs/validator_test.go b/vms/platformvm/txs/validator_test.go index 3361d11939b4..0b9e749ca009 100644 --- a/vms/platformvm/txs/validator_test.go +++ b/vms/platformvm/txs/validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -9,22 +9,20 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) const defaultWeight = 10000 -// each key controls an address that has [defaultBalance] AVAX at genesis -var keys = secp256k1.TestKeys() - func TestBoundedBy(t *testing.T) { require := require.New(t) + nodeID := ids.GenerateTestNodeID() + // case 1: a starts, a finishes, b starts, b finishes aStartTime := uint64(0) aEndTIme := uint64(1) a := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: aStartTime, End: aEndTIme, Wght: defaultWeight, @@ -33,12 +31,12 @@ func TestBoundedBy(t *testing.T) { bStartTime := uint64(2) bEndTime := uint64(3) b := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: bStartTime, End: bEndTime, Wght: defaultWeight, } - require.False(BoundedBy(a.StartTime(), b.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 2: a starts, b starts, a finishes, b finishes diff --git a/vms/platformvm/txs/visitor.go b/vms/platformvm/txs/visitor.go index 14b33e7d58a8..ea454f33bad1 100644 --- a/vms/platformvm/txs/visitor.go +++ b/vms/platformvm/txs/visitor.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs diff --git a/vms/platformvm/utxo/camino_helpers_test.go b/vms/platformvm/utxo/camino_helpers_test.go index c0f4c0e2c3b8..2cbe4a42d7de 100644 --- a/vms/platformvm/utxo/camino_helpers_test.go +++ b/vms/platformvm/utxo/camino_helpers_test.go @@ -5,12 +5,12 @@ package utxo import ( "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" @@ -43,7 +43,7 @@ func defaultCaminoHandler(t *testing.T) *caminoHandler { vm := &secp256k1fx.TestVM{ Clk: *clk, Log: logging.NoLog{}, - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), } fx := &secp256k1fx.Fx{} require.NoError(t, fx.InitializeVM(vm)) @@ -51,7 +51,7 @@ func defaultCaminoHandler(t *testing.T) *caminoHandler { return &caminoHandler{ handler: handler{ - ctx: snow.DefaultContextTest(), + ctx: test.Context(t), clk: clk, fx: fx, }, diff --git a/vms/platformvm/utxo/camino_locked_test.go b/vms/platformvm/utxo/camino_locked_test.go index 18ddecced1e6..b3d3281c6326 100644 --- a/vms/platformvm/utxo/camino_locked_test.go +++ b/vms/platformvm/utxo/camino_locked_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -155,7 +154,7 @@ func TestLock(t *testing.T) { require.NoError(t, fx.Bootstrapped()) config := test.Config(t, test.PhaseLast) - ctx := snow.DefaultContextTest() + ctx := test.Context(t) baseDB := versiondb.New(memdb.New()) rewardsCalc := reward.NewCalculator(config.RewardConfig) diff --git a/vms/platformvm/utxo/camino_multisig_test.go b/vms/platformvm/utxo/camino_multisig_test.go index 56a5af26954c..c04ed7cb656a 100644 --- a/vms/platformvm/utxo/camino_multisig_test.go +++ b/vms/platformvm/utxo/camino_multisig_test.go @@ -5,6 +5,7 @@ package utxo import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -83,7 +84,7 @@ func TestUTXOWithMsigVerify(t *testing.T) { func TestUTXOWithMSigSerialized(t *testing.T) { // Create a new codec manager and linear codec instance manager := codec.NewDefaultManager() - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) // Register all relevant types with the codec errs := wrappers.Errs{} diff --git a/vms/platformvm/utxo/handler.go b/vms/platformvm/utxo/handler.go index 1332a7e18b9d..b26e5dcf85d7 100644 --- a/vms/platformvm/utxo/handler.go +++ b/vms/platformvm/utxo/handler.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utxo @@ -573,7 +573,7 @@ func (h *handler) VerifySpendUTXOs( return fmt.Errorf("expected fx.Owned but got %T", out) } owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.Version, owner) + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) if err != nil { return fmt.Errorf("couldn't marshal owner: %w", err) } @@ -622,7 +622,7 @@ func (h *handler) VerifySpendUTXOs( return fmt.Errorf("expected fx.Owned but got %T", out) } owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.Version, owner) + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) if err != nil { return fmt.Errorf("couldn't marshal owner: %w", err) } diff --git a/vms/platformvm/utxo/handler_test.go b/vms/platformvm/utxo/handler_test.go index 38e8e857bddd..7e31fb1099ca 100644 --- a/vms/platformvm/utxo/handler_test.go +++ b/vms/platformvm/utxo/handler_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -49,8 +49,10 @@ func TestVerifySpendUTXOs(t *testing.T) { require.NoError(t, fx.InitializeVM(&secp256k1fx.TestVM{})) require.NoError(t, fx.Bootstrapped()) + ctx := snowtest.Context(t, snowtest.PChainID) + h := &handler{ - ctx: snow.DefaultContextTest(), + ctx: ctx, clk: &mockable.Clock{}, fx: fx, } diff --git a/vms/platformvm/utxo/mock_verifier.go b/vms/platformvm/utxo/mock_verifier.go index 1f70549e2b22..c8b9c6db15ac 100644 --- a/vms/platformvm/utxo/mock_verifier.go +++ b/vms/platformvm/utxo/mock_verifier.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/utxo (interfaces: Verifier) +// +// Generated by this command: +// +// mockgen -package=utxo -destination=vms/platformvm/utxo/mock_verifier.go github.com/ava-labs/avalanchego/vms/platformvm/utxo Verifier +// // Package utxo is a generated GoMock package. package utxo @@ -53,7 +55,7 @@ func (m *MockVerifier) Unlock(arg0 state.Chain, arg1 []ids.ID, arg2 locked.State } // Unlock indicates an expected call of Unlock. -func (mr *MockVerifierMockRecorder) Unlock(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) Unlock(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockVerifier)(nil).Unlock), arg0, arg1, arg2) } @@ -67,7 +69,7 @@ func (m *MockVerifier) VerifyLock(arg0 txs.UnsignedTx, arg1 avax.UTXOGetter, arg } // VerifyLock indicates an expected call of VerifyLock. 
-func (mr *MockVerifierMockRecorder) VerifyLock(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifyLock(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyLock", reflect.TypeOf((*MockVerifier)(nil).VerifyLock), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } @@ -81,7 +83,7 @@ func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 avax.UTXOGetter, ar } // VerifySpend indicates an expected call of VerifySpend. -func (mr *MockVerifierMockRecorder) VerifySpend(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifySpend(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifySpend", reflect.TypeOf((*MockVerifier)(nil).VerifySpend), arg0, arg1, arg2, arg3, arg4, arg5) } @@ -95,7 +97,7 @@ func (m *MockVerifier) VerifySpendUTXOs(arg0 avax.UTXOGetter, arg1 txs.UnsignedT } // VerifySpendUTXOs indicates an expected call of VerifySpendUTXOs. -func (mr *MockVerifierMockRecorder) VerifySpendUTXOs(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifySpendUTXOs(arg0, arg1, arg2, arg3, arg4, arg5, arg6 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifySpendUTXOs", reflect.TypeOf((*MockVerifier)(nil).VerifySpendUTXOs), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } @@ -109,7 +111,7 @@ func (m *MockVerifier) VerifyUnlockDeposit(arg0 avax.UTXOGetter, arg1 txs.Unsign } // VerifyUnlockDeposit indicates an expected call of VerifyUnlockDeposit. -func (mr *MockVerifierMockRecorder) VerifyUnlockDeposit(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifyUnlockDeposit(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUnlockDeposit", reflect.TypeOf((*MockVerifier)(nil).VerifyUnlockDeposit), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index fab84cc6d50b..5ca5bfd6c241 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" @@ -373,31 +374,15 @@ func addPrimaryValidatorWithoutBLSKey(vm *VM, data *validatorInputData) (*state. 
} func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { - stakerTx := signedTx.Unsigned.(txs.StakerTx) - if err := vm.Builder.AddUnverifiedTx(signedTx); err != nil { - return nil, fmt.Errorf("could not add tx to mempool: %w", err) - } + vm.ctx.Lock.Unlock() + err := vm.issueTx(context.Background(), signedTx) + vm.ctx.Lock.Lock() - blk, err := vm.Builder.BuildBlock(context.Background()) if err != nil { - return nil, fmt.Errorf("failed building block: %w", err) - } - if err := blk.Verify(context.Background()); err != nil { - return nil, fmt.Errorf("failed verifying block: %w", err) - } - if err := blk.Accept(context.Background()); err != nil { - return nil, fmt.Errorf("failed accepting block: %w", err) - } - if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { - return nil, fmt.Errorf("failed setting preference: %w", err) + return nil, fmt.Errorf("could not add tx to mempool: %w", err) } - // move time ahead, promoting the validator to current - currentTime := stakerTx.StartTime() - vm.clock.Set(currentTime) - vm.state.SetTimestamp(currentTime) - - blk, err = vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(context.Background()) if err != nil { return nil, fmt.Errorf("failed building block: %w", err) } @@ -411,6 +396,7 @@ func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { return nil, fmt.Errorf("failed setting preference: %w", err) } + stakerTx := signedTx.Unsigned.(txs.Staker) return vm.state.GetCurrentValidator(stakerTx.SubnetID(), stakerTx.NodeID()) } @@ -752,7 +738,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { atomicDB := prefixdb.New([]byte{1}, baseDB) msgChan := make(chan common.Message, 1) - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) m := atomic.NewMemory(atomicDB) ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) @@ -765,7 +751,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { return nil } - genesisBytes, err := buildCustomGenesis() + genesisBytes, err := buildCustomGenesis(ctx.AVAXAssetID) if err != nil { return nil, ids.Empty, err } @@ -802,7 +788,10 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { if err != nil { return nil, ids.Empty, err } - if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), testSubnet1) + vm.ctx.Lock.Lock() + if err != nil { return nil, ids.Empty, err } @@ -823,7 +812,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { return vm, testSubnet1.ID(), nil } -func buildCustomGenesis() ([]byte, error) { +func buildCustomGenesis(avaxAssetID ids.ID) ([]byte, error) { genesisUTXOs := make([]api.UTXO, len(keys)) for i, key := range keys { id := key.PublicKey().Address() @@ -841,7 +830,7 @@ func buildCustomGenesis() ([]byte, error) { // won't find next staker to promote/evict from stakers set. 
Contrary to // what happens with production code we push such validator at the end of // times, so to avoid interference with our tests - nodeID := ids.NodeID(keys[len(keys)-1].PublicKey().Address()) + nodeID := genesisNodeIDs[len(genesisNodeIDs)-1] addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { return nil, err @@ -849,8 +838,8 @@ func buildCustomGenesis() ([]byte, error) { starTime := mockable.MaxTime.Add(-1 * defaultMinStakingDuration) endTime := mockable.MaxTime - genesisValidator := api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidator := api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(starTime.Unix()), EndTime: json.Uint64(endTime.Unix()), NodeID: nodeID, @@ -871,7 +860,7 @@ func buildCustomGenesis() ([]byte, error) { NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: avaxAssetID, UTXOs: genesisUTXOs, - Validators: []api.PermissionlessValidator{genesisValidator}, + Validators: []api.GenesisPermissionlessValidator{genesisValidator}, Chains: nil, Time: json.Uint64(defaultGenesisTime.Unix()), InitialSupply: json.Uint64(360 * units.MegaAvax), diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index fb7c314c90a7..2c8b025a128b 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -48,10 +48,6 @@ type State interface { GetLastAccepted() ids.ID GetStatelessBlock(blockID ids.ID) (block.Block, error) - // ApplyCurrentValidators adds all the current validators and delegators of - // [subnetID] into [vdrs]. - ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error - // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis // block until it has applied all of the diffs up to and including // [endHeight]. Applying the diffs modifies [validators]. @@ -346,22 +342,7 @@ func (m *manager) getCurrentValidatorSets( ctx context.Context, subnetID ids.ID, ) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - subnetManager := m.cfg.Validators - if subnetManager.Count(subnetID) == 0 { - // If this subnet isn't tracked, there will not be any registered - // validators. To calculate the current validators we need to first - // fetch them from state. We generate a new manager as we don't want to - // modify that long-lived reference. - // - // TODO: remove this once all subnets are included in the validator - // manager. - subnetManager = validators.NewManager() - if err := m.state.ApplyCurrentValidators(subnetID, subnetManager); err != nil { - return nil, nil, 0, err - } - } - - subnetMap := subnetManager.GetMap(subnetID) + subnetMap := m.cfg.Validators.GetMap(subnetID) primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) return subnetMap, primaryMap, currentHeight, err diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 54d0e264e63e..7c84589574df 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" @@ -66,8 +65,8 @@ func BenchmarkGetValidatorSet(b *testing.B) { addr, err := address.FormatBech32(constants.UnitTestHRP, ids.GenerateTestShortID().Bytes()) require.NoError(err) - genesisValidators := []api.PermissionlessValidator{{ - Staker: api.Staker{ + genesisValidators := []api.GenesisPermissionlessValidator{{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(genesisEndTime.Unix()), NodeID: ids.GenerateTestNodeID(), @@ -129,7 +128,6 @@ func BenchmarkGetValidatorSet(b *testing.B) { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - new(utils.Atomic[bool]), ) require.NoError(err) diff --git a/vms/platformvm/validators/test_manager.go b/vms/platformvm/validators/test_manager.go index d7ffe993248e..e04742f265c7 100644 --- a/vms/platformvm/validators/test_manager.go +++ b/vms/platformvm/validators/test_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 6a569f147f4c..1e1c85cde968 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -8,15 +8,19 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( "context" + "errors" "fmt" + "math" "net/http" + "sync" + "time" "github.com/gorilla/rpc/v2" @@ -41,11 +45,11 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -71,6 +75,7 @@ var ( type VM struct { config.Config blockbuilder.Builder + network.Network validators.State metrics metrics.Metrics @@ -96,6 +101,12 @@ type VM struct { txBuilder txbuilder.CaminoBuilder manager blockexecutor.Manager + // Cancelled on shutdown + onShutdownCtx context.Context + // Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown() + onShutdownCtxCancel context.CancelFunc + awaitShutdown sync.WaitGroup + // TODO: Remove after v1.11.x is activated pruned utils.Atomic[bool] } @@ -135,7 +146,8 @@ func (vm *VM) Initialize( vm.ctx = chainCtx vm.db = db - vm.codecRegistry = linearcodec.NewCaminoDefault() + // Note: this codec is never used to serialize anything + vm.codecRegistry = linearcodec.NewCaminoDefault(time.Time{}) vm.fx = &secp256k1fx.CaminoFx{} if err := vm.fx.Initialize(vm); err != nil { return err @@ -152,7 +164,6 @@ func (vm *VM) Initialize( vm.ctx, vm.metrics, rewards, - &vm.bootstrapped, ) if err != nil { return err @@ -187,9 +198,7 @@ func (vm *VM) Initialize( Bootstrapped: &vm.bootstrapped, } - // Note: There is a circular dependency between the mempool and block - // builder which is broken by passing in the vm. - mempool, err := mempool.NewMempool("mempool", registerer, vm) + mempool, err := mempool.New("mempool", registerer, toEngine) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } @@ -201,13 +210,43 @@ func (vm *VM) Initialize( txExecutorBackend, validatorManager, ) - vm.Builder = blockbuilder.CaminoNew( + + txVerifier := network.NewLockedTxVerifier(&txExecutorBackend.Ctx.Lock, vm.manager) + vm.Network, err = network.NewCamino( + chainCtx.Log, + chainCtx.NodeID, + chainCtx.SubnetID, + validators.NewLockedState( + &chainCtx.Lock, + validatorManager, + ), + txVerifier, + mempool, + txExecutorBackend.Config.PartialSyncPrimaryNetwork, + appSender, + registerer, + execConfig.Network, + vm.txBuilder, + &txExecutorBackend.Ctx.Lock, + ) + if err != nil { + return fmt.Errorf("failed to initialize network: %w", err) + } + + vm.onShutdownCtx, vm.onShutdownCtxCancel = context.WithCancel(context.Background()) + vm.awaitShutdown.Add(1) + go func() { + defer vm.awaitShutdown.Done() + + // Invariant: Gossip must never grab the context lock. + vm.Network.Gossip(vm.onShutdownCtx) + }() + + vm.Builder = blockbuilder.New( mempool, vm.txBuilder, txExecutorBackend, vm.manager, - toEngine, - appSender, ) // Create all of the chains that the database says exist @@ -226,6 +265,10 @@ func (vm *VM) Initialize( return err } + // Incrementing [awaitShutdown] would cause a deadlock since + // [periodicallyPruneMempool] grabs the context lock. 
+ go vm.periodicallyPruneMempool(execConfig.MempoolPruneFrequency) + shouldPrune, err := vm.state.ShouldPrune() if err != nil { return fmt.Errorf( @@ -253,6 +296,49 @@ func (vm *VM) Initialize( return nil } +func (vm *VM) periodicallyPruneMempool(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + for { + select { + case <-vm.onShutdownCtx.Done(): + return + case <-ticker.C: + if err := vm.pruneMempool(); err != nil { + vm.ctx.Log.Debug("pruning mempool failed", + zap.Error(err), + ) + } + } + } +} + +func (vm *VM) pruneMempool() error { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // Packing all of the transactions in order performs additional checks that + // the MempoolTxVerifier doesn't include. So, evicting transactions from + // here is expected to happen occasionally. + blockTxs, err := vm.Builder.PackBlockTxs(math.MaxInt) + if err != nil { + return err + } + + for _, tx := range blockTxs { + if err := vm.Builder.Add(tx); err != nil { + vm.ctx.Log.Debug( + "failed to reissue tx", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + } + } + + return nil +} + // Create all chains that exist that this node validates. func (vm *VM) initBlockchains() error { if vm.Config.PartialSyncPrimaryNetwork { @@ -315,17 +401,21 @@ func (vm *VM) onNormalOperationsStarted() error { } primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - if err := vm.uptimeManager.StartTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { return err } + vl := validators.NewLogger(vm.ctx.Log, constants.PrimaryNetworkID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + for subnetID := range vm.TrackedSubnets { vdrIDs := vm.Validators.GetValidatorIDs(subnetID) - if err := vm.uptimeManager.StartTracking(vdrIDs, subnetID); err != nil { return err } + + vl := validators.NewLogger(vm.ctx.Log, subnetID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(subnetID, vl) } if err := vm.state.Commit(); err != nil { @@ -333,7 +423,7 @@ func (vm *VM) onNormalOperationsStarted() error { } // Start the block builder - vm.Builder.ResetBlockTimer() + vm.Builder.StartBlockTimer() return nil } @@ -354,7 +444,10 @@ func (vm *VM) Shutdown(context.Context) error { return nil } - vm.Builder.Shutdown() + vm.onShutdownCtxCancel() + vm.awaitShutdown.Wait() + + vm.Builder.ShutdownBlockTimer() if vm.bootstrapped.Get() { primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) @@ -405,7 +498,9 @@ func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { // SetPreference sets the preferred block to be the one with ID [blkID] func (vm *VM) SetPreference(_ context.Context, blkID ids.ID) error { - vm.Builder.SetPreference(blkID) + if vm.manager.SetPreference(blkID) { + vm.Builder.ResetBlockTimer() + } return nil } @@ -437,18 +532,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { }, err } -// CreateStaticHandlers returns a map where: -// * keys are API endpoint extensions -// * values are API handlers -func (*VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - return map[string]http.Handler{ - "": server, - }, server.RegisterService(&api.StaticService{}, "platform") -} - func (vm *VM) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { return 
vm.uptimeManager.Connect(nodeID, constants.PrimaryNetworkID) } @@ -487,3 +570,16 @@ func (vm *VM) VerifyHeightIndex(_ context.Context) error { func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { return vm.state.GetBlockIDAtHeight(height) } + +func (vm *VM) issueTx(ctx context.Context, tx *txs.Tx) error { + err := vm.Network.IssueTx(ctx, tx) + if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { + vm.ctx.Log.Debug("failed to add tx to mempool", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + return err + } + + return nil +} diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 8416c4114662..f4e84d0776ae 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm @@ -14,25 +14,33 @@ import ( "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -46,12 +54,9 @@ import ( func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() validatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) @@ -73,7 +78,9 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -105,7 +112,9 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // 
trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -139,7 +148,9 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + vm.ctx.Lock.Lock() addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -163,12 +174,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(addThirdDelegatorTx) + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), addThirdDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestAddDelegatorTxHeapCorruption(t *testing.T) { - validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := defaultMaxValidatorStake / 5 @@ -206,20 +219,17 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, apricotPhase3) vm.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -227,7 +237,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -236,7 +246,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the add validator tx - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -250,7 +262,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -258,7 +270,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the first add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -272,7 +286,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), 
+ nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -280,7 +294,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the second add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the second add delegator tx addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -294,7 +310,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator3Stake, uint64(delegator3StartTime.Unix()), uint64(delegator3EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -302,7 +318,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the third add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addThirdDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addThirdDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the third add delegator tx addThirdDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -316,7 +334,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator4Stake, uint64(delegator4StartTime.Unix()), uint64(delegator4EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -324,7 +342,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the fourth add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFourthDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFourthDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the fourth add delegator tx addFourthDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -340,7 +360,6 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { // panic. 
func TestUnverifiedParentPanicRegression(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) baseDB := memdb.New() atomicDB := prefixdb.New([]byte{1}, baseDB) @@ -352,16 +371,20 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, }} - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + msgChan := make(chan common.Message, 1) require.NoError(vm.Initialize( context.Background(), @@ -379,8 +402,8 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { vm.ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) // set time to post Banff fork - vm.clock.Set(banffForkTime.Add(time.Second)) - vm.state.SetTimestamp(banffForkTime.Add(time.Second)) + vm.clock.Set(latestForkTime.Add(time.Second)) + vm.state.SetTimestamp(latestForkTime.Add(time.Second)) key0 := keys[0] key1 := keys[1] @@ -411,11 +434,10 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessStandardBlk, err := block.NewBanffStandardBlock( @@ -464,29 +486,21 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t) + vm, baseDB, mutableSharedMemory := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + nodeID := ids.GenerateTestNodeID() newValidatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime := newValidatorStartTime.Add(defaultMinStakingDuration) - key, err := secp256k1.NewPrivateKey() - require.NoError(err) - - nodeID := ids.NodeID(key.PublicKey().Address()) - // Create the tx to add a new validator addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( vm.MinValidatorStake, uint64(newValidatorStartTime.Unix()), uint64(newValidatorEndTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, @@ -494,11 +508,10 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) // Create the standard block to add the new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessBlk, err := block.NewBanffStandardBlock( @@ -589,7 +602,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err 
:= txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() @@ -659,7 +672,6 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -679,20 +691,16 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t) + vm, baseDB, mutableSharedMemory := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() vm.state.SetCurrentSupply(constants.PrimaryNetworkID, defaultRewardConfig.SupplyCap/2) newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID0 := ids.NodeID(ids.GenerateTestShortID()) + nodeID0 := ids.GenerateTestNodeID() // Create the tx to add the first new validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( @@ -700,7 +708,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), nodeID0, - ids.ShortID(nodeID0), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, @@ -708,11 +716,10 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) // Create the standard block to add the first new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessAddValidatorStandardBlk0, err := block.NewBanffStandardBlock( @@ -837,7 +844,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() @@ -865,7 +872,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { newValidatorStartTime1 := newValidatorStartTime0.Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime1 := newValidatorStartTime1.Add(defaultMaxStakingDuration) - nodeID1 := ids.NodeID(ids.GenerateTestShortID()) + nodeID1 := ids.GenerateTestNodeID() // Create the tx to add the second new validator addValidatorTx1, err := vm.txBuilder.NewAddValidatorTx( @@ -873,7 +880,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime1.Unix()), uint64(newValidatorEndTime1.Unix()), nodeID1, - ids.ShortID(nodeID1), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[1]}, ids.ShortEmpty, @@ -968,7 +975,6 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -997,30 +1003,20 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require := 
require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() - - nodeID0 := ids.NodeID(keys[0].PublicKey().Address()) - nodeID1 := ids.NodeID(keys[1].PublicKey().Address()) - nodeID2 := ids.NodeID(keys[2].PublicKey().Address()) - nodeID3 := ids.NodeID(keys[3].PublicKey().Address()) - nodeID4 := ids.NodeID(keys[4].PublicKey().Address()) + defer vm.ctx.Lock.Unlock() currentHeight, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) require.Equal(uint64(1), currentHeight) expectedValidators1 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, } validators, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) require.NoError(err) @@ -1031,14 +1027,14 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID5 := ids.GenerateTestNodeID() + extraNodeID := ids.GenerateTestNodeID() // Create the tx to add the first new validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( vm.MaxValidatorStake, uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), - nodeID5, + extraNodeID, ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, @@ -1047,11 +1043,10 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require.NoError(err) // Create the standard block to add the first new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessStandardBlk, err := block.NewBanffStandardBlock( @@ -1084,7 +1079,8 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { // Create the standard block that moves the first new validator from the // pending validator set into the current validator set. 
- preferred, err = vm.Builder.Preferred() + preferredID = vm.manager.Preferred() + preferred, err = vm.manager.GetBlock(preferredID) require.NoError(err) preferredID = preferred.ID() preferredHeight = preferred.Height() @@ -1114,12 +1110,12 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { } expectedValidators2 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, - nodeID5: vm.MaxValidatorStake, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, + extraNodeID: vm.MaxValidatorStake, } validators, err = vm.GetValidatorSet(context.Background(), 3, constants.PrimaryNetworkID) require.NoError(err) @@ -1131,7 +1127,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := defaultMaxValidatorStake / 5 @@ -1143,19 +1139,15 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2EndTime := delegator2StartTime.Add(3 * defaultMinStakingDuration) delegator2Stake := defaultMaxValidatorStake - validatorStake - vm, _, _ := defaultVM(t) - + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -1163,7 +1155,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1172,7 +1164,9 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the add validator tx - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1186,7 +1180,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1194,7 +1188,9 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the first add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1208,7 +1204,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), + nodeID, 
keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1217,36 +1213,34 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { // attempting to issue the second add delegator tx should fail because the // total stake weight would go over the limit. - err = vm.Builder.AddUnverifiedTx(addSecondDelegatorTx) + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), addSecondDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t) - + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1254,7 +1248,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1271,7 +1267,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1284,14 +1282,16 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1309,7 +1309,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.Empty(emptyValidatorSet) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1320,7 +1320,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t // validator set into the current validator set. 
vm.clock.Set(validatorStartTime) - require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1341,29 +1343,25 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t) - + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1371,7 +1369,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1388,7 +1388,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1397,21 +1399,20 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - vm.TrackedSubnets.Add(createSubnetTx.ID()) - require.NoError(vm.state.ApplyCurrentValidators(createSubnetTx.ID(), vm.Validators)) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1421,7 +1422,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, 
createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1432,7 +1433,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t // validator set into the current validator set. vm.clock.Set(validatorStartTime) - require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1447,13 +1450,10 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() subnetID := testSubnet1.TxID // setup time @@ -1526,7 +1526,9 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NoError(primaryTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1553,7 +1555,9 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current @@ -1657,7 +1661,9 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary validator to current @@ -1729,13 +1735,9 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // setup time currentTime := defaultGenesisTime @@ -1765,7 +1767,9 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1857,7 +1861,9 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary 
validator to current @@ -1889,13 +1895,10 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() subnetID := testSubnet1.TxID // setup time @@ -1928,7 +1931,9 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1955,7 +1960,9 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current @@ -2059,7 +2066,9 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary validator to current @@ -2100,13 +2109,10 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() subnetID := testSubnet1.TxID // setup time @@ -2137,7 +2143,9 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -2161,7 +2169,9 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current @@ -2215,6 +2225,72 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { require.NoError(err) } +func TestValidatorSetRaceCondition(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, cortinaFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + nodeID := ids.GenerateTestNodeID() + require.NoError(vm.Connected(context.Background(), nodeID, version.CurrentApp)) + + protocolAppRequestBytest, err := gossip.MarshalAppRequest( + bloom.EmptyFilter.Marshal(), + ids.Empty[:], + ) + require.NoError(err) + + appRequestBytes := p2p.PrefixMessage( + p2p.ProtocolPrefix(network.TxGossipHandlerID), + protocolAppRequestBytest, + ) + + var ( + eg errgroup.Group + ctx, cancel = 
context.WithCancel(context.Background()) + ) + // keep 10 workers running + for i := 0; i < 10; i++ { + eg.Go(func() error { + for ctx.Err() == nil { + err := vm.AppRequest( + context.Background(), + nodeID, + 0, + time.Now().Add(time.Hour), + appRequestBytes, + ) + if err != nil { + return err + } + } + return nil + }) + } + + // If the validator set lock isn't held, the race detector should fail here. + for i := uint64(0); i < 1000; i++ { + blk, err := block.NewBanffStandardBlock( + time.Now(), + vm.state.GetLastAccepted(), + i, + nil, + ) + require.NoError(err) + + vm.state.SetLastAccepted(blk.ID()) + vm.state.SetHeight(blk.Height()) + vm.state.AddStatelessBlock(blk) + } + + // If the validator set lock is grabbed, we need to make sure to release the + // lock to avoid a deadlock. + vm.ctx.Lock.Unlock() + cancel() // stop and wait for workers + require.NoError(eg.Wait()) + vm.ctx.Lock.Lock() +} + func buildAndAcceptStandardBlock(vm *VM) error { blk, err := vm.Builder.BuildBlock(context.Background()) if err != nil { diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index ea8d43891dd6..7cc53bb2320d 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm @@ -6,7 +6,7 @@ package platformvm import ( "bytes" "context" - "errors" + "fmt" "testing" "time" @@ -34,6 +34,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" @@ -47,6 +48,7 @@ import ( "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -68,7 +70,19 @@ import ( txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -const defaultWeight uint64 = 10000 +type activeFork uint8 + +const ( + apricotPhase3 activeFork = iota + apricotPhase5 + banffFork + cortinaFork + durangoFork + + latestFork activeFork = durangoFork + + defaultWeight uint64 = 10000 +) var ( defaultMinStakingDuration = 24 * time.Hour @@ -81,9 +95,6 @@ var ( SupplyCap: 720 * units.MegaAvax, } - // AVAX asset ID in tests - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} - defaultTxFee = uint64(100) // chain timestamp at genesis @@ -95,73 +106,44 @@ var ( // time that genesis validators stop validating defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) - banffForkTime = defaultValidateEndTime.Add(-5 * defaultMinStakingDuration) + latestForkTime = defaultGenesisTime.Add(time.Second) // each key controls an address that has [defaultBalance] AVAX at genesis keys = secp256k1.TestKeys() - defaultMinValidatorStake = 5 * units.MilliAvax - defaultMaxValidatorStake = 500 * units.MilliAvax + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID defaultMinDelegatorStake = 1 * units.MilliAvax - - // amount all genesis validators have in defaultVM - defaultBalance = 100 * defaultMinValidatorStake + defaultMinValidatorStake = 5 * defaultMinDelegatorStake + defaultMaxValidatorStake = 100 * defaultMinValidatorStake + defaultBalance = 2 * defaultMaxValidatorStake // amount all genesis validators have in defaultVM // subnet that exists at genesis in defaultVM // Its controlKeys are keys[0], keys[1], keys[2] // Its threshold is 2 testSubnet1 *txs.Tx testSubnet1ControlKeys = keys[0:3] +) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) +func init() { + for _, key := range keys { + // TODO: use ids.GenerateTestNodeID() instead of ids.BuildTestNodeID + // Can be done when TestGetState is refactored + nodeBytes := key.PublicKey().Address() + nodeID := ids.BuildTestNodeID(nodeBytes[:]) - errMissing = errors.New("missing") -) + genesisNodeIDs = append(genesisNodeIDs, nodeID) + } +} type mutableSharedMemory struct { atomic.SharedMemory } -func defaultContext(t *testing.T) *snow.Context { - require := require.New(t) - - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - aliaser := ids.NewAliaser() - - require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) - require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) - require.NoError(aliaser.Alias(xChainID, "X")) - require.NoError(aliaser.Alias(xChainID, xChainID.String())) - require.NoError(aliaser.Alias(cChainID, "C")) - require.NoError(aliaser.Alias(cChainID, cChainID.String())) - - ctx.BCLookup = aliaser - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - return ctx -} - // Returns: // 1) The genesis state // 2) The byte representation of the default genesis for tests -func defaultGenesis(t *testing.T) (*api.BuildGenesisArgs, []byte) { +func defaultGenesis(t *testing.T, avaxAssetID ids.ID) (*api.BuildGenesisArgs, []byte) { require := require.New(t) genesisUTXOs := make([]api.UTXO, len(keys)) @@ -175,13 +157,12 @@ func defaultGenesis(t *testing.T) (*api.BuildGenesisArgs, []byte) { } } - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -219,82 +200,38 @@ func defaultGenesis(t *testing.T) (*api.BuildGenesisArgs, []byte) { return &buildGenesisArgs, genesisBytes } -// Returns: -// 1) The genesis state -// 2) The byte representation of the default genesis for tests -func BuildGenesisTest(t *testing.T) (*api.BuildGenesisArgs, 
[]byte) { - return BuildGenesisTestWithArgs(t, nil) -} - -// Returns: -// 1) The genesis state -// 2) The byte representation of the default genesis for tests -func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.BuildGenesisArgs, []byte) { +func defaultVM(t *testing.T, fork activeFork) (*VM, database.Database, *mutableSharedMemory) { require := require.New(t) - genesisUTXOs := make([]api.UTXO, len(keys)) - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) - require.NoError(err) - - genesisUTXOs[i] = api.UTXO{ - Amount: json.Uint64(defaultBalance), - Address: addr, - } - } - - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) - require.NoError(err) - - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ - StartTime: json.Uint64(defaultValidateStartTime.Unix()), - EndTime: json.Uint64(defaultValidateEndTime.Unix()), - NodeID: nodeID, - }, - RewardOwner: &api.Owner{ - Threshold: 1, - Addresses: []string{addr}, - }, - Staked: []api.UTXO{{ - Amount: json.Uint64(defaultWeight), - Address: addr, - }}, - DelegationFee: reward.PercentDenominator, - } - } - - buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(constants.UnitTestID), - AvaxAssetID: avaxAssetID, - UTXOs: genesisUTXOs, - Validators: genesisValidators, - Chains: nil, - Time: json.Uint64(defaultGenesisTime.Unix()), - InitialSupply: json.Uint64(360 * units.MegaAvax), - Encoding: formatting.Hex, - } + var ( + apricotPhase3Time = mockable.MaxTime + apricotPhase5Time = mockable.MaxTime + banffTime = mockable.MaxTime + cortinaTime = mockable.MaxTime + durangoTime = mockable.MaxTime + ) - if args != nil { - buildGenesisArgs = *args + // always reset latestForkTime (a package level variable) + // to ensure test independence + latestForkTime = defaultGenesisTime.Add(time.Second) + switch fork { + case durangoFork: + durangoTime = latestForkTime + fallthrough + case cortinaFork: + cortinaTime = latestForkTime + fallthrough + case banffFork: + banffTime = latestForkTime + fallthrough + case apricotPhase5: + apricotPhase5Time = latestForkTime + fallthrough + case apricotPhase3: + apricotPhase3Time = latestForkTime + default: + require.NoError(fmt.Errorf("unhandled fork %d", fork)) } - buildGenesisResponse := api.BuildGenesisReply{} - platformvmSS := api.StaticService{} - require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) - - genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - require.NoError(err) - - return &buildGenesisArgs, genesisBytes -} - -func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { - require := require.New(t) - vm := &VM{Config: config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), @@ -310,18 +247,20 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: banffForkTime, + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, + BanffTime: banffTime, + CortinaTime: cortinaTime, + DurangoTime: 
durangoTime, }} db := memdb.New() chainDB := prefixdb.New([]byte{0}, db) atomicDB := prefixdb.New([]byte{1}, db) - vm.clock.Set(banffForkTime.Add(time.Second)) + vm.clock.Set(latestForkTime) msgChan := make(chan common.Message, 1) - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) m := atomic.NewMemory(atomicDB) msm := &mutableSharedMemory{ @@ -331,25 +270,29 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { ctx.Lock.Lock() defer ctx.Lock.Unlock() - _, genesisBytes := defaultGenesis(t) + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) appSender := &common.SenderTest{} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } + dynamicConfigBytes := []byte(`{"network":{"max-validator-set-staleness":0}}`) require.NoError(vm.Initialize( context.Background(), ctx, chainDB, genesisBytes, nil, - nil, + dynamicConfigBytes, msgChan, nil, appSender, )) + // align chain time and local clock + vm.state.SetTimestamp(vm.clock.Time()) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Create a subnet and store it in testSubnet1 @@ -364,25 +307,31 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { keys[0].PublicKey().Address(), // change addr ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(testSubnet1)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), testSubnet1)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) require.NoError(blk.Verify(context.Background())) require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + t.Cleanup(func() { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + require.NoError(vm.Shutdown(context.Background())) + }) + return vm, db, msm } // Ensure genesis state is parsed from bytes and stored correctly func TestGenesis(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Ensure the genesis block has been accepted and stored genesisBlockID, err := vm.LastAccepted(context.Background()) // lastAccepted should be ID of genesis block @@ -392,7 +341,7 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(choices.Accepted, genesisBlock.Status()) - genesisState, _ := defaultGenesis(t) + genesisState, _ := defaultGenesis(t, vm.ctx.AVAXAssetID) // Ensure all the genesis UTXOs are there for _, utxo := range genesisState.UTXOs { _, addrBytes, err := address.ParseBech32(utxo.Address) @@ -413,15 +362,14 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(utxo.Address, addr) - require.Equal(uint64(utxo.Amount)-vm.TxFee, out.Amount()) + require.Equal(uint64(utxo.Amount)-vm.CreateSubnetTxFee, out.Amount()) } } // Ensure current validator set of primary network is correct require.Len(genesisState.Validators, vm.Validators.Count(constants.PrimaryNetworkID)) - for _, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) + for _, nodeID := range genesisNodeIDs { _, ok := vm.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) require.True(ok) } @@ -434,17 +382,16 @@ func TestGenesis(t *testing.T) { // accept proposal to add validator to primary network func TestAddValidatorCommit(t *testing.T) { require 
:= require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.GenerateTestNodeID() - rewardAddress := ids.GenerateTestShortID() + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = ids.GenerateTestNodeID() + rewardAddress = ids.GenerateTestShortID() + ) // create valid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -460,7 +407,9 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -472,25 +421,21 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - // Verify that new validator now in pending validator set - _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID) + // Verify that new validator now in current validator set + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) } // verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + nodeID := ids.GenerateTestNodeID() startTime := defaultGenesisTime.Add(-txexecutor.SyncBound).Add(-1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) - key, _ := secp256k1.NewPrivateKey() - nodeID := ids.NodeID(key.PublicKey().Address()) // create invalid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -498,18 +443,18 @@ func TestInvalidAddValidatorCommit(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( preferred.Timestamp(), preferredID, @@ -534,17 +479,16 @@ func TestInvalidAddValidatorCommit(t *testing.T) { // Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.GenerateTestNodeID() - rewardAddress := ids.GenerateTestShortID() + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = 
ids.GenerateTestNodeID() + rewardAddress = ids.GenerateTestShortID() + ) // create valid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -560,7 +504,9 @@ func TestAddValidatorReject(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -578,17 +524,14 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Use nodeID that is already in the genesis - repeatNodeID := ids.NodeID(keys[0].PublicKey().Address()) + repeatNodeID := genesisNodeIDs[0] - startTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) // create valid tx @@ -597,7 +540,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), repeatNodeID, - ids.ShortID(repeatNodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr @@ -605,23 +548,24 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(tx) + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), tx) require.ErrorIs(err, txexecutor.ErrAlreadyValidator) + vm.ctx.Lock.Lock() } // Accept proposal to add validator to subnet func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = genesisNodeIDs[0] + ) // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -638,7 +582,9 @@ func TestAddSubnetValidatorAccept(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -650,24 +596,23 @@ func TestAddSubnetValidatorAccept(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - // Verify that new validator is in pending validator set - _, err = vm.state.GetPendingValidator(testSubnet1.ID(), nodeID) + // Verify that new validator is in current validator set + _, err = vm.state.GetCurrentValidator(testSubnet1.ID(), nodeID) require.NoError(err) } // Reject proposal to add validator to subnet func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := 
defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = genesisNodeIDs[0] + ) // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -684,7 +629,9 @@ func TestAddSubnetValidatorReject(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -695,20 +642,17 @@ func TestAddSubnetValidatorReject(t *testing.T) { _, _, err = vm.state.GetTx(tx.ID()) require.ErrorIs(err, database.ErrNotFound) - // Verify that new validator NOT in pending validator set - _, err = vm.state.GetPendingValidator(testSubnet1.ID(), nodeID) + // Verify that new validator NOT in validator set + _, err = vm.state.GetCurrentValidator(testSubnet1.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) } // Test case where primary network validator rewarded func TestRewardValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) @@ -719,8 +663,7 @@ func TestRewardValidatorAccept(t *testing.T) { require.NoError(blk.Verify(context.Background())) // Assert preferences are correct - oracleBlk := blk.(smcon.OracleBlock) - options, err := oracleBlk.Options(context.Background()) + options, err := blk.(smcon.OracleBlock).Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) @@ -729,13 +672,13 @@ func TestRewardValidatorAccept(t *testing.T) { require.IsType(&block.BanffAbortBlock{}, abort.Block) // Assert block tries to reward a genesis validator - rewardTx := oracleBlk.(block.Block).Txs()[0].Unsigned + rewardTx := blk.(block.Block).Txs()[0].Unsigned require.IsType(&txs.RewardValidatorTx{}, rewardTx) // Verify options and accept commmit block require.NoError(commit.Verify(context.Background())) require.NoError(abort.Verify(context.Background())) - txID := oracleBlk.(block.Block).Txs()[0].ID() + txID := blk.(block.Block).Txs()[0].ID() { onAbort, ok := vm.manager.GetState(abort.ID()) require.True(ok) @@ -745,7 +688,7 @@ func TestRewardValidatorAccept(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(oracleBlk.Accept(context.Background())) + require.NoError(blk.Accept(context.Background())) require.NoError(commit.Accept(context.Background())) // Verify that chain's timestamp has advanced @@ -775,12 +718,9 @@ func TestRewardValidatorAccept(t *testing.T) { // Test case where primary network validator not rewarded func TestRewardValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() 
- }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) @@ -848,12 +788,10 @@ func TestRewardValidatorReject(t *testing.T) { // Ensure BuildBlock errors when there is no block to build func TestUnneededBuildBlock(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + _, err := vm.Builder.BuildBlock(context.Background()) require.ErrorIs(err, blockbuilder.ErrNoPendingBlocks) } @@ -861,12 +799,9 @@ func TestUnneededBuildBlock(t *testing.T) { // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx, err := vm.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -879,7 +814,9 @@ func TestCreateChain(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) // should contain proposal to create chain @@ -907,20 +844,15 @@ func TestCreateChain(t *testing.T) { // test where we: // 1) Create a subnet -// 2) Add a validator to the subnet's pending validator set -// 3) Advance timestamp to validator's start time (moving the validator from pending to current) -// 4) Advance timestamp to validator's end time (removing validator from current) +// 2) Add a validator to the subnet's current validator set +// 3) Advance timestamp to validator's end time (removing validator from current) func TestCreateSubnet(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() - - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + defer vm.ctx.Lock.Unlock() + nodeID := genesisNodeIDs[0] createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( 1, // threshold []ids.ShortID{ // control keys @@ -932,9 +864,11 @@ func TestCreateSubnet(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() - // should contain proposal to create subnet + // should contain the CreateSubnetTx blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -973,13 +907,15 @@ func TestCreateSubnet(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() blk, err = vm.Builder.BuildBlock(context.Background()) // should add validator to the new subnet require.NoError(err) require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) // add the validator to pending validator set + require.NoError(blk.Accept(context.Background())) // add the validator to current validator set require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) txID := 
blk.(block.Block).Txs()[0].ID() @@ -987,19 +923,6 @@ func TestCreateSubnet(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) - require.NoError(err) - - // Advance time to when new validator should start validating - // Create a block with an advance time tx that moves validator - // from pending to current validator set - vm.clock.Set(startTime) - blk, err = vm.Builder.BuildBlock(context.Background()) // should be advance time tx - require.NoError(err) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) // move validator addValidatorTx from pending to current - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -1023,12 +946,9 @@ func TestCreateSubnet(t *testing.T) { // test asset import func TestAtomicImport(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t) + vm, baseDB, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), @@ -1054,7 +974,7 @@ func TestAtomicImport(t *testing.T) { utxo := &avax.UTXO{ UTXOID: utxoID, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1063,7 +983,7 @@ func TestAtomicImport(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() @@ -1089,7 +1009,9 @@ func TestAtomicImport(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -1110,12 +1032,9 @@ func TestAtomicImport(t *testing.T) { // test optimistic asset import func TestOptimisticAtomicImport(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, apricotPhase3) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1136,10 +1055,9 @@ func TestOptimisticAtomicImport(t *testing.T) { }} require.NoError(tx.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessBlk, err := block.NewApricotAtomicBlock( @@ -1171,7 +1089,6 @@ func TestOptimisticAtomicImport(t *testing.T) { // test restarting the node func TestRestartFullyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) db := memdb.New() firstDB := prefixdb.New([]byte{}, db) @@ -1182,20 +1099,21 @@ func TestRestartFullyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + 
CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - firstCtx := defaultContext(t) + firstCtx := snowtest.Context(t, snowtest.PChainID) + + _, genesisBytes := defaultGenesis(t, firstCtx.AVAXAssetID) baseDB := memdb.New() atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(firstCtx.ChainID), - } - firstCtx.SharedMemory = msm + firstCtx.SharedMemory = m.NewSharedMemory(firstCtx.ChainID) - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) firstVM.clock.Set(initialClkTime) firstCtx.Lock.Lock() @@ -1215,13 +1133,6 @@ func TestRestartFullyAccepted(t *testing.T) { genesisID, err := firstVM.LastAccepted(context.Background()) require.NoError(err) - nextChainTime := initialClkTime.Add(time.Second) - firstVM.clock.Set(initialClkTime) - preferred, err := firstVM.Builder.Preferred() - require.NoError(err) - preferredID := preferred.ID() - preferredHeight := preferred.Height() - // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1242,6 +1153,14 @@ func TestRestartFullyAccepted(t *testing.T) { }} require.NoError(tx.Initialize(txs.Codec)) + nextChainTime := initialClkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) + + preferredID := firstVM.manager.Preferred() + preferred, err := firstVM.manager.GetBlock(preferredID) + require.NoError(err) + preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, @@ -1267,11 +1186,13 @@ func TestRestartFullyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - secondCtx := defaultContext(t) - secondCtx.SharedMemory = msm + secondCtx := snowtest.Context(t, snowtest.PChainID) + secondCtx.SharedMemory = firstCtx.SharedMemory secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() defer func() { @@ -1302,11 +1223,9 @@ func TestRestartFullyAccepted(t *testing.T) { func TestBootstrapPartiallyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) - baseDB := memdb.New() - vmDB := prefixdb.New([]byte("vm"), baseDB) - bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDB) + vmDB := prefixdb.New(chains.VMDBPrefix, baseDB) + bootstrappingDB := prefixdb.New(chains.ChainBootstrappingDBPrefix, baseDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) require.NoError(err) @@ -1317,22 +1236,22 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) + + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), - } - ctx.SharedMemory = msm + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - consensusCtx 
:= snow.DefaultConsensusContextTest() - consensusCtx.Context = ctx + consensusCtx := snowtest.ConsensusContext(ctx) ctx.Lock.Lock() msgChan := make(chan common.Message, 1) @@ -1348,9 +1267,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { nil, )) - preferred, err := vm.Builder.Preferred() - require.NoError(err) - // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1372,8 +1288,12 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(tx.Initialize(txs.Codec)) nextChainTime := initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, @@ -1388,7 +1308,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { advanceTimeBlkID := advanceTimeBlk.ID() advanceTimeBlkBytes := advanceTimeBlk.Bytes() - peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} + peerID := ids.BuildTestNodeID([]byte{1, 2, 3, 4, 5, 4, 3, 2, 1}) beacons := validators.NewManager() require.NoError(beacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1)) @@ -1451,19 +1371,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ) require.NoError(err) - var reqID uint32 - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { - inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) - require.NoError(err) - require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) - - requestID, ok := message.GetRequestID(inMsg.Message()) - require.True(ok) - - reqID = requestID - return nodeIDs - } - isBootstrapped := false bootstrapTracker := &common.BootstrapTrackerTest{ T: t, @@ -1482,28 +1389,27 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { beacons.RegisterCallbackListener(ctx.SubnetID, startup) // The engine handles consensus - consensus := &smcon.Topological{} - commonCfg := common.Config{ + snowGetHandler, err := snowgetter.New( + vm, + sender, + consensusCtx.Log, + time.Second, + 2000, + consensusCtx.Registerer, + ) + require.NoError(err) + + bootstrapConfig := bootstrap.Config{ + AllGetsServer: snowGetHandler, Ctx: consensusCtx, Beacons: beacons, SampleK: beacons.Count(ctx.SubnetID), StartupTracker: startup, - Alpha: (totalWeight + 1) / 2, Sender: sender, BootstrapTracker: bootstrapTracker, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := snowgetter.New(vm, commonCfg) - require.NoError(err) - - bootstrapConfig := bootstrap.Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocked, - VM: vm, + Blocked: blocked, + VM: vm, } // Asynchronously passes messages from the network to the consensus engine @@ -1545,7 +1451,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, }, - Consensus: consensus, + Consensus: &smcon.Topological{}, } engine, err := smeng.New(engineConfig) require.NoError(err) @@ -1581,6 +1487,19 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { h.Start(context.Background(), false) ctx.Lock.Lock() + var reqID uint32 + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + 
require.NoError(err) + require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) + + requestID, ok := message.GetRequestID(inMsg.Message()) + require.True(ok) + + reqID = requestID + return nodeIDs + } + require.NoError(bootstrapper.Connected(context.Background(), peerID, version.CurrentApp)) externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { @@ -1609,18 +1528,40 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return nodeIDs } - frontier := []ids.ID{advanceTimeBlkID} + frontier := set.Of(advanceTimeBlkID) require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) - externalSender.SendF = nil - externalSender.CantSend = false + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) + + requestID, ok := message.GetRequestID(inMsg.Message()) + require.True(ok) + + reqID = requestID + return nodeIDs + } require.NoError(bootstrapper.Ancestors(context.Background(), peerID, reqID, [][]byte{advanceTimeBlkBytes})) - preferred, err = vm.Builder.Preferred() - require.NoError(err) + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedOp, inMsgIntf.Op()) + inMsg := inMsgIntf.Message().(*p2p.GetAccepted) - require.Equal(advanceTimeBlk.ID(), preferred.ID()) + reqID = inMsg.RequestId + return nodeIDs + } + + require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, advanceTimeBlkID)) + + externalSender.SendF = nil + externalSender.CantSend = false + + require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) + require.Equal(advanceTimeBlk.ID(), vm.manager.Preferred()) ctx.Lock.Unlock() chainRouter.Shutdown(context.Background()) @@ -1628,7 +1569,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { func TestUnverifiedParent(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) vm := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -1637,18 +1577,22 @@ func TestUnverifiedParent(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + msgChan := make(chan common.Message, 1) require.NoError(vm.Initialize( context.Background(), @@ -1682,10 +1626,11 @@ func TestUnverifiedParent(t *testing.T) { }} require.NoError(tx1.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() - require.NoError(err) nextChainTime := initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight 
:= preferred.Height() statelessBlk, err := block.NewBanffStandardBlock( @@ -1698,7 +1643,7 @@ func TestUnverifiedParent(t *testing.T) { firstAdvanceTimeBlk := vm.manager.NewBlock(statelessBlk) require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) - // include a tx1 to make the block be accepted + // include a tx2 to make the block be accepted tx2 := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.ctx.NetworkID, @@ -1716,7 +1661,7 @@ func TestUnverifiedParent(t *testing.T) { }, }}, }} - require.NoError(tx1.Initialize(txs.Codec)) + require.NoError(tx2.Initialize(txs.Codec)) nextChainTime = nextChainTime.Add(time.Second) vm.clock.Set(nextChainTime) statelessSecondAdvanceTimeBlk, err := block.NewBanffStandardBlock( @@ -1733,14 +1678,11 @@ func TestUnverifiedParent(t *testing.T) { } func TestMaxStakeAmount(t *testing.T) { - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(t, vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + nodeID := genesisNodeIDs[0] tests := []struct { description string @@ -1784,7 +1726,7 @@ func TestMaxStakeAmount(t *testing.T) { func TestUptimeDisallowedWithRestart(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) + latestForkTime = defaultValidateStartTime.Add(defaultMinStakingDuration) db := memdb.New() firstDB := prefixdb.New([]byte{}, db) @@ -1795,12 +1737,16 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { RewardConfig: defaultRewardConfig, Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - firstCtx := defaultContext(t) + firstCtx := snowtest.Context(t, snowtest.PChainID) firstCtx.Lock.Lock() + _, genesisBytes := defaultGenesis(t, firstCtx.AVAXAssetID) + firstMsgChan := make(chan common.Message, 1) require.NoError(firstVM.Initialize( context.Background(), @@ -1814,7 +1760,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { nil, )) - initialClkTime := defaultValidateStartTime + initialClkTime := latestForkTime.Add(time.Second) firstVM.clock.Set(initialClkTime) // Set VM state to NormalOp, to start tracking validators' uptime @@ -1823,7 +1769,8 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { // Fast forward clock so that validators meet 20% uptime required for reward durationForReward := defaultValidateEndTime.Sub(defaultValidateStartTime) * firstUptimePercentage / 100 - firstVM.clock.Set(defaultValidateStartTime.Add(durationForReward)) + vmStopTime := defaultValidateStartTime.Add(durationForReward) + firstVM.clock.Set(vmStopTime) // Shutdown VM to stop all genesis validator uptime. 
// At this point they have been validating for the 20% uptime needed to be rewarded @@ -1838,16 +1785,22 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { UptimePercentage: secondUptimePercentage / 100., Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - secondCtx := defaultContext(t) + secondCtx := snowtest.Context(t, snowtest.PChainID) secondCtx.Lock.Lock() defer func() { require.NoError(secondVM.Shutdown(context.Background())) secondCtx.Lock.Unlock() }() + atomicDB := prefixdb.New([]byte{1}, db) + m := atomic.NewMemory(atomicDB) + secondCtx.SharedMemory = m.NewSharedMemory(secondCtx.ChainID) + secondMsgChan := make(chan common.Message, 1) require.NoError(secondVM.Initialize( context.Background(), @@ -1861,8 +1814,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { nil, )) - // set clock to the time we switched firstVM off - secondVM.clock.Set(defaultValidateStartTime.Add(durationForReward)) + secondVM.clock.Set(vmStopTime) // Set VM state to NormalOp, to start tracking validators' uptime require.NoError(secondVM.SetState(context.Background(), snow.Bootstrapping)) @@ -1922,7 +1874,8 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis(t) + latestForkTime = defaultValidateStartTime.Add(defaultMinStakingDuration) + db := memdb.New() vm := &VM{Config: config.Config{ @@ -1931,12 +1884,20 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { RewardConfig: defaultRewardConfig, Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - ctx := defaultContext(t) + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + + atomicDB := prefixdb.New([]byte{1}, db) + m := atomic.NewMemory(atomicDB) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + msgChan := make(chan common.Message, 1) appSender := &common.SenderTest{T: t} require.NoError(vm.Initialize( @@ -1956,7 +1917,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { ctx.Lock.Unlock() }() - initialClkTime := defaultValidateStartTime + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) // Set VM state to NormalOp, to start tracking validators' uptime @@ -2018,28 +1979,24 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t) - + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, 
reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, @@ -2047,7 +2004,9 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2064,7 +2023,9 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2077,7 +2038,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[1]}, keys[1].Address(), @@ -2085,7 +2046,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(err) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[2]}, keys[2].Address(), @@ -2110,18 +2071,15 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(block.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), ids.NodeID(id)) + _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) } func TestTransferSubnetOwnershipTx(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Create a subnet createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( @@ -2133,7 +2091,9 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { require.NoError(err) subnetID := createSubnetTx.ID() - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2165,7 +2125,9 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(transferSubnetOwnershipTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), transferSubnetOwnershipTx)) + vm.ctx.Lock.Lock() transferSubnetOwnershipBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2191,12 +2153,9 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { func TestBaseTx(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t) + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() sendAmt := uint64(100000) changeAddr := ids.ShortEmpty @@ -2251,7 +2210,9 @@ func TestBaseTx(t *testing.T) { require.Equal(vm.TxFee, totalInputAmt-totalOutputAmt) require.Equal(sendAmt, 
key1OutputAmt) - require.NoError(vm.Builder.AddUnverifiedTx(baseTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), baseTx)) + vm.ctx.Lock.Lock() baseTxBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2263,3 +2224,79 @@ func TestBaseTx(t *testing.T) { require.NoError(baseTxBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) } + +func TestPruneMempool(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, latestFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // Create a tx that will be valid regardless of timestamp. + sendAmt := uint64(100000) + changeAddr := ids.ShortEmpty + + baseTx, err := vm.txBuilder.NewBaseTx( + sendAmt, + secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + []*secp256k1.PrivateKey{keys[0]}, + changeAddr, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), baseTx)) + vm.ctx.Lock.Lock() + + // [baseTx] should be in the mempool. + baseTxID := baseTx.ID() + _, ok := vm.Builder.Get(baseTxID) + require.True(ok) + + // Create a tx that will be invalid after time advancement. + var ( + startTime = vm.clock.Time() + endTime = startTime.Add(vm.MinStakeDuration) + ) + + addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( + defaultMinValidatorStake, + uint64(startTime.Unix()), + uint64(endTime.Unix()), + ids.GenerateTestNodeID(), + keys[2].Address(), + 20000, + []*secp256k1.PrivateKey{keys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() + + // Advance clock to [endTime], making [addValidatorTx] invalid. + vm.clock.Set(endTime) + + // [addValidatorTx] and [baseTx] should still be in the mempool. + addValidatorTxID := addValidatorTx.ID() + _, ok = vm.Builder.Get(addValidatorTxID) + require.True(ok) + _, ok = vm.Builder.Get(baseTxID) + require.True(ok) + + vm.ctx.Lock.Unlock() + require.NoError(vm.pruneMempool()) + vm.ctx.Lock.Lock() + + // [addValidatorTx] should be ejected from the mempool. + // [baseTx] should still be in the mempool. + _, ok = vm.Builder.Get(addValidatorTxID) + require.False(ok) + _, ok = vm.Builder.Get(baseTxID) + require.True(ok) +} diff --git a/vms/platformvm/warp/codec.go b/vms/platformvm/warp/codec.go index cf4587224751..6ef6e526bdc1 100644 --- a/vms/platformvm/warp/codec.go +++ b/vms/platformvm/warp/codec.go @@ -1,28 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" ) -const codecVersion = 0 +const CodecVersion = 0 -// Codec does serialization and deserialization for Warp messages. 
-var c codec.Manager +var Codec codec.Manager func init() { - c = codec.NewManager(math.MaxInt) - lc := linearcodec.NewCustomMaxLength(math.MaxInt32) + Codec = codec.NewManager(math.MaxInt) + lc := linearcodec.NewDefault(time.Time{}) err := utils.Err( lc.RegisterType(&BitSetSignature{}), - c.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { panic(err) diff --git a/vms/platformvm/warp/constants.go b/vms/platformvm/warp/constants.go index a91f5f39394f..723cdf50bc82 100644 --- a/vms/platformvm/warp/constants.go +++ b/vms/platformvm/warp/constants.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/vms/platformvm/warp/gwarp/client.go b/vms/platformvm/warp/gwarp/client.go index 6619b4ff6ab7..0b51a54971f7 100644 --- a/vms/platformvm/warp/gwarp/client.go +++ b/vms/platformvm/warp/gwarp/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwarp diff --git a/vms/platformvm/warp/gwarp/server.go b/vms/platformvm/warp/gwarp/server.go index 4fbee3a3e736..7857f4e0ee70 100644 --- a/vms/platformvm/warp/gwarp/server.go +++ b/vms/platformvm/warp/gwarp/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwarp diff --git a/vms/platformvm/warp/gwarp/signer_test.go b/vms/platformvm/warp/gwarp/signer_test.go index d1d6e8f7147d..31c7b3e993d4 100644 --- a/vms/platformvm/warp/gwarp/signer_test.go +++ b/vms/platformvm/warp/gwarp/signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwarp @@ -23,7 +23,6 @@ type testSigner struct { sk *bls.SecretKey networkID uint32 chainID ids.ID - closeFn func() } func setupSigner(t testing.TB) *testSigner { @@ -55,18 +54,21 @@ func setupSigner(t testing.TB) *testSigner { require.NoError(err) s.client = NewClient(pb.NewSignerClient(conn)) - s.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return s } func TestInterface(t *testing.T) { - for _, test := range warp.SignerTests { - s := setupSigner(t) - test(t, s.client, s.sk, s.networkID, s.chainID) - s.closeFn() + for name, test := range warp.SignerTests { + t.Run(name, func(t *testing.T) { + s := setupSigner(t) + test(t, s.client, s.sk, s.networkID, s.chainID) + }) } } diff --git a/vms/platformvm/warp/message.go b/vms/platformvm/warp/message.go index 34850aed98ad..76383bdafa11 100644 --- a/vms/platformvm/warp/message.go +++ b/vms/platformvm/warp/message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -28,7 +28,7 @@ func ParseMessage(b []byte) (*Message, error) { msg := &Message{ bytes: b, } - _, err := c.Unmarshal(b, msg) + _, err := Codec.Unmarshal(b, msg) if err != nil { return nil, err } @@ -38,7 +38,7 @@ func ParseMessage(b []byte) (*Message, error) { // Initialize recalculates the result of Bytes(). 
It does not call Initialize() // on the UnsignedMessage. func (m *Message) Initialize() error { - bytes, err := c.Marshal(codecVersion, m) + bytes, err := Codec.Marshal(CodecVersion, m) m.bytes = bytes return err } diff --git a/vms/platformvm/warp/message_test.go b/vms/platformvm/warp/message_test.go index 910abd3403c9..99a50b366d95 100644 --- a/vms/platformvm/warp/message_test.go +++ b/vms/platformvm/warp/message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/vms/platformvm/warp/payload/addressed_call.go b/vms/platformvm/warp/payload/addressed_call.go index afdecd9e9f01..b3617ce487da 100644 --- a/vms/platformvm/warp/payload/addressed_call.go +++ b/vms/platformvm/warp/payload/addressed_call.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload diff --git a/vms/platformvm/warp/payload/addressed_call_test.go b/vms/platformvm/warp/payload/addressed_call_test.go index 0e60ef294c4b..77a885d836d5 100644 --- a/vms/platformvm/warp/payload/addressed_call_test.go +++ b/vms/platformvm/warp/payload/addressed_call_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload diff --git a/vms/platformvm/warp/payload/codec.go b/vms/platformvm/warp/payload/codec.go index e2e8ddd7a7f5..d188029abfed 100644 --- a/vms/platformvm/warp/payload/codec.go +++ b/vms/platformvm/warp/payload/codec.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" @@ -11,26 +13,21 @@ import ( ) const ( - codecVersion = 0 + CodecVersion = 0 MaxMessageSize = 24 * units.KiB - - // Note: Modifying this variable can have subtle implications on memory - // usage when parsing malformed payloads. - MaxSliceLen = 24 * 1024 ) -// Codec does serialization and deserialization for Warp messages. -var c codec.Manager +var Codec codec.Manager func init() { - c = codec.NewManager(MaxMessageSize) - lc := linearcodec.NewCustomMaxLength(MaxSliceLen) + Codec = codec.NewManager(MaxMessageSize) + lc := linearcodec.NewDefault(time.Time{}) err := utils.Err( lc.RegisterType(&Hash{}), lc.RegisterType(&AddressedCall{}), - c.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { panic(err) diff --git a/vms/platformvm/warp/payload/hash.go b/vms/platformvm/warp/payload/hash.go index f3a0eb0c09d3..330f74fd869d 100644 --- a/vms/platformvm/warp/payload/hash.go +++ b/vms/platformvm/warp/payload/hash.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
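Editor's note on the warp codec hunks above: the package-private `codecVersion`/`c` become the exported `CodecVersion`/`Codec` (and likewise in `warp/payload`), so other packages can marshal and parse warp types directly. Below is a hedged sketch of the round trip this enables; it uses only identifiers visible in the hunks (`warp.Codec`, `warp.CodecVersion`, `warp.ParseMessage`, `*warp.Message`), and assumes the caller already has a message built elsewhere.

```go
// Sketch only: round-trip a signed warp message with the now-exported codec.
func roundTrip(msg *warp.Message) (*warp.Message, error) {
	raw, err := warp.Codec.Marshal(warp.CodecVersion, msg)
	if err != nil {
		return nil, err
	}
	// ParseMessage calls Codec.Unmarshal internally, per the hunk above.
	return warp.ParseMessage(raw)
}
```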
package payload diff --git a/vms/platformvm/warp/payload/hash_test.go b/vms/platformvm/warp/payload/hash_test.go index 1d890a8bd551..d58fe5e6a0c0 100644 --- a/vms/platformvm/warp/payload/hash_test.go +++ b/vms/platformvm/warp/payload/hash_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload diff --git a/vms/platformvm/warp/payload/payload.go b/vms/platformvm/warp/payload/payload.go index e4601945be98..c5c09464803e 100644 --- a/vms/platformvm/warp/payload/payload.go +++ b/vms/platformvm/warp/payload/payload.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload @@ -22,7 +22,7 @@ type Payload interface { func Parse(bytes []byte) (Payload, error) { var payload Payload - if _, err := c.Unmarshal(bytes, &payload); err != nil { + if _, err := Codec.Unmarshal(bytes, &payload); err != nil { return nil, err } payload.initialize(bytes) @@ -30,7 +30,7 @@ func Parse(bytes []byte) (Payload, error) { } func initialize(p Payload) error { - bytes, err := c.Marshal(codecVersion, &p) + bytes, err := Codec.Marshal(CodecVersion, &p) if err != nil { return fmt.Errorf("couldn't marshal %T payload: %w", p, err) } diff --git a/vms/platformvm/warp/payload/payload_test.go b/vms/platformvm/warp/payload/payload_test.go index da14b8de0dbb..86b584ae33db 100644 --- a/vms/platformvm/warp/payload/payload_test.go +++ b/vms/platformvm/warp/payload/payload_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package payload diff --git a/vms/platformvm/warp/signature.go b/vms/platformvm/warp/signature.go index 2f2b0cae985b..667383376501 100644 --- a/vms/platformvm/warp/signature.go +++ b/vms/platformvm/warp/signature.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/vms/platformvm/warp/signature_test.go b/vms/platformvm/warp/signature_test.go index b3eaa88bbfe8..b50891ede5d4 100644 --- a/vms/platformvm/warp/signature_test.go +++ b/vms/platformvm/warp/signature_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -39,8 +39,8 @@ type testValidator struct { vdr *Validator } -func (v *testValidator) Less(o *testValidator) bool { - return v.vdr.Less(o.vdr) +func (v *testValidator) Compare(o *testValidator) int { + return v.vdr.Compare(o.vdr) } func newTestValidator() *testValidator { diff --git a/vms/platformvm/warp/signer.go b/vms/platformvm/warp/signer.go index 76f8ec02b4cd..8372aef0a728 100644 --- a/vms/platformvm/warp/signer.go +++ b/vms/platformvm/warp/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp diff --git a/vms/platformvm/warp/signer_test.go b/vms/platformvm/warp/signer_test.go index d4e83c24a850..84b51f6574fa 100644 --- a/vms/platformvm/warp/signer_test.go +++ b/vms/platformvm/warp/signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -14,13 +14,15 @@ import ( ) func TestSigner(t *testing.T) { - for _, test := range SignerTests { - sk, err := bls.NewSecretKey() - require.NoError(t, err) + for name, test := range SignerTests { + t.Run(name, func(t *testing.T) { + sk, err := bls.NewSecretKey() + require.NoError(t, err) - chainID := ids.GenerateTestID() - s := NewSigner(sk, constants.UnitTestID, chainID) + chainID := ids.GenerateTestID() + s := NewSigner(sk, constants.UnitTestID, chainID) - test(t, s, sk, constants.UnitTestID, chainID) + test(t, s, sk, constants.UnitTestID, chainID) + }) } } diff --git a/vms/platformvm/warp/test_signer.go b/vms/platformvm/warp/test_signer.go index aef578f78ae2..e30423edf1ed 100644 --- a/vms/platformvm/warp/test_signer.go +++ b/vms/platformvm/warp/test_signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -14,13 +14,14 @@ import ( ) // SignerTests is a list of all signer tests -var SignerTests = []func(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID){ - TestSignerWrongChainID, - TestSignerVerifies, +var SignerTests = map[string]func(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID){ + "WrongChainID": TestWrongChainID, + "WrongNetworkID": TestWrongNetworkID, + "Verifies": TestVerifies, } // Test that using a random SourceChainID results in an error -func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, _ ids.ID) { +func TestWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, _ ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( @@ -36,7 +37,7 @@ func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, } // Test that using a different networkID results in an error -func TestSignerWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkID uint32, blockchainID ids.ID) { +func TestWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkID uint32, blockchainID ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( @@ -52,7 +53,7 @@ func TestSignerWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkI } // Test that a signature generated with the signer verifies correctly -func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID) { +func TestVerifies(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( diff --git a/vms/platformvm/warp/unsigned_message.go b/vms/platformvm/warp/unsigned_message.go index 95ef0d2d07f0..1e66f552c9fa 100644 --- a/vms/platformvm/warp/unsigned_message.go +++ b/vms/platformvm/warp/unsigned_message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp @@ -41,13 +41,13 @@ func ParseUnsignedMessage(b []byte) (*UnsignedMessage, error) { bytes: b, id: hashing.ComputeHash256Array(b), } - _, err := c.Unmarshal(b, msg) + _, err := Codec.Unmarshal(b, msg) return msg, err } // Initialize recalculates the result of Bytes(). func (m *UnsignedMessage) Initialize() error { - bytes, err := c.Marshal(codecVersion, m) + bytes, err := Codec.Marshal(CodecVersion, m) if err != nil { return fmt.Errorf("couldn't marshal warp unsigned message: %w", err) } diff --git a/vms/platformvm/warp/unsigned_message_test.go b/vms/platformvm/warp/unsigned_message_test.go index f3be73ef6c77..03a140d14c27 100644 --- a/vms/platformvm/warp/unsigned_message_test.go +++ b/vms/platformvm/warp/unsigned_message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index 42ff34e7cb5e..2ada068adc76 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -39,8 +39,8 @@ type Validator struct { NodeIDs []ids.NodeID } -func (v *Validator) Less(o *Validator) bool { - return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) < 0 +func (v *Validator) Compare(o *Validator) int { + return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) } // GetCanonicalValidatorSet returns the validator set of [subnetID] at diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index 9af37aed81f6..3fbaf8860dbe 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/vms/propertyfx/burn_operation.go b/vms/propertyfx/burn_operation.go index 4217420b3b62..1dedb4c2f448 100644 --- a/vms/propertyfx/burn_operation.go +++ b/vms/propertyfx/burn_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/burn_operation_test.go b/vms/propertyfx/burn_operation_test.go index e9e9735efd3c..b6a995b0307c 100644 --- a/vms/propertyfx/burn_operation_test.go +++ b/vms/propertyfx/burn_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/credential.go b/vms/propertyfx/credential.go index 0a67c182a995..3a464cc29dfe 100644 --- a/vms/propertyfx/credential.go +++ b/vms/propertyfx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
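Editor's note on the `Less` → `Compare` hunks above (`testValidator` and `warp.Validator`): the comparison now returns a `bytes.Compare`-style int (negative, zero, positive) instead of a bool, which composes directly with standard sorting helpers. A small illustrative sketch, assuming only the exported `Validator.Compare` method and `PublicKeyBytes` field shown in the hunk (Go 1.21+ for `slices`):

```go
package main

import (
	"fmt"
	"slices"

	"github.com/ava-labs/avalanchego/vms/platformvm/warp"
)

func main() {
	vdrs := []*warp.Validator{
		{PublicKeyBytes: []byte{2}},
		{PublicKeyBytes: []byte{1}},
	}
	// Compare returns <0, 0, or >0, so it plugs straight into slices.SortFunc.
	slices.SortFunc(vdrs, (*warp.Validator).Compare)
	fmt.Println(vdrs[0].PublicKeyBytes) // [1]
}
```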
package propertyfx diff --git a/vms/propertyfx/credential_test.go b/vms/propertyfx/credential_test.go index 4be34acd3247..3ce9bc97f3c3 100644 --- a/vms/propertyfx/credential_test.go +++ b/vms/propertyfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/factory.go b/vms/propertyfx/factory.go index 21c69c97cd98..53d6101b1306 100644 --- a/vms/propertyfx/factory.go +++ b/vms/propertyfx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "propertyfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'p', 'r', 'o', 'p', 'e', 'r', 't', 'y', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go index ec921aef3f69..9aa461921e42 100644 --- a/vms/propertyfx/factory_test.go +++ b/vms/propertyfx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -7,15 +7,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go index 28d211a9b5ad..24a3dff171cb 100644 --- a/vms/propertyfx/fx.go +++ b/vms/propertyfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/fx_test.go b/vms/propertyfx/fx_test.go index fdab69bb5bf4..0cd995ba5282 100644 --- a/vms/propertyfx/fx_test.go +++ b/vms/propertyfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
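Editor's note on the `propertyfx/factory.go` hunk above: the factory no longer takes a logger or returns an error; it now satisfies `fx.Factory` with a bare `New() any`, alongside the new `Name` constant. A short sketch of the call site this implies, using only names from the hunk; the concrete type assertion is the caller's choice and is an assumption here.

```go
// Sketch only: instantiate the fx through the simplified factory.
factory := &propertyfx.Factory{}
fxInstance := factory.New().(*propertyfx.Fx) // New returns any; assert to the concrete Fx
_ = fxInstance
_ = propertyfx.Name // "propertyfx", the label used alongside propertyfx.ID
```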
package propertyfx @@ -39,7 +39,7 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} @@ -56,7 +56,7 @@ func TestFxVerifyMintOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -98,7 +98,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -132,7 +132,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -163,7 +163,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -195,7 +195,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -226,7 +226,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -264,7 +264,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -301,7 +301,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -335,7 +335,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -364,7 +364,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -399,7 +399,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ 
-431,7 +431,7 @@ func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) diff --git a/vms/propertyfx/mint_operation.go b/vms/propertyfx/mint_operation.go index 535ea1359010..7eecf5de27ad 100644 --- a/vms/propertyfx/mint_operation.go +++ b/vms/propertyfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/mint_operation_test.go b/vms/propertyfx/mint_operation_test.go index 138d989d3296..abcc552ace4c 100644 --- a/vms/propertyfx/mint_operation_test.go +++ b/vms/propertyfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/mint_output.go b/vms/propertyfx/mint_output.go index 3aebd115a404..7ff60375721c 100644 --- a/vms/propertyfx/mint_output.go +++ b/vms/propertyfx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/mint_output_test.go b/vms/propertyfx/mint_output_test.go index 0b4b76c55f88..4cfa1da038d8 100644 --- a/vms/propertyfx/mint_output_test.go +++ b/vms/propertyfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/owned_output.go b/vms/propertyfx/owned_output.go index 30e32ca3ddf2..cbe2f4376753 100644 --- a/vms/propertyfx/owned_output.go +++ b/vms/propertyfx/owned_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/propertyfx/owned_output_test.go b/vms/propertyfx/owned_output_test.go index dbc7bea63698..a9c9adc57643 100644 --- a/vms/propertyfx/owned_output_test.go +++ b/vms/propertyfx/owned_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/vms/proposervm/README.md b/vms/proposervm/README.md index e01a014f1156..6dec4fe9d932 100644 --- a/vms/proposervm/README.md +++ b/vms/proposervm/README.md @@ -47,7 +47,7 @@ A proposer in position `i` in the proposers list has its submission windows star The following validation rules are enforced: - Given a `proposervm.Block` **C** and its parent block **P**, **P**'s inner block must be **C**'s inner block's parent. -- A block must have a `PChainHeight` is larger or equal to its parent's `PChainHeight` (`PChainHeight` is monotonic). +- A block must have a `PChainHeight` that is larger or equal to its parent's `PChainHeight` (`PChainHeight` is monotonic). - A block must have a `PChainHeight` that is less or equal to current P-Chain height. 
- A block must have a `Timestamp` larger or equal to its parent's `Timestamp` (`Timestamp` is monotonic) - A block received by a node at time `t_local` must have a `Timestamp` such that `Timestamp < t_local + maxSkew` (a block too far in the future is invalid). `maxSkew` is currently set to `10 seconds`. diff --git a/vms/proposervm/batched_vm.go b/vms/proposervm/batched_vm.go index fd104318cea7..0bf514827193 100644 --- a/vms/proposervm/batched_vm.go +++ b/vms/proposervm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -101,7 +101,7 @@ func (vm *VM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.B ) for ; blocksIndex < len(blks); blocksIndex++ { blkBytes := blks[blocksIndex] - statelessBlock, err := statelessblock.Parse(blkBytes) + statelessBlock, err := statelessblock.Parse(blkBytes, vm.DurangoTime) if err != nil { break } diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 8acb92f74bc0..a691fb88643b 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -30,16 +30,20 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) func TestCoreVMNotRemote(t *testing.T) { // if coreVM is not remote VM, a specific error is returned require := require.New(t) - _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -64,7 +68,11 @@ func TestCoreVMNotRemote(t *testing.T) { func TestGetAncestorsPreForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, mockable.MaxTime) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -210,7 +218,11 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { func TestGetAncestorsPostForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, time.Time{}) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -221,10 +233,9 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: 
choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil @@ -235,17 +246,16 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { // prepare build of next block require.NoError(builtBlk1.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil @@ -256,17 +266,16 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, 0)) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil @@ -362,13 +371,18 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { require := require.New(t) - currentTime := time.Now().Truncate(time.Second) - preForkTime := currentTime.Add(5 * time.Minute) - forkTime := currentTime.Add(10 * time.Minute) - postForkTime := currentTime.Add(15 * time.Minute) + + var ( + currentTime = time.Now().Truncate(time.Second) + preForkTime = currentTime.Add(5 * time.Minute) + forkTime = currentTime.Add(10 * time.Minute) + postForkTime = currentTime.Add(15 * time.Minute) + + durangoTime = forkTime + ) // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -438,10 +452,9 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: postForkTime.Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil @@ -453,17 +466,16 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // prepare build of next block require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + 
require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) coreBlk4 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - TimestampV: postForkTime, + BytesV: []byte{4}, + ParentV: coreBlk3.ID(), + HeightV: coreBlk3.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil @@ -568,7 +580,11 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { func TestBatchedParseBlockPreForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, mockable.MaxTime) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -579,10 +595,9 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil @@ -606,10 +621,9 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil @@ -633,10 +647,9 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil @@ -689,7 +702,11 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { func TestBatchedParseBlockPostForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, time.Time{}) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -700,10 +717,9 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil @@ -714,17 +730,16 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { // prepare build of next block require.NoError(builtBlk1.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + 
require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil @@ -735,17 +750,16 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, builtBlk2.(*postForkBlock).PChainHeight())) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil @@ -798,13 +812,18 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { require := require.New(t) - currentTime := time.Now().Truncate(time.Second) - preForkTime := currentTime.Add(5 * time.Minute) - forkTime := currentTime.Add(10 * time.Minute) - postForkTime := currentTime.Add(15 * time.Minute) + + var ( + currentTime = time.Now().Truncate(time.Second) + preForkTime = currentTime.Add(5 * time.Minute) + forkTime = currentTime.Add(10 * time.Minute) + postForkTime = currentTime.Add(15 * time.Minute) + + durangoTime = forkTime + ) // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() @@ -874,10 +893,9 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: postForkTime.Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil @@ -889,17 +907,16 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { // prepare build of next block require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) coreBlk4 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - TimestampV: postForkTime, + BytesV: []byte{4}, + ParentV: coreBlk3.ID(), + HeightV: coreBlk3.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil @@ -966,7 +983,8 @@ type TestRemoteProposerVM struct { 
func initTestRemoteProposerVM( t *testing.T, - proBlkStartTime time.Time, + activationTime, + durangoTime time.Time, ) ( TestRemoteProposerVM, *VM, @@ -1030,12 +1048,15 @@ func initTestRemoteProposerVM( proVM := New( coreVM, - proBlkStartTime, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: activationTime, + DurangoTime: durangoTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -1048,28 +1069,34 @@ func initTestRemoteProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = pTestNodeID + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState require.NoError(proVM.Initialize( diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index 7ba5cb0cfeac..3d96e972c9ac 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -45,6 +45,7 @@ var ( errPChainHeightNotReached = errors.New("block P-chain height larger than current P-chain height") errTimeTooAdvanced = errors.New("time is too far advanced") errProposerWindowNotStarted = errors.New("proposer window hasn't started") + errUnexpectedProposer = errors.New("unexpected proposer for current window") errProposersNotActivated = errors.New("proposers haven't been activated yet") errPChainHeightTooLow = errors.New("block P-chain height is too low") ) @@ -134,12 +135,11 @@ func (p *postForkCommonComponents) Verify( // If the node is currently syncing - we don't assume that the P-chain has // been synced up to this point yet. 
if p.vm.consensusState == snow.NormalOp { - childID := child.ID() currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight(ctx) if err != nil { p.vm.ctx.Log.Error("block verification failed", zap.String("reason", "failed to get current P-Chain height"), - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Error(err), ) return err @@ -152,28 +152,24 @@ func (p *postForkCommonComponents) Verify( ) } - childHeight := child.Height() - proposerID := child.Proposer() - minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID) + var shouldHaveProposer bool + if p.vm.IsDurangoActivated(parentTimestamp) { + shouldHaveProposer, err = p.verifyPostDurangoBlockDelay(ctx, parentTimestamp, parentPChainHeight, child) + } else { + shouldHaveProposer, err = p.verifyPreDurangoBlockDelay(ctx, parentTimestamp, parentPChainHeight, child) + } if err != nil { return err } - delay := childTimestamp.Sub(parentTimestamp) - if delay < minDelay { - return errProposerWindowNotStarted - } - // Verify the signature of the node - shouldHaveProposer := delay < proposer.MaxDelay if err := child.SignedBlock.Verify(shouldHaveProposer, p.vm.ctx.ChainID); err != nil { return err } p.vm.ctx.Log.Debug("verified post-fork block", - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), zap.Time("blockTimestamp", childTimestamp), ) } @@ -212,37 +208,26 @@ func (p *postForkCommonComponents) buildChild( return nil, err } - delay := newTimestamp.Sub(parentTimestamp) - if delay < proposer.MaxDelay { - parentHeight := p.innerBlk.Height() - proposerID := p.vm.ctx.NodeID - minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID) - if err != nil { - p.vm.ctx.Log.Error("unexpected build block failure", - zap.String("reason", "failed to calculate required timestamp delay"), - zap.Stringer("parentID", parentID), - zap.Error(err), - ) - return nil, err - } - - if delay < minDelay { - // It's not our turn to propose a block yet. This is likely caused - // by having previously notified the consensus engine to attempt to - // build a block on top of a block that is no longer the preferred - // block. - p.vm.ctx.Log.Debug("build block dropped", - zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), - zap.Time("blockTimestamp", newTimestamp), - ) - - // In case the inner VM only issued one pendingTxs message, we - // should attempt to re-handle that once it is our turn to build the - // block. 
- p.vm.notifyInnerBlockReady() - return nil, errProposerWindowNotStarted - } + var shouldBuildSignedBlock bool + if p.vm.IsDurangoActivated(parentTimestamp) { + shouldBuildSignedBlock, err = p.shouldBuildSignedBlockPostDurango( + ctx, + parentID, + parentTimestamp, + parentPChainHeight, + newTimestamp, + ) + } else { + shouldBuildSignedBlock, err = p.shouldBuildSignedBlockPreDurango( + ctx, + parentID, + parentTimestamp, + parentPChainHeight, + newTimestamp, + ) + } + if err != nil { + return nil, err } var innerBlock snowman.Block @@ -259,23 +244,22 @@ func (p *postForkCommonComponents) buildChild( // Build the child var statelessChild block.SignedBlock - if delay >= proposer.MaxDelay { - statelessChild, err = block.BuildUnsigned( + if shouldBuildSignedBlock { + statelessChild, err = block.Build( parentID, newTimestamp, pChainHeight, + p.vm.StakingCertLeaf, innerBlock.Bytes(), + p.vm.ctx.ChainID, + p.vm.StakingLeafSigner, ) } else { - statelessChild, err = block.Build( + statelessChild, err = block.BuildUnsigned( parentID, newTimestamp, pChainHeight, - p.vm.ctx.NodeID, - p.vm.stakingCertLeaf, innerBlock.Bytes(), - p.vm.ctx.ChainID, - p.vm.stakingLeafSigner, ) } if err != nil { @@ -345,3 +329,186 @@ func verifyIsNotOracleBlock(ctx context.Context, b snowman.Block) error { return err } } + +func (p *postForkCommonComponents) verifyPreDurangoBlockDelay( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + blk *postForkBlock, +) (bool, error) { + var ( + blkTimestamp = blk.Timestamp() + childHeight = blk.Height() + proposerID = blk.Proposer() + ) + minDelay, err := p.vm.Windower.Delay( + ctx, + childHeight, + parentPChainHeight, + proposerID, + proposer.MaxVerifyWindows, + ) + if err != nil { + p.vm.ctx.Log.Error("unexpected block verification failure", + zap.String("reason", "failed to calculate required timestamp delay"), + zap.Stringer("blkID", blk.ID()), + zap.Error(err), + ) + return false, err + } + + delay := blkTimestamp.Sub(parentTimestamp) + if delay < minDelay { + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) + } + + return delay < proposer.MaxVerifyDelay, nil +} + +func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + blk *postForkBlock, +) (bool, error) { + var ( + blkTimestamp = blk.Timestamp() + blkHeight = blk.Height() + currentSlot = proposer.TimeToSlot(parentTimestamp, blkTimestamp) + proposerID = blk.Proposer() + ) + + expectedProposerID, err := p.vm.Windower.ExpectedProposer( + ctx, + blkHeight, + parentPChainHeight, + currentSlot, + ) + switch { + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return false, nil // block should be unsigned + case err != nil: + p.vm.ctx.Log.Error("unexpected block verification failure", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("blkID", blk.ID()), + zap.Error(err), + ) + return false, err + case expectedProposerID == proposerID: + return true, nil // block should be signed + default: + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) + } +} + +func (p *postForkCommonComponents) shouldBuildSignedBlockPostDurango( + ctx context.Context, + parentID ids.ID, + parentTimestamp time.Time, + parentPChainHeight uint64, + newTimestamp time.Time, +) (bool, error) { + parentHeight := p.innerBlk.Height() + currentSlot := proposer.TimeToSlot(parentTimestamp, newTimestamp) + 
expectedProposerID, err := p.vm.Windower.ExpectedProposer( + ctx, + parentHeight+1, + parentPChainHeight, + currentSlot, + ) + switch { + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return false, nil // build an unsigned block + case err != nil: + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + case expectedProposerID == p.vm.ctx.NodeID: + return true, nil // build a signed block + } + + // It's not our turn to propose a block yet. This is likely caused by having + // previously notified the consensus engine to attempt to build a block on + // top of a block that is no longer the preferred block. + p.vm.ctx.Log.Debug("build block dropped", + zap.Time("parentTimestamp", parentTimestamp), + zap.Time("blockTimestamp", newTimestamp), + zap.Uint64("slot", currentSlot), + zap.Stringer("expectedProposer", expectedProposerID), + ) + + // We need to reschedule the block builder to the next time we can try to + // build a block. + // + // TODO: After Durango activates, restructure this logic to separate + // updating the scheduler from verifying the proposerID. + nextStartTime, err := p.vm.getPostDurangoSlotTime( + ctx, + parentHeight+1, + parentPChainHeight, + currentSlot+1, // We know we aren't the proposer for the current slot + parentTimestamp, + ) + if err != nil { + p.vm.ctx.Log.Error("failed to reset block builder scheduler", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + } + p.vm.Scheduler.SetBuildBlockTime(nextStartTime) + + // In case the inner VM only issued one pendingTxs message, we should + // attempt to re-handle that once it is our turn to build the block. + p.vm.notifyInnerBlockReady() + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) +} + +func (p *postForkCommonComponents) shouldBuildSignedBlockPreDurango( + ctx context.Context, + parentID ids.ID, + parentTimestamp time.Time, + parentPChainHeight uint64, + newTimestamp time.Time, +) (bool, error) { + delay := newTimestamp.Sub(parentTimestamp) + if delay >= proposer.MaxBuildDelay { + return false, nil // time for any node to build an unsigned block + } + + parentHeight := p.innerBlk.Height() + proposerID := p.vm.ctx.NodeID + minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows) + if err != nil { + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate required timestamp delay"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + } + + if delay >= minDelay { + // it's time for this node to propose a block. It'll be signed or + // unsigned depending on the delay + return delay < proposer.MaxVerifyDelay, nil + } + + // It's not our turn to propose a block yet. This is likely caused by having + // previously notified the consensus engine to attempt to build a block on + // top of a block that is no longer the preferred block. + p.vm.ctx.Log.Debug("build block dropped", + zap.Time("parentTimestamp", parentTimestamp), + zap.Duration("minDelay", minDelay), + zap.Time("blockTimestamp", newTimestamp), + ) + + // In case the inner VM only issued one pendingTxs message, we should + // attempt to re-handle that once it is our turn to build the block. 
+ p.vm.notifyInnerBlockReady() + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) +} diff --git a/vms/proposervm/block/block.go b/vms/proposervm/block/block.go index 4c5c3380c606..63fa930dcc8f 100644 --- a/vms/proposervm/block/block.go +++ b/vms/proposervm/block/block.go @@ -8,20 +8,18 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( - "crypto/x509" "errors" "fmt" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -40,7 +38,7 @@ type Block interface { Block() []byte Bytes() []byte - initialize(bytes []byte) error + initialize(bytes []byte, durangoTime time.Time) error } type SignedBlock interface { @@ -88,7 +86,7 @@ func (b *statelessBlock) Bytes() []byte { return b.bytes } -func (b *statelessBlock) initialize(bytes []byte) error { +func (b *statelessBlock) initialize(bytes []byte, durangoTime time.Time) error { b.bytes = bytes // The serialized form of the block is the unsignedBytes followed by the @@ -103,23 +101,18 @@ func (b *statelessBlock) initialize(bytes []byte) error { return nil } - tlsCert, err := x509.ParseCertificate(b.StatelessBlock.Certificate) + // TODO: Remove durangoTime after v1.11.x has activated. + var err error + if b.timestamp.Before(durangoTime) { + b.cert, err = staking.ParseCertificate(b.StatelessBlock.Certificate) + } else { + b.cert, err = staking.ParseCertificatePermissive(b.StatelessBlock.Certificate) + } if err != nil { return fmt.Errorf("%w: %w", errInvalidCertificate, err) } - cert := staking.CertificateFromX509(tlsCert) - b.cert = cert - - nodeIDBytes, err := secp256k1.RecoverSecp256PublicKey(tlsCert) - if err != nil { - return err - } - nodeID, err := ids.ToNodeID(nodeIDBytes) - if err != nil { - return err - } - b.proposer = nodeID + b.proposer = b.cert.NodeID return nil } diff --git a/vms/proposervm/block/block_test.go b/vms/proposervm/block/block_test.go index d0b1a817b941..8a8a57ae3b9d 100644 --- a/vms/proposervm/block/block_test.go +++ b/vms/proposervm/block/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/proposervm/block/build.go b/vms/proposervm/block/build.go index 7aad075d15e1..dc3af0c3354c 100644 --- a/vms/proposervm/block/build.go +++ b/vms/proposervm/block/build.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -41,18 +41,20 @@ func BuildUnsigned( timestamp: timestamp, } - bytes, err := c.Marshal(codecVersion, &block) + bytes, err := Codec.Marshal(CodecVersion, &block) if err != nil { return nil, err } - return block, block.initialize(bytes) + + // Invariant: The durango timestamp isn't used here because the certificate + // is empty. 
+ return block, block.initialize(bytes, time.Time{}) } func Build( parentID ids.ID, timestamp time.Time, pChainHeight uint64, - nodeID ids.NodeID, cert *staking.Certificate, blockBytes []byte, chainID ids.ID, @@ -68,11 +70,11 @@ func Build( }, timestamp: timestamp, cert: cert, - proposer: nodeID, + proposer: cert.NodeID, } var blockIntf SignedBlock = block - unsignedBytesWithEmptySignature, err := c.Marshal(codecVersion, &blockIntf) + unsignedBytesWithEmptySignature, err := Codec.Marshal(CodecVersion, &blockIntf) if err != nil { return nil, err } @@ -96,7 +98,7 @@ func Build( return nil, err } - block.bytes, err = c.Marshal(codecVersion, &blockIntf) + block.bytes, err = Codec.Marshal(CodecVersion, &blockIntf) return block, err } @@ -111,7 +113,7 @@ func BuildHeader( Body: bodyID, } - bytes, err := c.Marshal(codecVersion, &header) + bytes, err := Codec.Marshal(CodecVersion, &header) header.bytes = bytes return &header, err } @@ -128,9 +130,11 @@ func BuildOption( InnerBytes: innerBytes, } - bytes, err := c.Marshal(codecVersion, &block) + bytes, err := Codec.Marshal(CodecVersion, &block) if err != nil { return nil, err } - return block, block.initialize(bytes) + + // Invariant: The durango timestamp isn't used. + return block, block.initialize(bytes, time.Time{}) } diff --git a/vms/proposervm/block/build_test.go b/vms/proposervm/block/build_test.go index 98dfe89695b7..6a927d727e0e 100644 --- a/vms/proposervm/block/build_test.go +++ b/vms/proposervm/block/build_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -36,14 +36,14 @@ func TestBuild(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.CertificateFromX509(tlsCert.Leaf) + require.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) builtBlock, err := Build( parentID, timestamp, pChainHeight, - ids.EmptyNodeID, cert, innerBlockBytes, chainID, diff --git a/vms/proposervm/block/camino_test.go b/vms/proposervm/block/camino_test.go new file mode 100644 index 000000000000..2d507acafbf0 --- /dev/null +++ b/vms/proposervm/block/camino_test.go @@ -0,0 +1,113 @@ +// Copyright (C) 2023, Chain4Travel AG. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" + utilsSecp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +// Convinient way to run generateTestBlock. Comment out SkipNow before run. +func TestGenerateTestBlock(t *testing.T) { + t.SkipNow() + key, cert, err := generateTestKeyAndCertWithDupExt() + require.NoError(t, err) + blockHex, err := generateTestBlock(key, cert) + require.NoError(t, err) + t.Logf("generated block hex: %s\n", blockHex) +} + +// Creates block with given key and cert, then prints block bytes hex. This hex is used by tests in this package. 
+func generateTestBlock(key crypto.Signer, cert *staking.Certificate) (string, error) { + parentID := ids.ID{1} + timestamp := time.Unix(123, 0) + pChainHeight := uint64(2) + innerBlockBytes := []byte{3} + chainID := ids.ID{4} + + block, err := Build( + parentID, + timestamp, + pChainHeight, + cert, + innerBlockBytes, + chainID, + key, + ) + if err != nil { + return "", err + } + + blockBytes, err := Codec.Marshal(CodecVersion, block) + if err != nil { + return "", err + } + + return "00000000" + hex.EncodeToString(blockBytes), nil +} + +// Creates key and certificate with duplicated extensions. +func generateTestKeyAndCertWithDupExt() (crypto.Signer, *staking.Certificate, error) { + // Create RSA key to sign cert with + rsaKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, nil, fmt.Errorf("couldn't generate rsa key: %w", err) + } + // Create SECP256K1 key to sign cert with + secpKey := utilsSecp256k1.RsaPrivateKeyToSecp256PrivateKey(rsaKey) + extension := utilsSecp256k1.SignRsaPublicKey(secpKey, &rsaKey.PublicKey) + + // Create self-signed staking cert + certTemplate := &x509.Certificate{ + SerialNumber: big.NewInt(0), + NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC), + NotAfter: time.Now().AddDate(100, 0, 0), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment, + ExtraExtensions: []pkix.Extension{*extension, *extension}, + BasicConstraintsValid: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &rsaKey.PublicKey, rsaKey) + if err != nil { + return nil, nil, fmt.Errorf("couldn't create certificate: %w", err) + } + + input := cryptobyte.String(certBytes) + if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { + return nil, nil, staking.ErrMalformedCertificate + } + + tlsCert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: rsaKey, + Leaf: &x509.Certificate{ + Raw: input, + PublicKey: &rsaKey.PublicKey, + Extensions: certTemplate.ExtraExtensions, + }, + } + + cert, err := staking.CertificateFromX509(tlsCert.Leaf) + if err != nil { + return nil, nil, err + } + + return rsaKey, cert, nil +} diff --git a/vms/proposervm/block/codec.go b/vms/proposervm/block/codec.go index 6d68a4cc2fe7..ca2318002093 100644 --- a/vms/proposervm/block/codec.go +++ b/vms/proposervm/block/codec.go @@ -1,33 +1,31 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" ) -const codecVersion = 0 +const CodecVersion = 0 -// The maximum block size is enforced by the p2p message size limit. -// See: [constants.DefaultMaxMessageSize] -// -// Invariant: This codec must never be used to unmarshal a slice unless it is a -// `[]byte`. Otherwise a malicious payload could cause an OOM. -var c codec.Manager +var Codec codec.Manager func init() { - linearCodec := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt) + lc := linearcodec.NewDefault(time.Time{}) + // The maximum block size is enforced by the p2p message size limit. 
+ // See: [constants.DefaultMaxMessageSize] + Codec = codec.NewManager(math.MaxInt) err := utils.Err( - linearCodec.RegisterType(&statelessBlock{}), - linearCodec.RegisterType(&option{}), - c.RegisterCodec(codecVersion, linearCodec), + lc.RegisterType(&statelessBlock{}), + lc.RegisterType(&option{}), + Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { panic(err) diff --git a/vms/proposervm/block/header.go b/vms/proposervm/block/header.go index 0098ab7e1932..83c4e813c806 100644 --- a/vms/proposervm/block/header.go +++ b/vms/proposervm/block/header.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/proposervm/block/header_test.go b/vms/proposervm/block/header_test.go index bdbfaf3be0fd..a4db59385f01 100644 --- a/vms/proposervm/block/header_test.go +++ b/vms/proposervm/block/header_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/proposervm/block/option.go b/vms/proposervm/block/option.go index 180b90e31fd6..7edb39bd429f 100644 --- a/vms/proposervm/block/option.go +++ b/vms/proposervm/block/option.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "time" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -32,7 +34,7 @@ func (b *option) Bytes() []byte { return b.bytes } -func (b *option) initialize(bytes []byte) error { +func (b *option) initialize(bytes []byte, _ time.Time) error { b.id = hashing.ComputeHash256Array(bytes) b.bytes = bytes return nil diff --git a/vms/proposervm/block/option_test.go b/vms/proposervm/block/option_test.go index f6d4f409650d..d5af9c100079 100644 --- a/vms/proposervm/block/option_test.go +++ b/vms/proposervm/block/option_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/vms/proposervm/block/parse.go b/vms/proposervm/block/parse.go index aff15bde9d2b..bf9b44adf1f4 100644 --- a/vms/proposervm/block/parse.go +++ b/vms/proposervm/block/parse.go @@ -1,30 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block -import "fmt" +import ( + "fmt" + "time" +) -func Parse(bytes []byte) (Block, error) { +func Parse(bytes []byte, durangoTime time.Time) (Block, error) { var block Block - parsedVersion, err := c.Unmarshal(bytes, &block) + parsedVersion, err := Codec.Unmarshal(bytes, &block) if err != nil { return nil, err } - if parsedVersion != codecVersion { - return nil, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) + if parsedVersion != CodecVersion { + return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } - return block, block.initialize(bytes) + return block, block.initialize(bytes, durangoTime) } func ParseHeader(bytes []byte) (Header, error) { header := statelessHeader{} - parsedVersion, err := c.Unmarshal(bytes, &header) + parsedVersion, err := Codec.Unmarshal(bytes, &header) if err != nil { return nil, err } - if parsedVersion != codecVersion { - return nil, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) + if parsedVersion != CodecVersion { + return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } header.bytes = bytes return &header, nil diff --git a/vms/proposervm/block/parse_test.go b/vms/proposervm/block/parse_test.go index cb0a1e66037c..54c9af83ac16 100644 --- a/vms/proposervm/block/parse_test.go +++ b/vms/proposervm/block/parse_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -19,8 +19,6 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" @@ -40,20 +38,14 @@ func TestParse(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - cert := staking.CertificateFromX509(tlsCert.Leaf) - key := tlsCert.PrivateKey.(crypto.Signer) - - nodeIDBytes, err := secp256k1.RecoverSecp256PublicKey(tlsCert.Leaf) - require.NoError(err) - - nodeID, err := ids.ToNodeID(nodeIDBytes) + cert, err := staking.CertificateFromX509(tlsCert.Leaf) require.NoError(err) + key := tlsCert.PrivateKey.(crypto.Signer) builtBlock, err := Build( parentID, timestamp, pChainHeight, - nodeID, cert, innerBlockBytes, chainID, @@ -62,25 +54,38 @@ func TestParse(t *testing.T) { require.NoError(err) builtBlockBytes := builtBlock.Bytes() - - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, chainID, builtBlock, parsedBlock) + durangoTimes := []time.Time{ + timestamp.Add(time.Second), // Durango not activated yet + timestamp.Add(-time.Second), // Durango activated + } + for _, durangoTime := range durangoTimes { + parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) + require.NoError(err) + + parsedBlock, ok := parsedBlockIntf.(SignedBlock) + require.True(ok) + + equal(require, chainID, builtBlock, parsedBlock) + } } func TestParseDuplicateExtension(t *testing.T) { require := require.New(t) - blockHex := 
"0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b0000000000000002000004bd308204b9308202a1a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313232303830333233323835335a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100c2b2de1c16924d9b9254a0d5b80a4bc5f9beaa4f4f40a0e4efb69eb9b55d7d37f8c82328c237d7c5b451f5427b487284fa3f365f9caa53c7fcfef8d7a461d743bd7d88129f2da62b877ebe9d6feabf1bd12923e6c12321382c782fc3bb6b6cb4986a937a1edc3814f4e621e1a62053deea8c7649e43edd97ab6b56315b00d9ab5026bb9c31fb042dc574ba83c54e720e0120fcba2e8a66b77839be3ece0d4a6383ef3f76aac952b49a15b65e18674cd1340c32cecbcbaf80ae45be001366cb56836575fb0ab51ea44bf7278817e99b6b180fdd110a49831a132968489822c56692161bbd372cf89d9b8ee5a734cff15303b3a960ee78d79e76662a701941d9ec084429f26707f767e9b1d43241c0e4f96655d95c1f4f4aa00add78eff6bf0a6982766a035bf0b465786632c5bb240788ca0fdf032d8815899353ea4bec5848fd30118711e5b356bde8a0da074cc25709623225e734ff5bd0cf65c40d9fd8fccf746d8f8f35145bcebcf378d2b086e57d78b11e84f47fa467c4d037f92bff6dd4e934e0189b58193f24c4222ffb72b5c06361cf68ca64345bc3e230cc0f40063ad5f45b1659c643662996328c2eeddcd760d6f7c9cbae081ccc065844f7ea78c858564a408979764de882793706acc67d88092790dff567ed914b03355330932616a0f26f994b963791f0b1dbd8df979db86d1ea490700a3120293c3c2b10bef10203010001a33c303a300e0603551d0f0101ff0404030204b030130603551d25040c300a06082b0601050507030230130603551d25040c300a06082b06010505070302300d06092a864886f70d01010b05000382020100a21a0d73ec9ef4eb39f810557ac70b0b775772b8bae5f42c98565bc50b5b2c57317aa9cb1da12f55d0aac7bb36a00cd4fd0d7384c4efa284b53520c5a3c4b8a65240b393eeab02c802ea146c0728c3481c9e8d3aaad9d4dd7607103dcfaa96da83460adbe18174ed5b71bde7b0a93d4fb52234a9ff54e3fd25c5b74790dfb090f2e59dc5907357f510cc3a0b70ccdb87aee214def794b316224f318b471ffa13b66e44b467670e881cb1628c99c048a503376d9b6d7b8eef2e7be47ff7d5c1d56221f4cf7fa2519b594cb5917815c64dc75d8d281bcc99b5a12899b08f2ca0f189857b64a1afc5963337f3dd6e79390e85221569f6dbbb13aadce06a3dfb5032f0cc454809627872cd7cd0cea5eba187723f07652c8abc3fc42bd62136fc66287f2cc19a7cb416923ad1862d7f820b55cacb65e43731cb6df780e2651e457a3438456aeeeb278ad9c0ad2e760f6c1cbe276eeb621c8a4e609b5f2d902beb3212e3e45df99497021ff536d0b56390c5d785a8bf7909f6b61bdc705d7d92ae22f58e7b075f164a0450d82d8286bf449072751636ab5185f59f518b845a75d112d6f7b65223479202cff67635e2ad88106bc8a0cc9352d87c5b182ac19a4680a958d814a093acf46730f87da0df6926291d02590f215041b44a0a1a32eeb3a52cddabc3d256689bace18a8d85e644cf9137cce3718f7caac1cb16ae06e874f4c701000000010300000200b8e3a4d9a4394bac714cb597f5ba1a81865185e35c782d0317e7abc0b52d49ff8e10f787bedf86f08148e3dbd2d2d478caa2a2893d31db7d5ee51339883fe84d3004440f16cb3797a7fab0f627d3ebd79217e995488e785cd6bb7b96b9d306f8109daa9cfc4162f9839f60fb965bcb3b56a5fa787549c153a4c80027398f73a617b90b7f24f437b140cd3ac832c0b75ec98b9423b275782988a9fd426937b8f82fbb0e88a622934643fb6335c1a080a4d13125544b04585d5f5295be7cd2c8be364246ea3d5df3e837b39a85074575a1fa2f4799050460110bdfb20795c8a9172a20f61b95e1c5c43eccd0c2c155b67385366142c63409cb3fb488e7aba6c8930f7f151abf1c24a54bd21c3f7a06856ea9db35beddecb30d2c61f533a3d0590bdbb438c6f2a2286dfc3c71b383354f0abad72771c2cc3687b50c2298783e53857cf26058ed78d0c1cf53786eb8d006a058ee3c85a7b2b836b5d03ef782709ce8f2725548e557b3de45a395a669a15f1d910e97015d22ac70020cab7e2531e8b1f739b023b49e742203e9e19a7fe0053826a9a2fe2e118d3b83498c2cb308573202ad41aa4a390aee4b6b5dd2164e5c5cd1b5f68b7d5632cf7dbb9a9139663c9aac53a74b2c6fc73cad80e228a186ba027f6f32f0182d62503e04fcced385f2e7d2e11c00940622ebd533b4d144689082f9
777e5b16c36f9af9066e0ad6564d43" + blockHex := "0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b000000000000000200000549308205453082032da003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313234303331383134303631345a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100d26e5f3da1caab11ce37919f7e307ee7c3c994498e78a7b8ab54c1c7c5246cb72b29a8fe1288f0938860bdca7335a885c645dcb7bc53cf80775945533cb9d46548f0038ae15ba63c5dcbab1600b42abaf70f467054cced3cd17142c031c43626b10db7986ad858581f6ead5185b77102602fdf2c7e2cddb7c7f11d8d461e3022c0b853ee18a5a93f18b321c8391c745be4c36d5c1759ab8b0bf6779e36529af4b3fcd924b1a33bdc0d807d47bc20040d32f11f1210f3088d55a7282ea07c59da0442805998bcb50ffe98420fc9835d6e664d25e6e41766761588e0fbfc6dacdb9c724f877c28dc45e79aecc4fa5fc24b238aa4512fd7823879edff32073ef8f34c8e609605014712254c4a7cf50f8b35d406e587e5b24a5f75d43d43c57591ee8b2c9ad1c2044c581dac3227e2d404e1e9af4674e762fc125c169b9a1b254a485d656f5c91d0388b956ad52cdac520b701555c2fe0e09087b6bbcffda981a58d8e98456af6a69ae24127ee7b438e24c67d88872f2363b505ac427c49e1592c2436de5ec245fac56cc24111b8a38a24e0bdfbef7627d6ca27af96d6b20d6fecb032dee7f3a459dc34730f290fda40f0eea1024c9b2a087b0055fdbc1621d9a9d87dd4b356b7caf121ba00022bf8a87711ca39583890128d01333b9ddb0ec4447c5bb0c85c6b295b2481f3a8f86b45536b3d15a0582fd3ac780ab01739fd6cd4d70203010001a381c73081c4300e0603551d0f0101ff0404030204b0300c0603551d130101ff04023000305106092a864886f70d0109150101ff04411cd3184187185ef0be03549b4c5d9b9d7592fd75eebfbd3de12c71e7360e2776543cf4edf4dbb5d674f61c58841abad64fb1e0ca0c24255d119fd658387cf2b800305106092a864886f70d0109150101ff04411cd3184187185ef0be03549b4c5d9b9d7592fd75eebfbd3de12c71e7360e2776543cf4edf4dbb5d674f61c58841abad64fb1e0ca0c24255d119fd658387cf2b800300d06092a864886f70d01010b050003820201001cf95b768b37bde828ca239e739a4229bacff2c53eb09e6b7f1499cb5157851b51ebdb45f5a94a3d0dc16c3d844ce57bb1f551b9bb6f92bcbdc08a7692e98ac257e594696a6f124df3b8a230a2f6ea34a8dd996516993cd91a2c0993e2c77f73454e77ee0f9d9a191f0a1d6b6b1bec901a1466bc0bcf781aa2e96bc65abc20bb2f5643829d811c50af8360022ee1da37f14d3e46e3d23e17fab57a847f7f3ba685090abf16d548c275654ab832935ecc73d496159078e124223314d0e2d8fc9f27426c8fbe6721684d205bac75d955ee71dd8ce6a1ae3c94da7c87c9c3126f9ae4715cbcccb1a9213357c0115e89e9b8d31cc9bbe0ad7e41e25d7473bdc30eaa541228182f650f53b952bdac8c4e9e5f3ceebe5858d85dd58431eb9dba5e4ff28f4212dd9c5ebf6abcae5dcad6b5f09144befb5a7c3f02c0ba5bff781c3acedc22c1cde635a39fb245bcf9f514949fac8321d6ec054377dbc1b24839caaabc29e3884c4de84523e6fa549253b691f6b5c7bdba6a410dc176c765ca14a499ef01916742138fc8156f2c14e4a122e581d1b6ca79e82dfd015b13c38011e248d25e0daccbe266dafbdff3f4ec99227c56795fe75d0d32876d054e5d124d873bebbaeff57ecb9f35146e97f7683809a615c54b89a8b21d0120cfedf133d4253ab9ae521106d245b50de8163b3e97b2e9eae63a72fc283d73b086e35b83fff3cb3d60000000010300000200032444fef47bcd6f77f9b5890a51a1de3b52269d476a04506727aa20b61dc535d09511c4c403058e2fdd7ee5d751b1b6153c4d02f07bf60988be15bf3ff6469bcfde45bdf12e979879d9537586b7394df60ca465f5facdac1722570b5f51f1eb2e8fa20c46a390d4319555d1a39a289563de511d36d517ecdb21b02f76a76d518a6b0eb40d15544f6d1b2e7fd70108af12260e6eaca8efbb2e254b5a3bcf486da1ebabace68c42c13a2a8f04cc626711f0b26f2d66bd0b451b5b4db474364b2dea51b93a41c9c676c00f54e30d4ddad249faa851bf7e99a5dc1b6431c0f79fc4748fb8fa299ad0eb8d92b24aa083f6d93f60384bccc980fc7ba957b71068977eedb7da7884d8a969fb84f3ef921055d63ceebde7c45ead163e19f6425668ff5c205f8368d4df57179efd64312ea4ddcbbabc1e99438e8d2bd05c5728edf505b9caf87cc07ec19f8b457667fc402d
0bf53b437b7079c57bbd1dc004950d016440a178061582d4f5431dcb7f7be3b44c085ea982938800272bb140a1aa53208c849c342cb534bad44d06fddb156c0429b9afa920d765dbf9fd09a9dfc9adcb8abe6e238d1a586ffb8164f05e44822d6130662a358d0ed54c0031fe48f0157d211d307a5ef423a7bea821c0886f562140d0347fb429cc978e69a3fe6733a373224acaccf9cbbd5574f5157c78cf2c1623d8f984efd730f7a9a553058073672d49a0" blockBytes, err := hex.DecodeString(blockHex) require.NoError(err) - _, err = Parse(blockBytes) + // Note: The above blockHex specifies 123 as the block's timestamp. + timestamp := time.Unix(123, 0) + durangoNotYetActivatedTime := timestamp.Add(time.Second) + durangoAlreadyActivatedTime := timestamp.Add(-time.Second) + + _, err = Parse(blockBytes, durangoNotYetActivatedTime) require.ErrorIs(err, errInvalidCertificate) + + _, err = Parse(blockBytes, durangoAlreadyActivatedTime) + require.NoError(err) } func TestParseHeader(t *testing.T) { @@ -116,7 +121,7 @@ func TestParseOption(t *testing.T) { builtOptionBytes := builtOption.Bytes() - parsedOption, err := Parse(builtOptionBytes) + parsedOption, err := Parse(builtOptionBytes, time.Time{}) require.NoError(err) equalOption(require, builtOption, parsedOption) @@ -134,14 +139,19 @@ func TestParseUnsigned(t *testing.T) { require.NoError(err) builtBlockBytes := builtBlock.Bytes() - - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, ids.Empty, builtBlock, parsedBlock) + durangoTimes := []time.Time{ + timestamp.Add(time.Second), // Durango not activated yet + timestamp.Add(-time.Second), // Durango activated + } + for _, durangoTime := range durangoTimes { + parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) + require.NoError(err) + + parsedBlock, ok := parsedBlockIntf.(SignedBlock) + require.True(ok) + + equal(require, ids.Empty, builtBlock, parsedBlock) + } } func TestParseGibberish(t *testing.T) { @@ -149,6 +159,6 @@ func TestParseGibberish(t *testing.T) { bytes := []byte{0, 1, 2, 3, 4, 5} - _, err := Parse(bytes) + _, err := Parse(bytes, time.Time{}) require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/vms/proposervm/block_server.go b/vms/proposervm/block_server.go index e9e2e192a4e5..6a056c8bc827 100644 --- a/vms/proposervm/block_server.go +++ b/vms/proposervm/block_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 90c99a22dd21..7f81f4e70175 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( + "bytes" "context" "crypto/ecdsa" "crypto/elliptic" @@ -17,13 +18,15 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" + "github.com/ava-labs/avalanchego/vms/proposervm/scheduler" ) // Assert that when the underlying VM implements ChainVMWithBuildBlockContext @@ -34,39 +37,53 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - pChainHeight := uint64(1337) - parentID := ids.GenerateTestID() - parentTimestamp := time.Now() - blkID := ids.GenerateTestID() + var ( + nodeID = ids.GenerateTestNodeID() + pChainHeight uint64 = 1337 + parentID = ids.GenerateTestID() + parentTimestamp = time.Now().Truncate(time.Second) + parentHeight uint64 = 1234 + blkID = ids.GenerateTestID() + ) + innerBlk := snowman.NewMockBlock(ctrl) innerBlk.EXPECT().ID().Return(blkID).AnyTimes() - innerBlk.EXPECT().Height().Return(pChainHeight - 1).AnyTimes() + innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() + builtBlk := snowman.NewMockBlock(ctrl) builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() - innerVM := mocks.NewMockChainVM(ctrl) - innerBlockBuilderVM := mocks.NewMockBuildBlockWithContextChainVM(ctrl) + + innerVM := block.NewMockChainVM(ctrl) + innerBlockBuilderVM := block.NewMockBuildBlockWithContextChainVM(ctrl) innerBlockBuilderVM.EXPECT().BuildBlockWithContext(gomock.Any(), &block.Context{ PChainHeight: pChainHeight - 1, }).Return(builtBlk, nil).AnyTimes() + vdrState := validators.NewMockState(ctrl) vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + windower := proposer.NewMockWindower(ctrl) - windower.EXPECT().Delay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(time.Duration(0), nil).AnyTimes() + windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nodeID, nil).AnyTimes() pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(err) vm := &VM{ + Config: Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + StakingCertLeaf: &staking.Certificate{}, + StakingLeafSigner: pk, + }, ChainVM: innerVM, blockBuilderVM: innerBlockBuilderVM, ctx: &snow.Context{ + NodeID: nodeID, ValidatorState: vdrState, Log: logging.NoLog{}, }, - Windower: windower, - stakingCertLeaf: &staking.Certificate{}, - stakingLeafSigner: pk, + Windower: windower, } blk := &postForkCommonComponents{ @@ -84,3 +101,351 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { require.NoError(err) require.Equal(builtBlk, gotChild.(*postForkBlock).innerBlk) } + +func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, 
activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(ctx)) + }() + + // Build a post fork block. It'll be the parent block in our test cases + parentTime := time.Now().Truncate(time.Second) + proVM.Set(parentTime) + + coreParentBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreParentBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch { + case blkID == coreParentBlk.ID(): + return coreParentBlk, nil + case blkID == coreGenBlk.ID(): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { // needed when setting preference + switch { + case bytes.Equal(b, coreParentBlk.Bytes()): + return coreParentBlk, nil + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + + parentBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.NoError(parentBlk.Verify(ctx)) + require.NoError(parentBlk.Accept(ctx)) + + // Make sure preference is duly set + require.NoError(proVM.SetPreference(ctx, parentBlk.ID())) + require.Equal(proVM.preferred, parentBlk.ID()) + _, err = proVM.getPostForkBlock(ctx, parentBlk.ID()) + require.NoError(err) + + // Force this node to be the only validator, so to guarantee + // it'd be picked if block build time was before MaxVerifyDelay + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + // a validator with a weight large enough to fully fill the proposers list + weight := uint64(proposer.MaxBuildWindows * 2) + + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: weight, + }, + }, nil + } + + coreChildBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{2}, + ParentV: coreParentBlk.ID(), + HeightV: coreParentBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreChildBlk, nil + } + + { + // Set local clock before MaxVerifyDelay from parent timestamp. + // Check that child block is signed. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(proVM.ctx.NodeID, childBlk.(*postForkBlock).Proposer()) // signed block + } + + { + // Set local clock exactly MaxVerifyDelay from parent timestamp. + // Check that child block is unsigned. 
+ localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } + + { + // Set local clock between MaxVerifyDelay and MaxBuildDelay from parent timestamp + // Check that child block is unsigned + localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } + + { + // Set local clock after MaxBuildDelay from parent timestamp + // Check that child block is unsigned + localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } +} + +func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(ctx)) + }() + + // Build a post fork block. It'll be the parent block in our test cases + parentTime := time.Now().Truncate(time.Second) + proVM.Set(parentTime) + + coreParentBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreParentBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch { + case blkID == coreParentBlk.ID(): + return coreParentBlk, nil + case blkID == coreGenBlk.ID(): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { // needed when setting preference + switch { + case bytes.Equal(b, coreParentBlk.Bytes()): + return coreParentBlk, nil + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + + parentBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.NoError(parentBlk.Verify(ctx)) + require.NoError(parentBlk.Accept(ctx)) + + // Make sure preference is duly set + require.NoError(proVM.SetPreference(ctx, parentBlk.ID())) + require.Equal(proVM.preferred, parentBlk.ID()) + _, err = proVM.getPostForkBlock(ctx, parentBlk.ID()) + require.NoError(err) + + // Mark node as a non-validator + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + aValidator = ids.GenerateTestNodeID() + + // a validator with a weight large enough to fully fill the proposers list + weight = uint64(proposer.MaxBuildWindows * 2) + ) + return map[ids.NodeID]*validators.GetValidatorOutput{ + aValidator: { + NodeID: aValidator, + Weight: weight, + }, + }, nil + } + + coreChildBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: 
ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{2}, + ParentV: coreParentBlk.ID(), + HeightV: coreParentBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreChildBlk, nil + } + + { + // Set local clock before MaxVerifyDelay from parent timestamp. + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock exactly MaxVerifyDelay from parent timestamp. + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock among MaxVerifyDelay and MaxBuildDelay from parent timestamp + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock after MaxBuildDelay from parent timestamp + // Check that child block is built and it is unsigned + localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } +} + +// We consider cases where this node is not current proposer (may be scheduled in the next future or not). +// We check that scheduler is called nonetheless, to be able to process innerVM block requests +func TestPostDurangoBuildChildResetScheduler(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + var ( + thisNodeID = ids.GenerateTestNodeID() + selectedProposer = ids.GenerateTestNodeID() + pChainHeight uint64 = 1337 + parentID = ids.GenerateTestID() + parentTimestamp = time.Now().Truncate(time.Second) + now = parentTimestamp.Add(12 * time.Second) + parentHeight uint64 = 1234 + ) + + innerBlk := snowman.NewMockBlock(ctrl) + innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() + + vdrState := validators.NewMockState(ctrl) + vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + + windower := proposer.NewMockWindower(ctrl) + windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(selectedProposer, nil).AnyTimes() // return a proposer different from thisNode, to check whether scheduler is reset + + scheduler := scheduler.NewMockScheduler(ctrl) + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(err) + vm := &VM{ + Config: Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + StakingCertLeaf: &staking.Certificate{}, + StakingLeafSigner: pk, + }, + ChainVM: block.NewMockChainVM(ctrl), + ctx: &snow.Context{ + NodeID: thisNodeID, + ValidatorState: vdrState, + Log: logging.NoLog{}, + }, + Windower: windower, + Scheduler: scheduler, + } + vm.Clock.Set(now) + + blk := &postForkCommonComponents{ + innerBlk: innerBlk, + vm: vm, + } + + delays := []time.Duration{ + proposer.MaxLookAheadWindow - time.Minute, + proposer.MaxLookAheadWindow, + proposer.MaxLookAheadWindow + time.Minute, + } + + for _, delay := range delays { + windower.EXPECT().MinDelayForProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(delay, nil).Times(1) + + // we mock the scheduler setting the exact time we expect it to be reset + // to + expectedSchedulerTime := parentTimestamp.Add(delay) + scheduler.EXPECT().SetBuildBlockTime(expectedSchedulerTime).Times(1) + + _, err = blk.buildChild( + context.Background(), + parentID, + parentTimestamp, + pChainHeight-1, + ) + require.ErrorIs(err, errUnexpectedProposer) + } +} diff --git a/vms/proposervm/config.go b/vms/proposervm/config.go new file mode 100644 index 000000000000..a7eb4ff0db9b --- /dev/null +++ b/vms/proposervm/config.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package proposervm + +import ( + "crypto" + "time" + + "github.com/ava-labs/avalanchego/staking" +) + +type Config struct { + // Time at which proposerVM activates its congestion control mechanism + ActivationTime time.Time + + // Durango fork activation time + DurangoTime time.Time + + // Minimal P-chain height referenced upon block building + MinimumPChainHeight uint64 + + // Configurable minimal delay among blocks issued consecutively + MinBlkDelay time.Duration + + // Maximal number of block indexed. + // Zero signals all blocks are indexed. + NumHistoricalBlocks uint64 + + // Block signer + StakingLeafSigner crypto.Signer + + // Block certificate + StakingCertLeaf *staking.Certificate +} + +func (c *Config) IsDurangoActivated(timestamp time.Time) bool { + return !timestamp.Before(c.DurangoTime) +} diff --git a/vms/proposervm/height_indexed_vm.go b/vms/proposervm/height_indexed_vm.go index 99b911c5be64..a29334f6d8dd 100644 --- a/vms/proposervm/height_indexed_vm.go +++ b/vms/proposervm/height_indexed_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -136,7 +136,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { zap.Uint64("height", height), ) - if vm.numHistoricalBlocks == 0 { + if vm.NumHistoricalBlocks == 0 { return nil } @@ -145,13 +145,13 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { // is why <= is used rather than <. This prevents the user from only storing // the last accepted block, which can never be safe due to the non-atomic // commits between the proposervm database and the innerVM's database. 
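// (Aside, not part of the patch) The surrounding storeHeightEntry/pruneOldBlocks
// changes implement a sliding retention window over the renamed NumHistoricalBlocks
// config field (0 = keep every block). A minimal sketch of that retention rule,
// ignoring the fork height for simplicity; package and helper names are hypothetical:
package historysketch

// OldestRetainedHeight returns the lowest height that must still be indexed once
// lastAccepted has been stored, and whether anything may be pruned at all.
func OldestRetainedHeight(lastAccepted, numHistoricalBlocks uint64) (uint64, bool) {
	if numHistoricalBlocks == 0 || lastAccepted <= numHistoricalBlocks {
		return 0, false // retain everything
	}
	return lastAccepted - numHistoricalBlocks, true
}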
- if blocksSinceFork <= vm.numHistoricalBlocks { + if blocksSinceFork <= vm.NumHistoricalBlocks { return nil } // Note: heightToDelete is >= forkHeight, so it is guaranteed not to // underflow. - heightToDelete := height - vm.numHistoricalBlocks - 1 + heightToDelete := height - vm.NumHistoricalBlocks - 1 blockToDelete, err := vm.State.GetBlockIDAtHeight(heightToDelete) if err == database.ErrNotFound { // Block may have already been deleted. This can happen due to a @@ -180,7 +180,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { // TODO: Support async deletion of old blocks. func (vm *VM) pruneOldBlocks() error { - if vm.numHistoricalBlocks == 0 { + if vm.NumHistoricalBlocks == 0 { return nil } @@ -194,7 +194,7 @@ func (vm *VM) pruneOldBlocks() error { // // Note: vm.lastAcceptedHeight is guaranteed to be >= height, so the // subtraction can never underflow. - for vm.lastAcceptedHeight-height > vm.numHistoricalBlocks { + for vm.lastAcceptedHeight-height > vm.NumHistoricalBlocks { blockToDelete, err := vm.State.GetBlockIDAtHeight(height) if err != nil { return err diff --git a/vms/proposervm/indexer/block_server.go b/vms/proposervm/indexer/block_server.go index e817b9bad830..fcecaf9e9fcf 100644 --- a/vms/proposervm/indexer/block_server.go +++ b/vms/proposervm/indexer/block_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/vms/proposervm/indexer/block_server_test.go b/vms/proposervm/indexer/block_server_test.go index e132926c811c..a973d66a05a9 100644 --- a/vms/proposervm/indexer/block_server_test.go +++ b/vms/proposervm/indexer/block_server_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/vms/proposervm/indexer/height_indexer.go b/vms/proposervm/indexer/height_indexer.go index 697570306f6b..c0a1e4155b3b 100644 --- a/vms/proposervm/indexer/height_indexer.go +++ b/vms/proposervm/indexer/height_indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/vms/proposervm/indexer/height_indexer_test.go b/vms/proposervm/indexer/height_indexer_test.go index 0ff9ebcdbb59..2a093530048a 100644 --- a/vms/proposervm/indexer/height_indexer_test.go +++ b/vms/proposervm/indexer/height_indexer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/vms/proposervm/main_test.go b/vms/proposervm/main_test.go index 913e29613f1c..72165ddb6e78 100644 --- a/vms/proposervm/main_test.go +++ b/vms/proposervm/main_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/vms/proposervm/mock_post_fork_block.go b/vms/proposervm/mock_post_fork_block.go index 4f0847424253..ab449b6363bf 100644 --- a/vms/proposervm/mock_post_fork_block.go +++ b/vms/proposervm/mock_post_fork_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm (interfaces: PostForkBlock) +// +// Generated by this command: +// +// mockgen -package=proposervm -destination=vms/proposervm/mock_post_fork_block.go github.com/ava-labs/avalanchego/vms/proposervm PostForkBlock +// // Package proposervm is a generated GoMock package. package proposervm @@ -51,7 +53,7 @@ func (m *MockPostForkBlock) Accept(arg0 context.Context) error { } // Accept indicates an expected call of Accept. -func (mr *MockPostForkBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Accept(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockPostForkBlock)(nil).Accept), arg0) } @@ -121,7 +123,7 @@ func (m *MockPostForkBlock) Reject(arg0 context.Context) error { } // Reject indicates an expected call of Reject. -func (mr *MockPostForkBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Reject(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockPostForkBlock)(nil).Reject), arg0) } @@ -163,7 +165,7 @@ func (m *MockPostForkBlock) Verify(arg0 context.Context) error { } // Verify indicates an expected call of Verify. -func (mr *MockPostForkBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Verify(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockPostForkBlock)(nil).Verify), arg0) } @@ -177,7 +179,7 @@ func (m *MockPostForkBlock) acceptInnerBlk(arg0 context.Context) error { } // acceptInnerBlk indicates an expected call of acceptInnerBlk. -func (mr *MockPostForkBlockMockRecorder) acceptInnerBlk(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) acceptInnerBlk(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "acceptInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).acceptInnerBlk), arg0) } @@ -206,7 +208,7 @@ func (m *MockPostForkBlock) buildChild(arg0 context.Context) (Block, error) { } // buildChild indicates an expected call of buildChild. -func (mr *MockPostForkBlockMockRecorder) buildChild(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) buildChild(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "buildChild", reflect.TypeOf((*MockPostForkBlock)(nil).buildChild), arg0) } @@ -249,7 +251,7 @@ func (m *MockPostForkBlock) pChainHeight(arg0 context.Context) (uint64, error) { } // pChainHeight indicates an expected call of pChainHeight. -func (mr *MockPostForkBlockMockRecorder) pChainHeight(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) pChainHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pChainHeight", reflect.TypeOf((*MockPostForkBlock)(nil).pChainHeight), arg0) } @@ -261,7 +263,7 @@ func (m *MockPostForkBlock) setInnerBlk(arg0 snowman.Block) { } // setInnerBlk indicates an expected call of setInnerBlk. 
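// (Aside, not part of the patch) The remaining recorder methods in this generated
// mock only swap interface{} for any in their signatures. Since Go 1.18, any is an
// alias of interface{}, so the signatures are type-identical and call sites are
// unaffected; a one-line check that compiles for exactly that reason:
package anysketch

var _ func(arg0 any) = func(arg0 interface{}) {}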
-func (mr *MockPostForkBlockMockRecorder) setInnerBlk(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) setInnerBlk(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).setInnerBlk), arg0) } @@ -273,7 +275,7 @@ func (m *MockPostForkBlock) setStatus(arg0 choices.Status) { } // setStatus indicates an expected call of setStatus. -func (mr *MockPostForkBlockMockRecorder) setStatus(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) setStatus(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setStatus", reflect.TypeOf((*MockPostForkBlock)(nil).setStatus), arg0) } @@ -287,7 +289,7 @@ func (m *MockPostForkBlock) verifyPostForkChild(arg0 context.Context, arg1 *post } // verifyPostForkChild indicates an expected call of verifyPostForkChild. -func (mr *MockPostForkBlockMockRecorder) verifyPostForkChild(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPostForkChild(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkChild), arg0, arg1) } @@ -301,7 +303,7 @@ func (m *MockPostForkBlock) verifyPostForkOption(arg0 context.Context, arg1 *pos } // verifyPostForkOption indicates an expected call of verifyPostForkOption. -func (mr *MockPostForkBlockMockRecorder) verifyPostForkOption(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPostForkOption(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkOption", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkOption), arg0, arg1) } @@ -315,7 +317,7 @@ func (m *MockPostForkBlock) verifyPreForkChild(arg0 context.Context, arg1 *preFo } // verifyPreForkChild indicates an expected call of verifyPreForkChild. -func (mr *MockPostForkBlockMockRecorder) verifyPreForkChild(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPreForkChild(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPreForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPreForkChild), arg0, arg1) } diff --git a/vms/proposervm/post_fork_block.go b/vms/proposervm/post_fork_block.go index 28d127d33f70..707b6dc327c7 100644 --- a/vms/proposervm/post_fork_block.go +++ b/vms/proposervm/post_fork_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index 7f99a3b11e8e..efe03d5a2c32 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
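// (Aside, not part of the patch) The tests in post_fork_block_test.go below select
// the fork behaviour purely through the two times handed to the shared test helper:
// durangoTime = activationTime exercises the post-Durango path, while
// durangoTime = mockable.MaxTime keeps Durango inactive. The gate itself is
// Config.IsDurangoActivated introduced in config.go above; a self-contained sketch
// of that check (package and function names here are illustrative):
package durangosketch

import (
	"time"

	"github.com/ava-labs/avalanchego/utils/timer/mockable"
)

// isDurangoActivated mirrors Config.IsDurangoActivated: timestamps at or after
// DurangoTime follow the post-Durango rules.
func isDurangoActivated(durangoTime, timestamp time.Time) bool {
	return !timestamp.Before(durangoTime)
}

// With DurangoTime = mockable.MaxTime nothing is ever Durango-activated.
var preDurango = !isDurangoActivated(mockable.MaxTime, time.Now())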
package proposervm @@ -26,6 +26,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) @@ -48,7 +50,11 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { require.Equal(snowman.ErrNotOracle, err) // setup - _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -80,11 +86,10 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { ids.Empty, // refer unknown parent time.Time{}, 0, // pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerOracleBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk = postForkBlock{ @@ -102,10 +107,14 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { } // ProposerBlock.Verify tests section -func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { +func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime // pre Durango + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -116,24 +125,24 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { } // create parent block ... 
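// (Aside, not part of the patch) The cases in this test assert the parent check for
// a post-fork child: verification only proceeds if the declared parent ID can be
// fetched, otherwise it fails with database.ErrNotFound. A hypothetical stand-in
// for that lookup, just to make the rule explicit:
package parentchecksketch

import (
	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
)

func checkParentKnown(knownBlocks map[ids.ID]struct{}, parentID ids.ID) error {
	if _, ok := knownBlocks[parentID]; !ok {
		return database.ErrNotFound // what require.ErrorIs matches in the test
	}
	return nil
}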
- prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -142,39 +151,26 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) - require.NoError(prntProBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // .. create child block ... childCoreBlk := &snowman.TestBlock{ - ParentV: prntCoreBlk.ID(), - BytesV: []byte{2}, - TimestampV: prntCoreBlk.Timestamp(), + ParentV: parentCoreBlk.ID(), + BytesV: []byte{2}, + HeightV: parentCoreBlk.Height() + 1, } - childSlb, err := block.Build( - ids.Empty, // refer unknown parent - childCoreBlk.Timestamp(), - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -182,57 +178,203 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { }, } - // child block referring unknown parent does not verify - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, database.ErrNotFound) + // set proVM to be able to build unsigned blocks + proVM.Set(proVM.Time().Add(proposer.MaxVerifyDelay)) + + { + // child block referring unknown parent does not verify + childSlb, err := block.BuildUnsigned( + ids.Empty, // refer unknown parent + proVM.Time(), + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) + } - // child block referring known parent does verify - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), // refer known parent - prntProBlk.Timestamp().Add(proposer.MaxDelay), - pChainHeight, - childCoreBlk.Bytes(), + { + // child block referring known parent does verify + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), // refer known parent + proVM.Time(), + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } +} + +func 
TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime // post Durango ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + pChainHeight := uint64(100) + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } + + parentCoreBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(1111), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return parentCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case coreGenBlk.ID(): + return coreGenBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil + default: + return nil, database.ErrNotFound + } + } + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { + switch { + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil + default: + return nil, errUnknownBlock + } + } + + parentBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(err) - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - require.NoError(childProBlk.Verify(context.Background())) + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + + childCoreBlk := &snowman.TestBlock{ + ParentV: parentCoreBlk.ID(), + BytesV: []byte{2}, + HeightV: parentCoreBlk.Height() + 1, + } + childBlk := postForkBlock{ + postForkCommonComponents: postForkCommonComponents{ + vm: proVM, + innerBlk: childCoreBlk, + status: choices.Processing, + }, + } + + require.NoError(waitForProposerWindow(proVM, parentBlk, parentBlk.(*postForkBlock).PChainHeight())) + + { + // child block referring unknown parent does not verify + childSlb, err := block.Build( + ids.Empty, // refer unknown parent + proVM.Time(), + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) + } + + { + // child block referring known parent does verify + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + + require.NoError(err) + childBlk.SignedBlock = childSlb + + proVM.Set(childSlb.Timestamp()) + require.NoError(childBlk.Verify(context.Background())) + } } func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() + // reduce validator state to allow proVM.ctx.NodeID to be easily selected as proposer + valState.GetValidatorSetF = 
func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + ) + return map[ids.NodeID]*validators.GetValidatorOutput{ + thisNode: { + NodeID: thisNode, + Weight: 5, + }, + nodeID1: { + NodeID: nodeID1, + Weight: 100, + }, + }, nil + } + proVM.ctx.ValidatorState = valState + pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil } // create parent block ... - prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -241,46 +383,34 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - prntProBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) - require.NoError(prntProBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) - prntTimestamp := prntProBlk.Timestamp() + var ( + parentTimestamp = parentBlk.Timestamp() + parentPChainHeight = parentBlk.(*postForkBlock).PChainHeight() + ) childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - ParentV: prntCoreBlk.ID(), + ParentV: parentCoreBlk.ID(), + HeightV: parentCoreBlk.Height() + 1, BytesV: []byte{2}, } - - // child block timestamp cannot be lower than parent timestamp - childCoreBlk.TimestampV = prntTimestamp.Add(-1 * time.Second) - proVM.Clock.Set(childCoreBlk.TimestampV) - childSlb, err := block.Build( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -288,100 +418,137 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { }, } - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errTimeNotMonotonic) + { + // child block timestamp cannot be lower than parent timestamp + newTime := parentTimestamp.Add(-1 * time.Second) + proVM.Clock.Set(newTime) + + childSlb, err := block.Build( + parentBlk.ID(), + newTime, + pChainHeight, + proVM.StakingCertLeaf, + 
childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeNotMonotonic) + } - // block cannot arrive before its creator window starts - blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), pChainHeight, proVM.ctx.NodeID) + blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), parentPChainHeight, proVM.ctx.NodeID, proposer.MaxVerifyWindows) require.NoError(err) - beforeWinStart := prntTimestamp.Add(blkWinDelay).Add(-1 * time.Second) - proVM.Clock.Set(beforeWinStart) - childSlb, err = block.Build( - prntProBlk.ID(), - beforeWinStart, - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errProposerWindowNotStarted) - - // block can arrive at its creator window starts - atWindowStart := prntTimestamp.Add(blkWinDelay) - proVM.Clock.Set(atWindowStart) - childSlb, err = block.Build( - prntProBlk.ID(), - atWindowStart, - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - - require.NoError(childProBlk.Verify(context.Background())) - - // block can arrive after its creator window starts - afterWindowStart := prntTimestamp.Add(blkWinDelay).Add(5 * time.Second) - proVM.Clock.Set(afterWindowStart) - childSlb, err = block.Build( - prntProBlk.ID(), - afterWindowStart, - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) - - // block can arrive within submission window - atSubWindowEnd := proVM.Time().Add(proposer.MaxDelay) - proVM.Clock.Set(atSubWindowEnd) - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - atSubWindowEnd, - pChainHeight, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) - - // block timestamp cannot be too much in the future - afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) - childSlb, err = block.Build( - prntProBlk.ID(), - afterSubWinEnd, - pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errTimeTooAdvanced) + + { + // block cannot arrive before its creator window starts + beforeWinStart := parentTimestamp.Add(blkWinDelay).Add(-1 * time.Second) + proVM.Clock.Set(beforeWinStart) + + childSlb, err := block.Build( + parentBlk.ID(), + beforeWinStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // block can arrive at its creator window starts + atWindowStart := parentTimestamp.Add(blkWinDelay) + proVM.Clock.Set(atWindowStart) + + childSlb, err := block.Build( + 
parentBlk.ID(), + atWindowStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block can arrive after its creator window starts + afterWindowStart := parentTimestamp.Add(blkWinDelay).Add(5 * time.Second) + proVM.Clock.Set(afterWindowStart) + + childSlb, err := block.Build( + parentBlk.ID(), + afterWindowStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block can arrive within submission window + atSubWindowEnd := proVM.Time().Add(proposer.MaxVerifyDelay) + proVM.Clock.Set(atSubWindowEnd) + + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + atSubWindowEnd, + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block timestamp cannot be too much in the future + afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) + + childSlb, err := block.Build( + parentBlk.ID(), + afterSubWinEnd, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) + } } func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -390,26 +557,29 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil } + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return pChainHeight / 50, nil + } // create parent block ... 
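// (Aside, not part of the patch) The blocks built in this test pin down the P-chain
// height rule for a child: its height may not drop below the parent's
// (errPChainHeightNotMonotonic) and may not exceed the height this node currently
// knows (errPChainHeightNotReached); anything in between, bounds included, verifies.
// A minimal sketch with hypothetical error values:
package pchainheightsketch

import "errors"

var (
	errNotMonotonic = errors.New("child P-chain height below parent's")
	errNotReached   = errors.New("child P-chain height above current height")
)

func checkChildPChainHeight(child, parent, current uint64) error {
	switch {
	case child < parent:
		return errNotMonotonic
	case child > current:
		return errNotReached
	default:
		return nil
	}
}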
- prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -418,45 +588,34 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - prntProBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) - require.NoError(prntProBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) - prntBlkPChainHeight := pChainHeight + // set VM to be ready to build next block. We set it to generate unsigned blocks + // for simplicity. + parentBlkPChainHeight := parentBlk.(*postForkBlock).PChainHeight() + require.NoError(waitForProposerWindow(proVM, parentBlk, parentBlkPChainHeight)) childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - ParentV: prntCoreBlk.ID(), - BytesV: []byte{2}, - TimestampV: prntProBlk.Timestamp().Add(proposer.MaxDelay), - } - - // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.Build( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight-1, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk := postForkBlock{ - SignedBlock: childSlb, + ParentV: parentCoreBlk.ID(), + HeightV: parentBlk.Height() + 1, + BytesV: []byte{2}, + } + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -464,63 +623,103 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { }, } - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errTimeTooAdvanced) + { + // child P-Chain height must not precede parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight-1, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotMonotonic) + } - // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - - 
proVM.Set(childCoreBlk.Timestamp()) - require.NoError(childProBlk.Verify(context.Background())) - - // child P-Chain height may follow parent P-Chain height - pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight+1, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) + { + // child P-Chain height can be equal to parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // child P-Chain height may follow parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } - // block P-Chain height can be equal to current P-Chain height currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) - - // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight*2, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errPChainHeightNotReached) + { + // block P-Chain height can be equal to current P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + currPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block P-Chain height cannot be at higher than current P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + currPChainHeight*2, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotReached) + } } func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -529,7 +728,9 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil } - // proVM.SetStartTime(timer.MaxTime) // switch off scheduler for 
current test + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return pChainHeight / 50, nil + } // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -538,9 +739,9 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -549,18 +750,18 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -612,32 +813,23 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) require.NoError(parentBlk.Verify(context.Background())) require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) - prntBlkPChainHeight := pChainHeight + // set VM to be ready to build next block. We set it to generate unsigned blocks + // for simplicity. + nextTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(nextTime) + + parentBlkPChainHeight := postForkOracleBlk.PChainHeight() // option takes proposal blocks' Pchain height childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{2}, - TimestampV: parentBlk.Timestamp().Add(proposer.MaxDelay), - } - - // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.Build( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight-1, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - require.NoError(err) - childProBlk := postForkBlock{ - SignedBlock: childSlb, + ParentV: oracleCoreBlk.opts[0].ID(), + BytesV: []byte{2}, + HeightV: oracleCoreBlk.opts[0].Height() + 1, + } + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -645,57 +837,77 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) }, } - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errTimeTooAdvanced) + { + // child P-Chain height must not precede parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight-1, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotMonotonic) + } - // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - - proVM.Set(childCoreBlk.Timestamp()) - 
require.NoError(childProBlk.Verify(context.Background())) - - // child P-Chain height may follow parent P-Chain height - pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight+1, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) + { + // child P-Chain height can be equal to parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // child P-Chain height may follow parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight+1, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } - // block P-Chain height can be equal to current P-Chain height currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - require.NoError(childProBlk.Verify(context.Background())) - - // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight*2, - childCoreBlk.Bytes(), - ) - require.NoError(err) - childProBlk.SignedBlock = childSlb - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, errPChainHeightNotReached) + { + // block P-Chain height can be equal to current P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + currPChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block P-Chain height cannot be at higher than current P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + currPChainHeight*2, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotReached) + } } func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { @@ -703,7 +915,11 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { // Verify a block once (in this test by building it). 
// Show that other verify call would not call coreBlk.Verify() - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -718,9 +934,9 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -766,7 +982,11 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { require := require.New(t) // setup - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -781,9 +1001,9 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -829,7 +1049,11 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -845,10 +1069,9 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -877,7 +1100,11 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -887,10 +1114,9 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: 
coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -910,8 +1136,11 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -923,9 +1152,9 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } coreOpt0 := &snowman.TestBlock{ @@ -933,18 +1162,18 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } coreOpt1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } oracleCoreBlk.opts = [2]snowman.Block{ coreOpt0, @@ -1005,11 +1234,10 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { postForkOracleBlk.ID(), postForkOracleBlk.Timestamp().Add(proposer.WindowDuration), postForkOracleBlk.PChainHeight(), - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, oracleCoreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) @@ -1026,8 +1254,11 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 5) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 5) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1037,9 +1268,9 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { diff --git a/vms/proposervm/post_fork_option.go b/vms/proposervm/post_fork_option.go index 047a01c477fd..93cfd2550ca6 100644 --- a/vms/proposervm/post_fork_option.go +++ b/vms/proposervm/post_fork_option.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 09fe29730b6f..dd16f8cdb518 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) var _ snowman.OracleBlock = (*TestOptionsBlock)(nil) @@ -37,8 +36,11 @@ func (tob TestOptionsBlock) Options(context.Context) ([2]snowman.Block, error) { func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -50,9 +52,9 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -61,18 +63,18 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -133,14 +135,14 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { IDV: ids.Empty.Prefix(4444), StatusV: choices.Processing, }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{4}, - TimestampV: oracleCoreBlk.opts[0].Timestamp().Add(proposer.MaxDelay), + ParentV: oracleCoreBlk.opts[0].ID(), + BytesV: []byte{4}, + HeightV: oracleCoreBlk.opts[0].Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return childCoreBlk, nil } - proVM.Set(childCoreBlk.Timestamp()) + require.NoError(waitForProposerWindow(proVM, opts[0], postForkOracleBlk.PChainHeight())) proChild, err := proVM.BuildBlock(context.Background()) require.NoError(err) @@ -153,8 +155,11 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { require := require.New(t) // Verify an option once; then show that another verify call would not call coreBlk.Verify() - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, 
proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -166,9 +171,9 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } coreOpt0 := &snowman.TestBlock{ @@ -176,18 +181,18 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } coreOpt1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } oracleCoreBlk.opts = [2]snowman.Block{ coreOpt0, @@ -255,8 +260,11 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -268,9 +276,9 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -279,18 +287,18 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -365,8 +373,11 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -378,9 +389,9 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: 
coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -389,18 +400,18 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -467,8 +478,11 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { require := require.New(t) // Verify an option once; then show that another verify call would not call coreBlk.Verify() - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -479,9 +493,9 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, optsErr: snowman.ErrNotOracle, } @@ -491,10 +505,9 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk.ID(), - HeightV: coreBlk.Height() + 1, - TimestampV: coreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk.ID(), + HeightV: coreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -553,7 +566,11 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { func TestOptionTimestampValidity(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, activationTime, durangoTime, 0) coreOracleBlkID := ids.GenerateTestID() coreOracleBlk := &TestOptionsBlock{ @@ -562,10 +579,9 @@ func TestOptionTimestampValidity(t *testing.T) { IDV: coreOracleBlkID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -573,24 +589,26 @@ func TestOptionTimestampValidity(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreOracleBlkID, - TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{2}, + ParentV: coreOracleBlkID, + HeightV: coreGenBlk.Height() + 2, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreOracleBlkID, - 
TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{3}, + ParentV: coreOracleBlkID, + HeightV: coreGenBlk.Height() + 2, }, }, } + + oracleBlkTime := proVM.Time().Truncate(time.Second) statelessBlock, err := block.BuildUnsigned( coreGenBlk.ID(), - coreGenBlk.Timestamp(), + oracleBlkTime, 0, coreOracleBlk.Bytes(), ) @@ -650,8 +668,7 @@ func TestOptionTimestampValidity(t *testing.T) { return nil, nil } - expectedTime := coreGenBlk.Timestamp() - require.Equal(expectedTime, option.Timestamp()) + require.Equal(oracleBlkTime, option.Timestamp()) require.NoError(option.Accept(context.Background())) require.NoError(proVM.Shutdown(context.Background())) @@ -660,12 +677,15 @@ func TestOptionTimestampValidity(t *testing.T) { ctx := proVM.ctx proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) coreVM.InitializeF = func( @@ -743,5 +763,5 @@ func TestOptionTimestampValidity(t *testing.T) { return nil, nil } - require.Equal(expectedTime, statefulOptionBlock.Timestamp()) + require.Equal(oracleBlkTime, statefulOptionBlock.Timestamp()) } diff --git a/vms/proposervm/pre_fork_block.go b/vms/proposervm/pre_fork_block.go index ed665e473910..199c1c98db7d 100644 --- a/vms/proposervm/pre_fork_block.go +++ b/vms/proposervm/pre_fork_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -97,7 +97,7 @@ func (b *preForkBlock) getInnerBlk() snowman.Block { func (b *preForkBlock) verifyPreForkChild(ctx context.Context, child *preForkBlock) error { parentTimestamp := b.Timestamp() - if !parentTimestamp.Before(b.vm.activationTime) { + if !parentTimestamp.Before(b.vm.ActivationTime) { if err := verifyIsOracleBlock(ctx, b.Block); err != nil { return err } @@ -135,7 +135,7 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB currentPChainHeight, ) } - if childPChainHeight < b.vm.minimumPChainHeight { + if childPChainHeight < b.vm.MinimumPChainHeight { return errPChainHeightTooLow } @@ -150,7 +150,7 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB // if the *preForkBlock is the last *preForkBlock before activation takes effect // (its timestamp is at or after the activation time) parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { return errProposersNotActivated } @@ -181,7 +181,7 @@ func (*preForkBlock) verifyPostForkOption(context.Context, *postForkOption) erro func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { // The chain hasn't forked yet innerBlock, err := b.vm.ChainVM.BuildBlock(ctx) if err != nil { @@ -210,7 +210,7 @@ func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { // The child's P-Chain height is proposed as the optimal P-Chain height that // is at least the minimum height - pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.minimumPChainHeight) + pChainHeight, err := 
b.vm.optimalPChainHeight(ctx, b.vm.MinimumPChainHeight) if err != nil { b.vm.ctx.Log.Error("unexpected build block failure", zap.String("reason", "failed to calculate optimal P-chain height"), diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 229bdf6f745b..cf269a39691a 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -28,12 +28,12 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" + + statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { @@ -61,7 +61,11 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -149,8 +153,11 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { require := require.New(t) - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -243,8 +250,11 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { require := require.New(t) - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -252,7 +262,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { require.True(coreGenBlk.Timestamp().Before(activationTime)) // create parent block ... 
- prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, @@ -262,14 +272,14 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -278,15 +288,14 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) // .. create child block ... @@ -296,28 +305,35 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { StatusV: choices.Processing, }, BytesV: []byte{2}, - TimestampV: prntCoreBlk.Timestamp().Add(proposer.MaxDelay), + TimestampV: parentCoreBlk.Timestamp(), } - childProBlk := preForkBlock{ + childBlk := preForkBlock{ Block: childCoreBlk, vm: proVM, } - // child block referring unknown parent does not verify - childCoreBlk.ParentV = ids.Empty - err = childProBlk.Verify(context.Background()) - require.ErrorIs(err, database.ErrNotFound) + { + // child block referring unknown parent does not verify + childCoreBlk.ParentV = ids.Empty + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) + } - // child block referring known parent does verify - childCoreBlk.ParentV = prntProBlk.ID() - require.NoError(childProBlk.Verify(context.Background())) + { + // child block referring known parent does verify + childCoreBlk.ParentV = parentBlk.ID() + require.NoError(childBlk.Verify(context.Background())) + } } func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { require := require.New(t) - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -348,15 +364,14 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { require.NoError(preForkChild.Verify(context.Background())) // postFork block does NOT verify if parent is before fork activation time - postForkStatelessChild, err := block.Build( + postForkStatelessChild, err := statelessblock.Build( coreGenBlk.ID(), coreBlk.Timestamp(), 0, // pChainHeight - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) postForkChild := &postForkBlock{ @@ -448,8 +463,11 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { require := require.New(t) - 
activationTime := genesisTimestamp.Add(-1 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(-1 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(activationTime) defer func() { require.NoError(proVM.Shutdown(context.Background())) @@ -490,7 +508,11 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { require := require.New(t) // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -548,7 +570,11 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -579,8 +605,11 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { require := require.New(t) - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -670,8 +699,11 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { require := require.New(t) - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -747,15 +779,14 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { require.NoError(firstBlock.Verify(context.Background())) - slb, err := block.Build( + slb, err := statelessblock.Build( firstBlock.ID(), // refer unknown parent firstBlock.Timestamp(), 0, // pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) @@ -785,7 +816,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) innerVM.EXPECT().BuildBlock(gomock.Any()).Return(builtBlk, nil).AnyTimes() vdrState := validators.NewMockState(ctrl) 
vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() @@ -810,7 +841,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { // Should call BuildBlock since proposervm is not activated innerBlk.EXPECT().Timestamp().Return(time.Time{}) - vm.activationTime = mockable.MaxTime + vm.ActivationTime = mockable.MaxTime gotChild, err = blk.buildChild(context.Background()) require.NoError(err) diff --git a/vms/proposervm/proposer/mock_windower.go b/vms/proposervm/proposer/mock_windower.go index bfa83998995c..5384da8ccf80 100644 --- a/vms/proposervm/proposer/mock_windower.go +++ b/vms/proposervm/proposer/mock_windower.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm/proposer (interfaces: Windower) +// +// Generated by this command: +// +// mockgen -package=proposer -destination=vms/proposervm/proposer/mock_windower.go github.com/ava-labs/avalanchego/vms/proposervm/proposer Windower +// // Package proposer is a generated GoMock package. package proposer @@ -40,31 +42,61 @@ func (m *MockWindower) EXPECT() *MockWindowerMockRecorder { } // Delay mocks base method. -func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID) (time.Duration, error) { +func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID, arg4 int) (time.Duration, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delay", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Delay", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(error) return ret0, ret1 } // Delay indicates an expected call of Delay. -func (mr *MockWindowerMockRecorder) Delay(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockWindowerMockRecorder) Delay(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delay", reflect.TypeOf((*MockWindower)(nil).Delay), arg0, arg1, arg2, arg3, arg4) +} + +// ExpectedProposer mocks base method. +func (m *MockWindower) ExpectedProposer(arg0 context.Context, arg1, arg2, arg3 uint64) (ids.NodeID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExpectedProposer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(ids.NodeID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExpectedProposer indicates an expected call of ExpectedProposer. +func (mr *MockWindowerMockRecorder) ExpectedProposer(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpectedProposer", reflect.TypeOf((*MockWindower)(nil).ExpectedProposer), arg0, arg1, arg2, arg3) +} + +// MinDelayForProposer mocks base method. +func (m *MockWindower) MinDelayForProposer(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID, arg4 uint64) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinDelayForProposer", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinDelayForProposer indicates an expected call of MinDelayForProposer. 
+func (mr *MockWindowerMockRecorder) MinDelayForProposer(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delay", reflect.TypeOf((*MockWindower)(nil).Delay), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinDelayForProposer", reflect.TypeOf((*MockWindower)(nil).MinDelayForProposer), arg0, arg1, arg2, arg3, arg4) } // Proposers mocks base method. -func (m *MockWindower) Proposers(arg0 context.Context, arg1, arg2 uint64) ([]ids.NodeID, error) { +func (m *MockWindower) Proposers(arg0 context.Context, arg1, arg2 uint64, arg3 int) ([]ids.NodeID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Proposers", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "Proposers", arg0, arg1, arg2, arg3) ret0, _ := ret[0].([]ids.NodeID) ret1, _ := ret[1].(error) return ret0, ret1 } // Proposers indicates an expected call of Proposers. -func (mr *MockWindowerMockRecorder) Proposers(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockWindowerMockRecorder) Proposers(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Proposers", reflect.TypeOf((*MockWindower)(nil).Proposers), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Proposers", reflect.TypeOf((*MockWindower)(nil).Proposers), arg0, arg1, arg2, arg3) } diff --git a/vms/proposervm/proposer/validators.go b/vms/proposervm/proposer/validators.go index ba60a088003a..6af996187b69 100644 --- a/vms/proposervm/proposer/validators.go +++ b/vms/proposervm/proposer/validators.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposer @@ -15,6 +15,6 @@ type validatorData struct { weight uint64 } -func (d validatorData) Less(other validatorData) bool { - return d.id.Less(other.id) +func (d validatorData) Compare(other validatorData) int { + return d.id.Compare(other.id) } diff --git a/vms/proposervm/proposer/validators_test.go b/vms/proposervm/proposer/validators_test.go index a0703d498ec8..e86f2c806a14 100644 --- a/vms/proposervm/proposer/validators_test.go +++ b/vms/proposervm/proposer/validators_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposer import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -11,16 +12,31 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestValidatorDataLess(t *testing.T) { - require := require.New(t) - - var v1, v2 validatorData - require.False(v1.Less(v2)) - require.False(v2.Less(v1)) +func TestValidatorDataCompare(t *testing.T) { + tests := []struct { + a validatorData + b validatorData + expected int + }{ + { + a: validatorData{}, + b: validatorData{}, + expected: 0, + }, + { + a: validatorData{ + id: ids.BuildTestNodeID([]byte{1}), + }, + b: validatorData{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.id, test.b.id, test.expected), func(t *testing.T) { + require := require.New(t) - v1 = validatorData{ - id: ids.NodeID{1}, + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(v1.Less(v2)) - require.True(v2.Less(v1)) } diff --git a/vms/proposervm/proposer/windower.go b/vms/proposervm/proposer/windower.go index 4f67b27903ea..6d1d958dd04a 100644 --- a/vms/proposervm/proposer/windower.go +++ b/vms/proposervm/proposer/windower.go @@ -1,12 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposer import ( "context" + "errors" + "fmt" + "math/bits" "time" + "gonum.org/v1/gonum/mathext/prng" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -17,31 +22,76 @@ import ( // Proposer list constants const ( - MaxWindows = 6 WindowDuration = 5 * time.Second - MaxDelay = MaxWindows * WindowDuration + + MaxVerifyWindows = 6 + MaxVerifyDelay = MaxVerifyWindows * WindowDuration // 30 seconds + + MaxBuildWindows = 60 + MaxBuildDelay = MaxBuildWindows * WindowDuration // 5 minutes + + MaxLookAheadSlots = 720 + MaxLookAheadWindow = MaxLookAheadSlots * WindowDuration // 1 hour ) -var _ Windower = (*windower)(nil) +var ( + _ Windower = (*windower)(nil) + + ErrAnyoneCanPropose = errors.New("anyone can propose") +) type Windower interface { - // Proposers returns the proposer list for building a block at [chainHeight] + // Proposers returns the proposer list for building a block at [blockHeight] // when the validator set is defined at [pChainHeight]. The list is returned // in order. The minimum delay of a validator is the index they appear times // [WindowDuration]. Proposers( ctx context.Context, - chainHeight, + blockHeight, pChainHeight uint64, + maxWindows int, ) ([]ids.NodeID, error) + // Delay returns the amount of time that [validatorID] must wait before - // building a block at [chainHeight] when the validator set is defined at + // building a block at [blockHeight] when the validator set is defined at // [pChainHeight]. Delay( ctx context.Context, - chainHeight, + blockHeight, pChainHeight uint64, validatorID ids.NodeID, + maxWindows int, + ) (time.Duration, error) + + // In the Post-Durango windowing scheme, every validator active at + // [pChainHeight] gets specific slots it can propose in (instead of being + // able to propose from a given time on as it happens Pre-Durango). + // [ExpectedProposer] calculates which nodeID is scheduled to propose a + // block of height [blockHeight] at [slot]. + // If no validators are currently available, [ErrAnyoneCanPropose] is + // returned. 
+ ExpectedProposer( + ctx context.Context, + blockHeight, + pChainHeight, + slot uint64, + ) (ids.NodeID, error) + + // In the Post-Durango windowing scheme, every validator active at + // [pChainHeight] gets specific slots it can propose in (instead of being + // able to propose from a given time on as it happens Pre-Durango). + // [MinDelayForProposer] specifies how long [nodeID] needs to wait for its + // slot to start. Delay is specified as starting from slot zero start. + // (which is parent timestamp). For efficiency reasons, we cap the slot + // search to [MaxLookAheadSlots]. + // If no validators are currently available, [ErrAnyoneCanPropose] is + // returned. + MinDelayForProposer( + ctx context.Context, + blockHeight, + pChainHeight uint64, + nodeID ids.NodeID, + startSlot uint64, ) (time.Duration, error) } @@ -51,7 +101,6 @@ type windower struct { state validators.State subnetID ids.ID chainSource uint64 - sampler sampler.WeightedWithoutReplacement } func New(state validators.State, subnetID, chainID ids.ID) Windower { @@ -60,56 +109,30 @@ func New(state validators.State, subnetID, chainID ids.ID) Windower { state: state, subnetID: subnetID, chainSource: w.UnpackLong(), - sampler: sampler.NewDeterministicWeightedWithoutReplacement(), } } -func (w *windower) Proposers(ctx context.Context, chainHeight, pChainHeight uint64) ([]ids.NodeID, error) { - // get the validator set by the p-chain height - validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) +func (w *windower) Proposers(ctx context.Context, blockHeight, pChainHeight uint64, maxWindows int) ([]ids.NodeID, error) { + // Note: The 32-bit prng is used here for legacy reasons. All other usages + // of a prng in this file should use the 64-bit version. + source := prng.NewMT19937() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) if err != nil { return nil, err } - // convert the map of validators to a slice - validators := make([]validatorData, 0, len(validatorsMap)) - weight := uint64(0) - for k, v := range validatorsMap { - validators = append(validators, validatorData{ - id: k, - weight: v.Weight, - }) - newWeight, err := math.Add64(weight, v.Weight) + var totalWeight uint64 + for _, validator := range validators { + totalWeight, err = math.Add64(totalWeight, validator.weight) if err != nil { return nil, err } - weight = newWeight - } - - // canonically sort validators - // Note: validators are sorted by ID, sorting by weight would not create a - // canonically sorted list - utils.Sort(validators) - - // convert the slice of validators to a slice of weights - validatorWeights := make([]uint64, len(validators)) - for i, v := range validators { - validatorWeights[i] = v.weight - } - - if err := w.sampler.Initialize(validatorWeights); err != nil { - return nil, err - } - - numToSample := MaxWindows - if weight < uint64(numToSample) { - numToSample = int(weight) } - seed := chainHeight ^ w.chainSource - w.sampler.Seed(int64(seed)) + source.Seed(w.chainSource ^ blockHeight) - indices, err := w.sampler.Sample(numToSample) + numToSample := int(math.Min(uint64(maxWindows), totalWeight)) + indices, err := sampler.Sample(numToSample) if err != nil { return nil, err } @@ -121,12 +144,12 @@ func (w *windower) Proposers(ctx context.Context, chainHeight, pChainHeight uint return nodeIDs, nil } -func (w *windower) Delay(ctx context.Context, chainHeight, pChainHeight uint64, validatorID ids.NodeID) (time.Duration, error) { +func (w *windower) Delay(ctx context.Context, blockHeight, pChainHeight 
uint64, validatorID ids.NodeID, maxWindows int) (time.Duration, error) { if validatorID == ids.EmptyNodeID { - return MaxDelay, nil + return time.Duration(maxWindows) * WindowDuration, nil } - proposers, err := w.Proposers(ctx, chainHeight, pChainHeight) + proposers, err := w.Proposers(ctx, blockHeight, pChainHeight, maxWindows) if err != nil { return 0, err } @@ -140,3 +163,124 @@ func (w *windower) Delay(ctx context.Context, chainHeight, pChainHeight uint64, } return delay, nil } + +func (w *windower) ExpectedProposer( + ctx context.Context, + blockHeight, + pChainHeight, + slot uint64, +) (ids.NodeID, error) { + source := prng.NewMT19937_64() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) + if err != nil { + return ids.EmptyNodeID, err + } + if len(validators) == 0 { + return ids.EmptyNodeID, ErrAnyoneCanPropose + } + + return w.expectedProposer( + validators, + source, + sampler, + blockHeight, + slot, + ) +} + +func (w *windower) MinDelayForProposer( + ctx context.Context, + blockHeight, + pChainHeight uint64, + nodeID ids.NodeID, + startSlot uint64, +) (time.Duration, error) { + source := prng.NewMT19937_64() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) + if err != nil { + return 0, err + } + if len(validators) == 0 { + return 0, ErrAnyoneCanPropose + } + + maxSlot := startSlot + MaxLookAheadSlots + for slot := startSlot; slot < maxSlot; slot++ { + expectedNodeID, err := w.expectedProposer( + validators, + source, + sampler, + blockHeight, + slot, + ) + if err != nil { + return 0, err + } + + if expectedNodeID == nodeID { + return time.Duration(slot) * WindowDuration, nil + } + } + + // No slot is scheduled for [nodeID] within the window we inspect. Return the max delay. + return time.Duration(maxSlot) * WindowDuration, nil +} + +func (w *windower) makeSampler( + ctx context.Context, + pChainHeight uint64, + source sampler.Source, +) (sampler.WeightedWithoutReplacement, []validatorData, error) { + // Get the canonical representation of the validator set at the provided + // p-chain height. + validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) + if err != nil { + return nil, nil, err + } + + validators := make([]validatorData, 0, len(validatorsMap)) + for k, v := range validatorsMap { + validators = append(validators, validatorData{ + id: k, + weight: v.Weight, + }) + } + + // Note: validators are sorted by ID. Sorting by weight would not create a + // canonically sorted list. + utils.Sort(validators) + + weights := make([]uint64, len(validators)) + for i, validator := range validators { + weights[i] = validator.weight + } + + sampler := sampler.NewDeterministicWeightedWithoutReplacement(source) + return sampler, validators, sampler.Initialize(weights) +} + +func (w *windower) expectedProposer( + validators []validatorData, + source *prng.MT19937_64, + sampler sampler.WeightedWithoutReplacement, + blockHeight, + slot uint64, +) (ids.NodeID, error) { + // Slot is reversed to utilize a different state space in the seed than the + // height. If the slot was not reversed, the state space would collide, + // biasing the seed generation. For example, without reversing the slot + // height=0 and slot=1 would equal height=1 and slot=0.
+ source.Seed(w.chainSource ^ blockHeight ^ bits.Reverse64(slot)) + indices, err := sampler.Sample(1) + if err != nil { + return ids.EmptyNodeID, fmt.Errorf("failed sampling proposers: %w", err) + } + return validators[indices[0]].id, nil +} + +func TimeToSlot(start, now time.Time) uint64 { + if now.Before(start) { + return 0 + } + return uint64(now.Sub(start) / WindowDuration) +} diff --git a/vms/proposervm/proposer/windower_test.go b/vms/proposervm/proposer/windower_test.go index ec2225003230..d3e2ac68817a 100644 --- a/vms/proposervm/proposer/windower_test.go +++ b/vms/proposervm/proposer/windower_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposer import ( "context" + "math" "math/rand" "testing" "time" @@ -13,35 +14,49 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +var ( + subnetID = ids.GenerateTestID() + randomChainID = ids.GenerateTestID() + fixedChainID = ids.ID{0, 2} ) func TestWindowerNoValidators(t *testing.T) { require := require.New(t) - subnetID := ids.GenerateTestID() - chainID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return nil, nil - }, - } - - w := New(vdrState, subnetID, chainID) + _, vdrState := makeValidators(t, 0) + w := New(vdrState, subnetID, randomChainID) - delay, err := w.Delay(context.Background(), 1, 0, nodeID) + var ( + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + nodeID = ids.GenerateTestNodeID() + slot uint64 = 1 + ) + delay, err := w.Delay(context.Background(), chainHeight, pChainHeight, nodeID, MaxVerifyWindows) require.NoError(err) require.Zero(delay) + + expectedProposer, err := w.ExpectedProposer(context.Background(), chainHeight, pChainHeight, slot) + require.ErrorIs(err, ErrAnyoneCanPropose) + require.Equal(ids.EmptyNodeID, expectedProposer) + + delay, err = w.MinDelayForProposer(context.Background(), chainHeight, pChainHeight, nodeID, slot) + require.ErrorIs(err, ErrAnyoneCanPropose) + require.Zero(delay) } func TestWindowerRepeatedValidator(t *testing.T) { require := require.New(t) - subnetID := ids.GenerateTestID() - chainID := ids.GenerateTestID() - validatorID := ids.GenerateTestNodeID() - nonValidatorID := ids.GenerateTestNodeID() + var ( + validatorID = ids.GenerateTestNodeID() + nonValidatorID = ids.GenerateTestNodeID() + ) + vdrState := &validators.TestState{ T: t, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -54,41 +69,22 @@ func TestWindowerRepeatedValidator(t *testing.T) { }, } - w := New(vdrState, subnetID, chainID) + w := New(vdrState, subnetID, randomChainID) - validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID, MaxVerifyWindows) require.NoError(err) require.Zero(validatorDelay) - nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID) + nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID, MaxVerifyWindows) require.NoError(err) - require.Equal(MaxDelay, nonValidatorDelay) + require.Equal(MaxVerifyDelay, nonValidatorDelay) } -func TestWindowerChangeByHeight(t 
*testing.T) { +func TestDelayChangeByHeight(t *testing.T) { require := require.New(t) - subnetID := ids.ID{0, 1} - chainID := ids.ID{0, 2} - validatorIDs := make([]ids.NodeID, MaxWindows) - for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} - } - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) - for _, id := range validatorIDs { - vdrs[id] = &validators.GetValidatorOutput{ - NodeID: id, - Weight: 1, - } - } - return vdrs, nil - }, - } - - w := New(vdrState, subnetID, chainID) + validatorIDs, vdrState := makeValidators(t, MaxVerifyWindows) + w := New(vdrState, subnetID, fixedChainID) expectedDelays1 := []time.Duration{ 2 * WindowDuration, @@ -100,7 +96,7 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -115,41 +111,27 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays2 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } } -func TestWindowerChangeByChain(t *testing.T) { +func TestDelayChangeByChain(t *testing.T) { require := require.New(t) - subnetID := ids.ID{0, 1} + source := rand.NewSource(int64(0)) + rng := rand.New(source) // #nosec G404 - rand.Seed(0) chainID0 := ids.ID{} - _, _ = rand.Read(chainID0[:]) // #nosec G404 - chainID1 := ids.ID{} - _, _ = rand.Read(chainID1[:]) // #nosec G404 + _, err := rng.Read(chainID0[:]) + require.NoError(err) - validatorIDs := make([]ids.NodeID, MaxWindows) - for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} - } - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) - for _, id := range validatorIDs { - vdrs[id] = &validators.GetValidatorOutput{ - NodeID: id, - Weight: 1, - } - } - return vdrs, nil - }, - } + chainID1 := ids.ID{} + _, err = rng.Read(chainID1[:]) + require.NoError(err) + validatorIDs, vdrState := makeValidators(t, MaxVerifyWindows) w0 := New(vdrState, subnetID, chainID0) w1 := New(vdrState, subnetID, chainID1) @@ -163,7 +145,7 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays0 { vdrID := validatorIDs[i] - validatorDelay, err := w0.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err := w0.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -178,8 +160,308 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } } + +func TestExpectedProposerChangeByHeight(t *testing.T) { + require 
:= require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedProposers := map[uint64]ids.NodeID{ + 1: validatorIDs[2], + 2: validatorIDs[1], + } + + for chainHeight, expectedProposerID := range expectedProposers { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + require.Equal(expectedProposerID, proposerID) + } +} + +func TestExpectedProposerChangeByChain(t *testing.T) { + require := require.New(t) + + source := rand.NewSource(int64(0)) + rng := rand.New(source) // #nosec G404 + + chainID0 := ids.ID{} + _, err := rng.Read(chainID0[:]) + require.NoError(err) + + chainID1 := ids.ID{} + _, err = rng.Read(chainID1[:]) + require.NoError(err) + + validatorIDs, vdrState := makeValidators(t, 10) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedProposers := map[ids.ID]ids.NodeID{ + chainID0: validatorIDs[5], + chainID1: validatorIDs[3], + } + + for chainID, expectedProposerID := range expectedProposers { + w := New(vdrState, subnetID, chainID) + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + require.Equal(expectedProposerID, proposerID) + } +} + +func TestExpectedProposerChangeBySlot(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + ) + + proposers := []ids.NodeID{ + validatorIDs[2], + validatorIDs[0], + validatorIDs[9], + validatorIDs[7], + validatorIDs[0], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[4], + validatorIDs[0], + validatorIDs[6], + validatorIDs[3], + validatorIDs[2], + validatorIDs[1], + validatorIDs[6], + validatorIDs[0], + validatorIDs[5], + validatorIDs[1], + validatorIDs[9], + validatorIDs[6], + validatorIDs[0], + validatorIDs[8], + } + expectedProposers := map[uint64]ids.NodeID{ + MaxLookAheadSlots: validatorIDs[4], + MaxLookAheadSlots + 1: validatorIDs[6], + } + for slot, expectedProposerID := range proposers { + expectedProposers[uint64(slot)] = expectedProposerID + } + + for slot, expectedProposerID := range expectedProposers { + actualProposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + require.Equal(expectedProposerID, actualProposerID) + } +} + +func TestCoherenceOfExpectedProposerAndMinDelayForProposer(t *testing.T) { + require := require.New(t) + + _, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + ) + + for slot := uint64(0); slot < 3*MaxLookAheadSlots; slot++ { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + + // proposerID is the scheduled proposer. 
It should start with the + // expected delay + delay, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, proposerID, slot) + require.NoError(err) + require.Equal(time.Duration(slot)*WindowDuration, delay) + } +} + +func TestMinDelayForProposer(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedDelays := map[ids.NodeID]time.Duration{ + validatorIDs[0]: 1 * WindowDuration, + validatorIDs[1]: 15 * WindowDuration, + validatorIDs[2]: 0 * WindowDuration, + validatorIDs[3]: 5 * WindowDuration, + validatorIDs[4]: 10 * WindowDuration, + validatorIDs[5]: 18 * WindowDuration, + validatorIDs[6]: 12 * WindowDuration, + validatorIDs[7]: 3 * WindowDuration, + validatorIDs[8]: 23 * WindowDuration, + validatorIDs[9]: 2 * WindowDuration, + ids.GenerateTestNodeID(): MaxLookAheadWindow, + } + + for nodeID, expectedDelay := range expectedDelays { + delay, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, nodeID, slot) + require.NoError(err) + require.Equal(expectedDelay, delay) + } +} + +func BenchmarkMinDelayForProposer(b *testing.B) { + require := require.New(b) + + _, vdrState := makeValidators(b, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + chainHeight uint64 = 1 + nodeID = ids.GenerateTestNodeID() // Ensure to exhaust the search + slot uint64 = 0 + ) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, nodeID, slot) + require.NoError(err) + } +} + +func TestTimeToSlot(t *testing.T) { + parentTime := time.Now() + tests := []struct { + timeOffset time.Duration + expectedSlot uint64 + }{ + { + timeOffset: -WindowDuration, + expectedSlot: 0, + }, + { + timeOffset: -time.Second, + expectedSlot: 0, + }, + { + timeOffset: 0, + expectedSlot: 0, + }, + { + timeOffset: WindowDuration, + expectedSlot: 1, + }, + { + timeOffset: 2 * WindowDuration, + expectedSlot: 2, + }, + } + for _, test := range tests { + t.Run(test.timeOffset.String(), func(t *testing.T) { + slot := TimeToSlot(parentTime, parentTime.Add(test.timeOffset)) + require.Equal(t, test.expectedSlot, slot) + }) + } +} + +// Ensure that the proposer distribution is within 3 standard deviations of the +// expected value assuming a truly random binomial distribution. +func TestProposerDistribution(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + numChainHeights uint64 = 100 + numSlots uint64 = 100 + ) + + proposerFrequency := make(map[ids.NodeID]int) + for _, validatorID := range validatorIDs { + // Initialize the map to 0s to include validators that are never sampled + // in the analysis. 
+ proposerFrequency[validatorID] = 0 + } + for chainHeight := uint64(0); chainHeight < numChainHeights; chainHeight++ { + for slot := uint64(0); slot < numSlots; slot++ { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + proposerFrequency[proposerID]++ + } + } + + var ( + totalNumberOfSamples = numChainHeights * numSlots + probabilityOfBeingSampled = 1 / float64(len(validatorIDs)) + expectedNumberOfSamples = uint64(probabilityOfBeingSampled * float64(totalNumberOfSamples)) + variance = float64(totalNumberOfSamples) * probabilityOfBeingSampled * (1 - probabilityOfBeingSampled) + stdDeviation = math.Sqrt(variance) + maxDeviation uint64 + ) + for _, sampled := range proposerFrequency { + maxDeviation = safemath.Max( + maxDeviation, + safemath.AbsDiff( + uint64(sampled), + expectedNumberOfSamples, + ), + ) + } + + maxSTDDeviation := float64(maxDeviation) / stdDeviation + require.Less(maxSTDDeviation, 3.) +} + +func makeValidators(t testing.TB, count int) ([]ids.NodeID, *validators.TestState) { + validatorIDs := make([]ids.NodeID, count) + for i := range validatorIDs { + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) + } + + vdrState := &validators.TestState{ + T: t, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxVerifyWindows) + for _, id := range validatorIDs { + vdrs[id] = &validators.GetValidatorOutput{ + NodeID: id, + Weight: 1, + } + } + return vdrs, nil + }, + } + return validatorIDs, vdrState +} diff --git a/vms/proposervm/scheduler/mock_scheduler.go b/vms/proposervm/scheduler/mock_scheduler.go new file mode 100644 index 000000000000..f4a8f1e62197 --- /dev/null +++ b/vms/proposervm/scheduler/mock_scheduler.go @@ -0,0 +1,76 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/proposervm/scheduler (interfaces: Scheduler) +// +// Generated by this command: +// +// mockgen -package=scheduler -destination=vms/proposervm/scheduler/mock_scheduler.go github.com/ava-labs/avalanchego/vms/proposervm/scheduler Scheduler +// + +// Package scheduler is a generated GoMock package. +package scheduler + +import ( + reflect "reflect" + time "time" + + gomock "go.uber.org/mock/gomock" +) + +// MockScheduler is a mock of Scheduler interface. +type MockScheduler struct { + ctrl *gomock.Controller + recorder *MockSchedulerMockRecorder +} + +// MockSchedulerMockRecorder is the mock recorder for MockScheduler. +type MockSchedulerMockRecorder struct { + mock *MockScheduler +} + +// NewMockScheduler creates a new mock instance. +func NewMockScheduler(ctrl *gomock.Controller) *MockScheduler { + mock := &MockScheduler{ctrl: ctrl} + mock.recorder = &MockSchedulerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduler) EXPECT() *MockSchedulerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockScheduler) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockSchedulerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockScheduler)(nil).Close)) +} + +// Dispatch mocks base method. 
+func (m *MockScheduler) Dispatch(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Dispatch", arg0) +} + +// Dispatch indicates an expected call of Dispatch. +func (mr *MockSchedulerMockRecorder) Dispatch(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dispatch", reflect.TypeOf((*MockScheduler)(nil).Dispatch), arg0) +} + +// SetBuildBlockTime mocks base method. +func (m *MockScheduler) SetBuildBlockTime(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBuildBlockTime", arg0) +} + +// SetBuildBlockTime indicates an expected call of SetBuildBlockTime. +func (mr *MockSchedulerMockRecorder) SetBuildBlockTime(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBuildBlockTime", reflect.TypeOf((*MockScheduler)(nil).SetBuildBlockTime), arg0) +} diff --git a/vms/proposervm/scheduler/scheduler.go b/vms/proposervm/scheduler/scheduler.go index 5946a67b9b77..8395596a55a8 100644 --- a/vms/proposervm/scheduler/scheduler.go +++ b/vms/proposervm/scheduler/scheduler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package scheduler diff --git a/vms/proposervm/scheduler/scheduler_test.go b/vms/proposervm/scheduler/scheduler_test.go index 821a36883e90..77ed39a67330 100644 --- a/vms/proposervm/scheduler/scheduler_test.go +++ b/vms/proposervm/scheduler/scheduler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package scheduler diff --git a/vms/proposervm/state/block_height_index.go b/vms/proposervm/state/block_height_index.go index e16100bddd89..b60fca0c363d 100644 --- a/vms/proposervm/state/block_height_index.go +++ b/vms/proposervm/state/block_height_index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/proposervm/state/block_state.go b/vms/proposervm/state/block_state.go index fa4c67e1ecf5..0c5e210a8d81 100644 --- a/vms/proposervm/state/block_state.go +++ b/vms/proposervm/state/block_state.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( "errors" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -15,8 +14,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) @@ -68,7 +69,7 @@ func NewBlockState(db database.Database) BlockState { func NewMeteredBlockState(db database.Database, namespace string, metrics prometheus.Registerer) (BlockState, error) { blkCache, err := metercacher.New[ids.ID, *blockWrapper]( - fmt.Sprintf("%s_block_cache", namespace), + metric.AppendNamespace(namespace, "block_cache"), metrics, cache.NewSizedLRU[ids.ID, *blockWrapper]( blockCacheSize, @@ -100,16 +101,20 @@ func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) } blkWrapper := blockWrapper{} - parsedVersion, err := c.Unmarshal(blkWrapperBytes, &blkWrapper) + parsedVersion, err := Codec.Unmarshal(blkWrapperBytes, &blkWrapper) if err != nil { return nil, choices.Unknown, err } - if parsedVersion != version { + if parsedVersion != CodecVersion { return nil, choices.Unknown, errBlockWrongVersion } // The key was in the database - blk, err := block.Parse(blkWrapper.Block) + // + // Invariant: Blocks stored on disk were previously accepted by this node. + // Because the durango activation relaxes TLS cert parsing rules, we assume + // it is always activated here. + blk, err := block.Parse(blkWrapper.Block, version.DefaultUpgradeTime) if err != nil { return nil, choices.Unknown, err } @@ -126,7 +131,7 @@ func (s *blockState) PutBlock(blk block.Block, status choices.Status) error { block: blk, } - bytes, err := c.Marshal(version, &blkWrapper) + bytes, err := Codec.Marshal(CodecVersion, &blkWrapper) if err != nil { return err } diff --git a/vms/proposervm/state/block_state_test.go b/vms/proposervm/state/block_state_test.go index 573b95356fdd..8efe014bd11c 100644 --- a/vms/proposervm/state/block_state_test.go +++ b/vms/proposervm/state/block_state_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -27,8 +27,6 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/vms/proposervm/block" - - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) func testBlockState(a *require.Assertions, bs BlockState) { @@ -41,19 +39,14 @@ func testBlockState(a *require.Assertions, bs BlockState) { tlsCert, err := staking.NewTLSCert() a.NoError(err) - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.CertificateFromX509(tlsCert.Leaf) key := tlsCert.PrivateKey.(crypto.Signer) - - nodeIDBytes, err := secp256k1.RecoverSecp256PublicKey(tlsCert.Leaf) - a.NoError(err) - nodeID, err := ids.ToNodeID(nodeIDBytes) a.NoError(err) b, err := block.Build( parentID, timestamp, pChainHeight, - nodeID, cert, innerBlockBytes, chainID, @@ -111,7 +104,7 @@ func TestGetBlockWithUncachedBlock(t *testing.T) { block: blk, } - bytes, err := c.Marshal(version, &blkWrapper) + bytes, err := Codec.Marshal(CodecVersion, &blkWrapper) a.NoError(err) blkID := blk.ID() @@ -133,20 +126,15 @@ func initCommonTestData(a *require.Assertions) (database.Database, BlockState, b chainID := ids.ID{4} tlsCert, _ := staking.NewTLSCert() - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.CertificateFromX509(tlsCert.Leaf) + a.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) - nodeIDBytes, err := secp256k1.RecoverSecp256PublicKey(tlsCert.Leaf) - a.NoError(err) - nodeID, err := ids.ToNodeID(nodeIDBytes) - a.NoError(err) - blk, err := block.Build( parentID, timestamp, pChainHeight, - nodeID, cert, innerBlockBytes, chainID, diff --git a/vms/proposervm/state/chain_state.go b/vms/proposervm/state/chain_state.go index 0f1a1bfba4e8..e4ed34ddcb78 100644 --- a/vms/proposervm/state/chain_state.go +++ b/vms/proposervm/state/chain_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/proposervm/state/chain_state_test.go b/vms/proposervm/state/chain_state_test.go index ab14f4228281..6b45585f6041 100644 --- a/vms/proposervm/state/chain_state_test.go +++ b/vms/proposervm/state/chain_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/proposervm/state/codec.go b/vms/proposervm/state/codec.go index f73523806e53..63727894e356 100644 --- a/vms/proposervm/state/codec.go +++ b/vms/proposervm/state/codec.go @@ -1,24 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const version = 0 +const CodecVersion = 0 -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) - err := c.RegisterCodec(version, lc) + err := Codec.RegisterCodec(CodecVersion, lc) if err != nil { panic(err) } diff --git a/vms/proposervm/state/mock_state.go b/vms/proposervm/state/mock_state.go index 40ef830a1365..6384528a61dd 100644 --- a/vms/proposervm/state/mock_state.go +++ b/vms/proposervm/state/mock_state.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm/state (interfaces: State) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/proposervm/state/mock_state.go github.com/ava-labs/avalanchego/vms/proposervm/state State +// // Package state is a generated GoMock package. package state @@ -62,7 +64,7 @@ func (m *MockState) DeleteBlock(arg0 ids.ID) error { } // DeleteBlock indicates an expected call of DeleteBlock. -func (mr *MockStateMockRecorder) DeleteBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBlock", reflect.TypeOf((*MockState)(nil).DeleteBlock), arg0) } @@ -76,7 +78,7 @@ func (m *MockState) DeleteBlockIDAtHeight(arg0 uint64) error { } // DeleteBlockIDAtHeight indicates an expected call of DeleteBlockIDAtHeight. -func (mr *MockStateMockRecorder) DeleteBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).DeleteBlockIDAtHeight), arg0) } @@ -120,7 +122,7 @@ func (m *MockState) GetBlock(arg0 ids.ID) (block.Block, choices.Status, error) { } // GetBlock indicates an expected call of GetBlock. -func (mr *MockStateMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockState)(nil).GetBlock), arg0) } @@ -135,7 +137,7 @@ func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. -func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) } @@ -209,7 +211,7 @@ func (m *MockState) PutBlock(arg0 block.Block, arg1 choices.Status) error { } // PutBlock indicates an expected call of PutBlock. 
-func (mr *MockStateMockRecorder) PutBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBlock", reflect.TypeOf((*MockState)(nil).PutBlock), arg0, arg1) } @@ -223,7 +225,7 @@ func (m *MockState) SetBlockIDAtHeight(arg0 uint64, arg1 ids.ID) error { } // SetBlockIDAtHeight indicates an expected call of SetBlockIDAtHeight. -func (mr *MockStateMockRecorder) SetBlockIDAtHeight(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).SetBlockIDAtHeight), arg0, arg1) } @@ -237,7 +239,7 @@ func (m *MockState) SetCheckpoint(arg0 ids.ID) error { } // SetCheckpoint indicates an expected call of SetCheckpoint. -func (mr *MockStateMockRecorder) SetCheckpoint(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetCheckpoint(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCheckpoint", reflect.TypeOf((*MockState)(nil).SetCheckpoint), arg0) } @@ -251,7 +253,7 @@ func (m *MockState) SetForkHeight(arg0 uint64) error { } // SetForkHeight indicates an expected call of SetForkHeight. -func (mr *MockStateMockRecorder) SetForkHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetForkHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetForkHeight", reflect.TypeOf((*MockState)(nil).SetForkHeight), arg0) } @@ -265,7 +267,7 @@ func (m *MockState) SetLastAccepted(arg0 ids.ID) error { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) } diff --git a/vms/proposervm/state/state.go b/vms/proposervm/state/state.go index c8b80b947920..487e64f71f08 100644 --- a/vms/proposervm/state/state.go +++ b/vms/proposervm/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/proposervm/state/state_test.go b/vms/proposervm/state/state_test.go index 97980fc36b9b..9ef1e291e539 100644 --- a/vms/proposervm/state/state_test.go +++ b/vms/proposervm/state/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/vms/proposervm/state_summary.go b/vms/proposervm/state_summary.go index 629d2c6491d1..f61c29d6f426 100644 --- a/vms/proposervm/state_summary.go +++ b/vms/proposervm/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm diff --git a/vms/proposervm/state_syncable_vm.go b/vms/proposervm/state_syncable_vm.go index da86d8c36e5c..08a321cab7bb 100644 --- a/vms/proposervm/state_syncable_vm.go +++ b/vms/proposervm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index a4ee986fad2a..bdef0e7b3221 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -29,6 +29,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/proposervm/summary" statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" @@ -80,16 +81,19 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { // create the VM vm := New( innerVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID require.NoError(vm.Initialize( context.Background(), @@ -183,9 +187,9 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) @@ -196,11 +200,10 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -269,9 +272,9 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) @@ -282,11 +285,10 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + 
vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -358,9 +360,9 @@ func TestStateSyncGetStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) @@ -371,11 +373,10 @@ func TestStateSyncGetStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -432,9 +433,9 @@ func TestParseStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) @@ -445,11 +446,10 @@ func TestParseStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -492,20 +492,19 @@ func TestStateSummaryAccept(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) @@ -565,9 +564,9 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.Equal(reqHeight, h) @@ -582,11 +581,10 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -615,7 +613,11 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { require := require.New(t) // Note: by default proVM is built such that heightIndex will be considered complete - coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() diff --git a/vms/proposervm/summary/build.go b/vms/proposervm/summary/build.go index 35e2e179f0e3..516f9d1a9e72 
100644 --- a/vms/proposervm/summary/build.go +++ b/vms/proposervm/summary/build.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary @@ -20,7 +20,7 @@ func Build( InnerSummary: coreSummary, } - bytes, err := c.Marshal(codecVersion, &summary) + bytes, err := Codec.Marshal(CodecVersion, &summary) if err != nil { return nil, fmt.Errorf("cannot marshal proposer summary due to: %w", err) } diff --git a/vms/proposervm/summary/build_test.go b/vms/proposervm/summary/build_test.go index 0e15ac3cd1d7..ad7e5df52748 100644 --- a/vms/proposervm/summary/build_test.go +++ b/vms/proposervm/summary/build_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/vms/proposervm/summary/codec.go b/vms/proposervm/summary/codec.go index a71350f37d0f..41a9eb9a37d0 100644 --- a/vms/proposervm/summary/codec.go +++ b/vms/proposervm/summary/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary @@ -6,23 +6,24 @@ package summary import ( "errors" "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const codecVersion = 0 +const CodecVersion = 0 var ( - c codec.Manager + Codec codec.Manager errWrongCodecVersion = errors.New("wrong codec version") ) func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/vms/proposervm/summary/parse.go b/vms/proposervm/summary/parse.go index 3d9295444782..670bd43a8d77 100644 --- a/vms/proposervm/summary/parse.go +++ b/vms/proposervm/summary/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary @@ -14,11 +14,11 @@ func Parse(bytes []byte) (StateSummary, error) { id: hashing.ComputeHash256Array(bytes), bytes: bytes, } - version, err := c.Unmarshal(bytes, &summary) + version, err := Codec.Unmarshal(bytes, &summary) if err != nil { return nil, fmt.Errorf("could not unmarshal summary due to: %w", err) } - if version != codecVersion { + if version != CodecVersion { return nil, errWrongCodecVersion } return &summary, nil diff --git a/vms/proposervm/summary/parse_test.go b/vms/proposervm/summary/parse_test.go index b22be83582e2..16fb2aec9f6b 100644 --- a/vms/proposervm/summary/parse_test.go +++ b/vms/proposervm/summary/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package summary diff --git a/vms/proposervm/summary/state_summary.go b/vms/proposervm/summary/state_summary.go index 59269beb112c..14213a665fa6 100644 --- a/vms/proposervm/summary/state_summary.go +++ b/vms/proposervm/summary/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/vms/proposervm/tree/tree.go b/vms/proposervm/tree/tree.go index 8d1e7333d32e..38125ba9d0e2 100644 --- a/vms/proposervm/tree/tree.go +++ b/vms/proposervm/tree/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tree diff --git a/vms/proposervm/tree/tree_test.go b/vms/proposervm/tree/tree_test.go index 55b6e129341f..1e826e418c21 100644 --- a/vms/proposervm/tree/tree_test.go +++ b/vms/proposervm/tree/tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tree diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index 85f3388b11ae..c6bef64e86ad 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -1,11 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2022, Chain4Travel AG. All rights reserved. +// +// This file is a derived work, based on ava-labs code whose +// original notices appear below. +// +// It is distributed under the same license conditions as the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********************************************************** +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "context" - "crypto" "errors" "fmt" "time" @@ -26,7 +35,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" @@ -98,19 +106,11 @@ func cachedBlockSize(_ ids.ID, blk snowman.Block) int { type VM struct { block.ChainVM + Config blockBuilderVM block.BuildBlockWithContextChainVM batchedVM block.BatchedChainVM ssVM block.StateSyncableVM - activationTime time.Time - minimumPChainHeight uint64 - minBlkDelay time.Duration - numHistoricalBlocks uint64 - // block signer - stakingLeafSigner crypto.Signer - // block certificate - stakingCertLeaf *staking.Certificate - state.State hIndexer indexer.HeightIndexer @@ -150,28 +150,17 @@ type VM struct { // timestamps are only specific to the second. 
func New( vm block.ChainVM, - activationTime time.Time, - minimumPChainHeight uint64, - minBlkDelay time.Duration, - numHistoricalBlocks uint64, - stakingLeafSigner crypto.Signer, - stakingCertLeaf *staking.Certificate, + config Config, ) *VM { blockBuilderVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VM{ ChainVM: vm, + Config: config, blockBuilderVM: blockBuilderVM, batchedVM: batchedVM, ssVM: ssVM, - - activationTime: activationTime, - minimumPChainHeight: minimumPChainHeight, - minBlkDelay: minBlkDelay, - numHistoricalBlocks: numHistoricalBlocks, - stakingLeafSigner: stakingLeafSigner, - stakingCertLeaf: stakingCertLeaf, } } @@ -212,10 +201,10 @@ func (vm *VM) Initialize( vm.State = baseState vm.Windower = proposer.New(chainCtx.ValidatorState, chainCtx.SubnetID, chainCtx.ChainID) vm.Tree = tree.New() - innerBlkCache, err := metercacher.New[ids.ID, snowman.Block]( + innerBlkCache, err := metercacher.New( "inner_block_cache", registerer, - cache.NewSizedLRU[ids.ID, snowman.Block]( + cache.NewSizedLRU( innerBlkCacheSize, cachedBlockSize, ), @@ -366,18 +355,59 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { return err } - // reset scheduler - minDelay, err := vm.Windower.Delay(ctx, blk.Height()+1, pChainHeight, vm.ctx.NodeID) + var ( + childBlockHeight = blk.Height() + 1 + parentTimestamp = blk.Timestamp() + nextStartTime time.Time + ) + if vm.IsDurangoActivated(parentTimestamp) { + currentTime := vm.Clock.Time().Truncate(time.Second) + nextStartTime, err = vm.getPostDurangoSlotTime( + ctx, + childBlockHeight, + pChainHeight, + proposer.TimeToSlot(parentTimestamp, currentTime), + parentTimestamp, + ) + } else { + nextStartTime, err = vm.getPreDurangoSlotTime( + ctx, + childBlockHeight, + pChainHeight, + parentTimestamp, + ) + } if err != nil { vm.ctx.Log.Debug("failed to fetch the expected delay", zap.Error(err), ) + // A nil error is returned here because it is possible that // bootstrapping caused the last accepted block to move past the latest // P-chain height. This will cause building blocks to return an error // until the P-chain's height has advanced. return nil } + vm.Scheduler.SetBuildBlockTime(nextStartTime) + + vm.ctx.Log.Debug("set preference", + zap.Stringer("blkID", blk.ID()), + zap.Time("blockTimestamp", parentTimestamp), + zap.Time("nextStartTime", nextStartTime), + ) + return nil +} + +func (vm *VM) getPreDurangoSlotTime( + ctx context.Context, + blkHeight, + pChainHeight uint64, + parentTimestamp time.Time, +) (time.Time, error) { + delay, err := vm.Windower.Delay(ctx, blkHeight, pChainHeight, vm.ctx.NodeID, proposer.MaxBuildWindows) + if err != nil { + return time.Time{}, err + } // Note: The P-chain does not currently try to target any block time. It // notifies the consensus engine as soon as a new block may be built. To @@ -385,20 +415,39 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { // validators can specify. This delay may be an issue for high performance, // custom VMs. Until the P-chain is modified to target a specific block // time, ProposerMinBlockDelay can be configured in the subnet config. 
- if minDelay < vm.minBlkDelay { - minDelay = vm.minBlkDelay - } - - preferredTime := blk.Timestamp() - nextStartTime := preferredTime.Add(minDelay) - vm.Scheduler.SetBuildBlockTime(nextStartTime) + delay = math.Max(delay, vm.MinBlkDelay) + return parentTimestamp.Add(delay), nil +} - vm.ctx.Log.Debug("set preference", - zap.Stringer("blkID", blk.ID()), - zap.Time("blockTimestamp", preferredTime), - zap.Time("nextStartTime", nextStartTime), +func (vm *VM) getPostDurangoSlotTime( + ctx context.Context, + blkHeight, + pChainHeight, + slot uint64, + parentTimestamp time.Time, +) (time.Time, error) { + delay, err := vm.Windower.MinDelayForProposer( + ctx, + blkHeight, + pChainHeight, + vm.ctx.NodeID, + slot, ) - return nil + // Note: The P-chain does not currently try to target any block time. It + // notifies the consensus engine as soon as a new block may be built. To + // avoid fast runs of blocks there is an additional minimum delay that + // validators can specify. This delay may be an issue for high performance, + // custom VMs. Until the P-chain is modified to target a specific block + // time, ProposerMinBlockDelay can be configured in the subnet config. + switch { + case err == nil: + delay = math.Max(delay, vm.MinBlkDelay) + return parentTimestamp.Add(delay), err + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return parentTimestamp.Add(vm.MinBlkDelay), err + default: + return time.Time{}, err + } } func (vm *VM) LastAccepted(ctx context.Context) (ids.ID, error) { @@ -430,7 +479,7 @@ func (vm *VM) repair(ctx context.Context) error { return err } - if vm.numHistoricalBlocks != 0 { + if vm.NumHistoricalBlocks != 0 { vm.ctx.Log.Fatal("block height index must be valid when pruning historical blocks") return errHeightIndexInvalidWhilePruning } @@ -681,7 +730,7 @@ func (vm *VM) setLastAcceptedMetadata(ctx context.Context) error { } func (vm *VM) parsePostForkBlock(ctx context.Context, b []byte) (PostForkBlock, error) { - statelessBlock, err := statelessblock.Parse(b) + statelessBlock, err := statelessblock.Parse(b, vm.DurangoTime) if err != nil { return nil, err } diff --git a/vms/proposervm/vm_byzantine_test.go b/vms/proposervm/vm_byzantine_test.go index c53830077bca..c9ad1b98c79b 100644 --- a/vms/proposervm/vm_byzantine_test.go +++ b/vms/proposervm/vm_byzantine_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) // Ensure that a byzantine node issuing an invalid PreForkBlock (Y) when the @@ -33,8 +32,11 @@ import ( func TestInvalidByzantineProposerParent(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) // enable ProBlks - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -44,10 +46,9 @@ func TestInvalidByzantineProposerParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -67,10 +68,9 @@ func TestInvalidByzantineProposerParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: yBlockBytes, + ParentV: xBlock.ID(), + HeightV: xBlock.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { @@ -103,7 +103,11 @@ func TestInvalidByzantineProposerParent(t *testing.T) { func TestInvalidByzantineProposerOracleParent(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) defer func() { require.NoError(proVM.Shutdown(context.Background())) @@ -116,9 +120,8 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { IDV: xBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -126,18 +129,16 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: xBlockID, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: xBlockID, }, }, } @@ -211,8 +212,11 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { func TestInvalidByzantineProposerPreForkParent(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) // enable ProBlks - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ 
-222,10 +226,9 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -237,10 +240,9 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: yBlockBytes, + ParentV: xBlock.ID(), + HeightV: xBlock.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -300,7 +302,11 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) defer func() { require.NoError(proVM.Shutdown(context.Background())) @@ -312,9 +318,8 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -322,18 +327,16 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), // valid block should reference xBlock }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: coreGenBlk.ID(), // valid block should reference xBlock }, }, } @@ -399,7 +402,11 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { func TestBlockVerify_InvalidPostForkOption(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) defer func() { require.NoError(proVM.Shutdown(context.Background())) @@ -413,9 +420,8 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: xBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -423,18 +429,16 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: xBlockID, }, &snowman.TestBlock{ 
TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: xBlockID, }, }, } @@ -449,10 +453,9 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } ySlb, err := block.BuildUnsigned( @@ -529,9 +532,8 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: zBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -539,18 +541,16 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: zBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: zBlockID, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: zBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: zBlockID, }, }, } @@ -586,7 +586,11 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { func TestGetBlock_MutatedSignature(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -609,10 +613,9 @@ func TestGetBlock_MutatedSignature(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreBlk1 := &snowman.TestBlock{ @@ -620,10 +623,9 @@ func TestGetBlock_MutatedSignature(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk0.ID(), + HeightV: coreBlk0.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -663,7 +665,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), builtBlk0.ID())) - // The second propsal block will need to be signed because the timestamp + // The second proposal block will need to be signed because the timestamp // hasn't moved forward // Craft what would be the next block, but with an invalid signature: diff --git a/vms/proposervm/vm_regression_test.go b/vms/proposervm/vm_regression_test.go index 0a27c43e112a..ac34df120641 100644 --- a/vms/proposervm/vm_regression_test.go +++ b/vms/proposervm/vm_regression_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" ) func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *testing.T) { @@ -46,19 +47,23 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test proVM := New( innerVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) + defer func() { // avoids leaking goroutines require.NoError(proVM.Shutdown(context.Background())) }() - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.CChainID) initialState := []byte("genesis state") err := proVM.Initialize( @@ -72,5 +77,5 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test nil, nil, ) - require.ErrorIs(customError, err) + require.ErrorIs(err, customError) } diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 50940633bfe3..3c1ae9d45977 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -35,16 +35,14 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" "github.com/ava-labs/avalanchego/vms/proposervm/state" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) @@ -59,7 +57,6 @@ type fullVM struct { } var ( - pTestNodeID ids.NodeID pTestSigner crypto.Signer pTestCert *staking.Certificate @@ -81,12 +78,7 @@ func init() { panic(err) } pTestSigner = tlsCert.PrivateKey.(crypto.Signer) - pTestCert = staking.CertificateFromX509(tlsCert.Leaf) - nodeIDBytes, err := secp256k1.RecoverSecp256PublicKey(tlsCert.Leaf) - if err != nil { - panic(err) - } - pTestNodeID, err = ids.ToNodeID(nodeIDBytes) + pTestCert, err = staking.CertificateFromX509(tlsCert.Leaf) if err != nil { panic(err) } @@ -95,6 +87,7 @@ func init() { func initTestProposerVM( t *testing.T, proBlkStartTime time.Time, + durangoTime time.Time, minPChainHeight uint64, ) ( *fullVM, @@ -155,12 +148,15 @@ func initTestProposerVM( proVM := New( coreVM, - proBlkStartTime, - minPChainHeight, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: proBlkStartTime, + DurangoTime: durangoTime, + MinimumPChainHeight: minPChainHeight, + 
MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -173,29 +169,34 @@ func initTestProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.ChainID = ids.ID{1} - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, ids.ID{1}) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState db := prefixdb.New([]byte{0}, memdb.New()) @@ -223,16 +224,49 @@ func initTestProposerVM( require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) + proVM.Set(coreGenBlk.Timestamp()) + return coreVM, valState, proVM, coreGenBlk, db } +func waitForProposerWindow(vm *VM, chainTip snowman.Block, pchainHeight uint64) error { + var ( + ctx = context.Background() + childBlockHeight = chainTip.Height() + 1 + parentTimestamp = chainTip.Timestamp() + ) + + for { + slot := proposer.TimeToSlot(parentTimestamp, vm.Clock.Time().Truncate(time.Second)) + delay, err := vm.MinDelayForProposer( + ctx, + childBlockHeight, + pchainHeight, + vm.ctx.NodeID, + slot, + ) + if err != nil { + return err + } + + vm.Clock.Set(parentTimestamp.Add(delay)) + if delay < proposer.MaxLookAheadWindow { + return nil + } + } +} + // VM.BuildBlock tests section func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { require := require.New(t) // given the same core block, BuildBlock returns the same proposer block - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -245,10 +279,9 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -265,7 +298,11 @@ func TestBuildBlockIsIdempotent(t *testing.T) { require := require.New(t) // given the same core block, BuildBlock returns the same proposer block - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { 
require.NoError(proVM.Shutdown(context.Background())) }() @@ -275,10 +312,9 @@ func TestBuildBlockIsIdempotent(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -300,7 +336,11 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { require := require.New(t) // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -310,10 +350,9 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -334,7 +373,11 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -345,10 +388,9 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil @@ -361,10 +403,9 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { IDV: ids.Empty.Prefix(222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil @@ -409,16 +450,15 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: prefcoreBlk.ID(), - HeightV: prefcoreBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: prefcoreBlk.ID(), + HeightV: prefcoreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proVM, proBlk2, proBlk2.(*postForkBlock).PChainHeight())) builtBlk, err := proVM.BuildBlock(context.Background()) require.NoError(err) @@ -429,7 +469,11 @@ func 
TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -439,10 +483,9 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil @@ -455,10 +498,9 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { IDV: ids.Empty.Prefix(222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil @@ -504,16 +546,15 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: wronglyPreferredcoreBlk.ID(), - HeightV: wronglyPreferredcoreBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: wronglyPreferredcoreBlk.ID(), + HeightV: wronglyPreferredcoreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proVM, proBlk2, proBlk2.(*postForkBlock).PChainHeight())) blk, err := proVM.BuildBlock(context.Background()) require.NoError(err) @@ -525,27 +566,29 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { require := require.New(t) - coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: proVM.Time(), + BytesV: []byte{1}, } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errMarshallingFailed } slb, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + proVM.Time(), 100, // pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk := postForkBlock{ @@ -565,32 +608,36 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { require := require.New(t) - coreVM, _, proVM, gencoreBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gencoreBlk, _ := 
initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create two Proposer blocks at the same height innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - ParentV: gencoreBlk.ID(), - HeightV: gencoreBlk.Height() + 1, - TimestampV: proVM.Time(), + BytesV: []byte{1}, + ParentV: gencoreBlk.ID(), + HeightV: gencoreBlk.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } + blkTimestamp := proVM.Time() + slb1, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + blkTimestamp, 100, // pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk1 := postForkBlock{ @@ -604,13 +651,12 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { slb2, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + blkTimestamp, 200, // pChainHeight, - proVM.ctx.NodeID, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk2 := postForkBlock{ @@ -638,17 +684,20 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // one block is built from this proVM localcoreBlk := &snowman.TestBlock{ - BytesV: []byte{111}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: genesisTimestamp, + BytesV: []byte{111}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return localcoreBlk, nil @@ -660,10 +709,9 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { // another block with same parent comes from network and is parsed netcoreBlk := &snowman.TestBlock{ - BytesV: []byte{222}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: genesisTimestamp, + BytesV: []byte{222}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -684,7 +732,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { netSlb, err := statelessblock.BuildUnsigned( proVM.preferred, - netcoreBlk.Timestamp(), + proVM.Time(), pChainHeight, netcoreBlk.Bytes(), ) @@ -706,7 +754,11 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { func TestPreFork_Initialize(t *testing.T) { require := require.New(t) - _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -725,7 +777,11 @@ func TestPreFork_Initialize(t *testing.T) { func TestPreFork_BuildBlock(t *testing.T) { require := require.New(t) - coreVM, 
_, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -735,10 +791,9 @@ func TestPreFork_BuildBlock(t *testing.T) { IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -763,8 +818,11 @@ func TestPreFork_BuildBlock(t *testing.T) { func TestPreFork_ParseBlock(t *testing.T) { require := require.New(t) - // setup - coreVM, _, proVM, _, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -799,7 +857,11 @@ func TestPreFork_ParseBlock(t *testing.T) { func TestPreFork_SetPreference(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -898,12 +960,15 @@ func TestExpiredBuildBlock(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -916,16 +981,17 @@ func TestExpiredBuildBlock(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState toEngine := make(chan common.Message, 1) @@ -969,29 +1035,27 @@ func TestExpiredBuildBlock(t *testing.T) { coreVM.InitializeF = nil require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - // Make sure that passing a message works - toScheduler <- common.PendingTxs - <-toEngine - // Notify the proposer VM of a new block on the inner block side toScheduler <- common.PendingTxs + // The first notification will be read from the consensus engine + <-toEngine + // Before calling BuildBlock, verify a remote block and set it as the + // preferred block. 
coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } statelessBlock, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), - coreBlk.Timestamp(), + proVM.Time(), 0, coreBlk.Bytes(), ) @@ -1024,7 +1088,6 @@ func TestExpiredBuildBlock(t *testing.T) { require.NoError(err) require.NoError(parsedBlock.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -1032,17 +1095,18 @@ func TestExpiredBuildBlock(t *testing.T) { return nil, errUnexpectedCall } - // The first notification will be read from the consensus engine - <-toEngine - + // Because we are now building on a different block, the proposer window + // shouldn't have started. _, err = proVM.BuildBlock(context.Background()) require.ErrorIs(err, errProposerWindowNotStarted) - proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxDelay)) + proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxBuildDelay)) proVM.Scheduler.SetBuildBlockTime(time.Now()) // The engine should have been notified to attempt to build a block now that - // the window has started again + // the window has started again. This is to guarantee that the inner VM has + // BuildBlock called after it sent a PendingTxs message on its internal + // engine channel. <-toEngine } @@ -1069,7 +1133,11 @@ func (b *wrappedBlock) Verify(ctx context.Context) error { func TestInnerBlockDeduplication(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // disable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1184,9 +1252,10 @@ func TestInnerVMRollback(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil } @@ -1215,8 +1284,8 @@ func TestInnerVMRollback(t *testing.T) { } } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState coreVM.InitializeF = func( @@ -1240,12 +1309,15 @@ func TestInnerVMRollback(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -1327,12 +1399,15 @@ func TestInnerVMRollback(t *testing.T) { proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + 
MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -1364,7 +1439,11 @@ func TestInnerVMRollback(t *testing.T) { func TestBuildBlockDuringWindow(t *testing.T) { require := require.New(t) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1383,24 +1462,22 @@ func TestBuildBlockDuringWindow(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - TimestampV: coreBlk0.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk0.ID(), + HeightV: coreBlk0.Height() + 1, } statelessBlock0, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), - coreBlk0.Timestamp(), + proVM.Time(), 0, coreBlk0.Bytes(), ) @@ -1467,8 +1544,11 @@ func TestBuildBlockDuringWindow(t *testing.T) { func TestTwoForks_OneIsAccepted(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1479,10 +1559,9 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -1499,15 +1578,14 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), + BytesV: []byte{2}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } ySlb, err := statelessblock.BuildUnsigned( gBlock.ID(), - gBlock.Timestamp(), + proVM.Time(), defaultPChainHeight, yBlock.Bytes(), ) @@ -1530,16 +1608,16 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: yBlock.ID(), - HeightV: yBlock.Height() + 1, - TimestampV: yBlock.Timestamp(), + BytesV: []byte{3}, + ParentV: yBlock.ID(), + HeightV: yBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil } require.NoError(proVM.SetPreference(context.Background(), bBlock.ID())) + proVM.Set(proVM.Time().Add(proposer.MaxBuildDelay)) cBlock, err := proVM.BuildBlock(context.Background()) require.NoError(err) coreVM.BuildBlockF = nil @@ -1563,8 +1641,11 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { func 
TestTooFarAdvanced(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1620,7 +1701,7 @@ func TestTooFarAdvanced(t *testing.T) { ySlb, err = statelessblock.BuildUnsigned( aBlock.ID(), - aBlock.Timestamp().Add(proposer.MaxDelay), + aBlock.Timestamp().Add(proposer.MaxVerifyDelay), defaultPChainHeight, yBlock.Bytes(), ) @@ -1654,8 +1735,11 @@ func TestTooFarAdvanced(t *testing.T) { func TestTwoOptions_OneIsAccepted(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1729,8 +1813,11 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { func TestLaggedPChainHeight(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1819,12 +1906,15 @@ func TestRejectedHeightNotIndexed(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -1837,28 +1927,34 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState require.NoError(proVM.Initialize( @@ -2020,12 +2116,15 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, 
+ StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -2038,28 +2137,34 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = valState require.NoError(proVM.Initialize( @@ -2174,15 +2279,18 @@ func TestVMInnerBlkCache(t *testing.T) { ctrl := gomock.NewController(t) // Create a VM - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) innerVM.EXPECT().Initialize( @@ -2206,8 +2314,8 @@ func TestVMInnerBlkCache(t *testing.T) { innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID require.NoError(vm.Initialize( context.Background(), @@ -2233,11 +2341,10 @@ func TestVMInnerBlkCache(t *testing.T) { ids.GenerateTestID(), // parent time.Time{}, // timestamp 1, // pChainHeight, - vm.ctx.NodeID, - vm.stakingCertLeaf, // cert + vm.StakingCertLeaf, // cert blkNearTipInnerBytes, // inner blk bytes vm.ctx.ChainID, // chain ID - vm.stakingLeafSigner, // key + vm.StakingLeafSigner, // key ) require.NoError(err) @@ -2277,8 +2384,11 @@ func TestVMInnerBlkCache(t *testing.T) { func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -2353,8 +2463,11 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { func TestVMInnerBlkMarkedAcceptedRegression(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -2393,7 +2506,7 @@ func 
TestVMInnerBlkMarkedAcceptedRegression(t *testing.T) { type blockWithVerifyContext struct { *snowman.MockBlock - *mocks.MockWithVerifyContext + *block.MockWithVerifyContext } // Ensures that we call [VerifyWithContext] rather than [Verify] on blocks that @@ -2404,15 +2517,18 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { ctrl := gomock.NewController(t) // Create a VM - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) // make sure that DBs are compressed correctly @@ -2439,8 +2555,8 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) } - snowCtx := snow.DefaultContextTest() - snowCtx.NodeID = NodeIDFromCert(pTestCert) + snowCtx := snowtest.Context(t, snowtest.CChainID) + snowCtx.NodeID = pTestCert.NodeID require.NoError(vm.Initialize( context.Background(), @@ -2461,7 +2577,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { pChainHeight := uint64(0) innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, nil).Times(2) innerBlk.MockWithVerifyContext.EXPECT().VerifyWithContext(context.Background(), @@ -2509,7 +2625,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { // false for ShouldVerifyWithContext innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(false, nil) innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) @@ -2532,7 +2648,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { // Ensure we call Verify on a block that doesn't have a valid context innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() @@ -2598,8 +2714,8 @@ func TestHistoricalBlockDeletion(t *testing.T) { }, } - ctx := snow.DefaultContextTest() - ctx.NodeID = NodeIDFromCert(pTestCert) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = pTestCert.NodeID ctx.ValidatorState = &validators.TestState{ T: t, GetMinimumHeightF: func(context.Context) (uint64, error) { @@ -2618,12 +2734,15 @@ func TestHistoricalBlockDeletion(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) 
require.NoError(proVM.Initialize( @@ -2716,12 +2835,15 @@ func TestHistoricalBlockDeletion(t *testing.T) { numHistoricalBlocks := uint64(2) proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - numHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -2758,12 +2880,15 @@ func TestHistoricalBlockDeletion(t *testing.T) { newNumHistoricalBlocks := numHistoricalBlocks + 2 proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - newNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: newNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -2802,9 +2927,3 @@ func TestHistoricalBlockDeletion(t *testing.T) { issueBlock() requireNumHeights(newNumHistoricalBlocks) } - -func NodeIDFromCert(cert *staking.Certificate) ids.NodeID { - return hashing.ComputeHash160Array( - hashing.ComputeHash256(cert.Raw), - ) -} diff --git a/vms/registry/mock_vm_getter.go b/vms/registry/mock_vm_getter.go index 52a1d67fc61a..30c38f1b6a74 100644 --- a/vms/registry/mock_vm_getter.go +++ b/vms/registry/mock_vm_getter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMGetter) +// +// Generated by this command: +// +// mockgen -package=registry -destination=vms/registry/mock_vm_getter.go github.com/ava-labs/avalanchego/vms/registry VMGetter +// // Package registry is a generated GoMock package. package registry diff --git a/vms/registry/mock_vm_registerer.go b/vms/registry/mock_vm_registerer.go deleted file mode 100644 index 563893d765ed..000000000000 --- a/vms/registry/mock_vm_registerer.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegisterer) - -// Package registry is a generated GoMock package. -package registry - -import ( - context "context" - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - vms "github.com/ava-labs/avalanchego/vms" - gomock "go.uber.org/mock/gomock" -) - -// MockVMRegisterer is a mock of VMRegisterer interface. -type MockVMRegisterer struct { - ctrl *gomock.Controller - recorder *MockVMRegistererMockRecorder -} - -// MockVMRegistererMockRecorder is the mock recorder for MockVMRegisterer. -type MockVMRegistererMockRecorder struct { - mock *MockVMRegisterer -} - -// NewMockVMRegisterer creates a new mock instance. -func NewMockVMRegisterer(ctrl *gomock.Controller) *MockVMRegisterer { - mock := &MockVMRegisterer{ctrl: ctrl} - mock.recorder = &MockVMRegistererMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockVMRegisterer) EXPECT() *MockVMRegistererMockRecorder { - return m.recorder -} - -// Register mocks base method. 
-func (m *MockVMRegisterer) Register(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. -func (mr *MockVMRegistererMockRecorder) Register(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockVMRegisterer)(nil).Register), arg0, arg1, arg2) -} - -// RegisterWithReadLock mocks base method. -func (m *MockVMRegisterer) RegisterWithReadLock(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterWithReadLock", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// RegisterWithReadLock indicates an expected call of RegisterWithReadLock. -func (mr *MockVMRegistererMockRecorder) RegisterWithReadLock(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterWithReadLock", reflect.TypeOf((*MockVMRegisterer)(nil).RegisterWithReadLock), arg0, arg1, arg2) -} diff --git a/vms/registry/mock_vm_registry.go b/vms/registry/mock_vm_registry.go index 32360ab1b169..43efd85a6015 100644 --- a/vms/registry/mock_vm_registry.go +++ b/vms/registry/mock_vm_registry.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegistry) +// +// Generated by this command: +// +// mockgen -package=registry -destination=vms/registry/mock_vm_registry.go github.com/ava-labs/avalanchego/vms/registry VMRegistry +// // Package registry is a generated GoMock package. package registry @@ -49,23 +51,7 @@ func (m *MockVMRegistry) Reload(arg0 context.Context) ([]ids.ID, map[ids.ID]erro } // Reload indicates an expected call of Reload. -func (mr *MockVMRegistryMockRecorder) Reload(arg0 interface{}) *gomock.Call { +func (mr *MockVMRegistryMockRecorder) Reload(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reload", reflect.TypeOf((*MockVMRegistry)(nil).Reload), arg0) } - -// ReloadWithReadLock mocks base method. -func (m *MockVMRegistry) ReloadWithReadLock(arg0 context.Context) ([]ids.ID, map[ids.ID]error, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReloadWithReadLock", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(map[ids.ID]error) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// ReloadWithReadLock indicates an expected call of ReloadWithReadLock. -func (mr *MockVMRegistryMockRecorder) ReloadWithReadLock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadWithReadLock", reflect.TypeOf((*MockVMRegistry)(nil).ReloadWithReadLock), arg0) -} diff --git a/vms/registry/vm_getter.go b/vms/registry/vm_getter.go index 5115af9e635f..826624744e38 100644 --- a/vms/registry/vm_getter.go +++ b/vms/registry/vm_getter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package registry diff --git a/vms/registry/vm_getter_test.go b/vms/registry/vm_getter_test.go index 9cea55b2c82f..30bab4232be9 100644 --- a/vms/registry/vm_getter_test.go +++ b/vms/registry/vm_getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry diff --git a/vms/registry/vm_registerer.go b/vms/registry/vm_registerer.go deleted file mode 100644 index 785b2b8cf828..000000000000 --- a/vms/registry/vm_registerer.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package registry - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" -) - -var ( - _ VMRegisterer = (*vmRegisterer)(nil) - - errNotVM = errors.New("not a VM") -) - -// VMRegisterer defines functionality to install a virtual machine. -type VMRegisterer interface { - registerer - // RegisterWithReadLock installs the VM assuming that the http read-lock is - // held. - RegisterWithReadLock(context.Context, ids.ID, vms.Factory) error -} - -type registerer interface { - // Register installs the VM. - Register(context.Context, ids.ID, vms.Factory) error -} - -// VMRegistererConfig configures settings for VMRegisterer. -type VMRegistererConfig struct { - APIServer server.Server - Log logging.Logger - VMFactoryLog logging.Logger - VMManager vms.Manager -} - -type vmRegisterer struct { - config VMRegistererConfig -} - -// NewVMRegisterer returns an instance of VMRegisterer -func NewVMRegisterer(config VMRegistererConfig) VMRegisterer { - return &vmRegisterer{ - config: config, - } -} - -func (r *vmRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.register(ctx, r.config.APIServer, vmID, factory) -} - -func (r *vmRegisterer) RegisterWithReadLock(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.register(ctx, server.PathWriterFromWithReadLock(r.config.APIServer), vmID, factory) -} - -func (r *vmRegisterer) register(ctx context.Context, pathAdder server.PathAdder, vmID ids.ID, factory vms.Factory) error { - if err := r.config.VMManager.RegisterFactory(ctx, vmID, factory); err != nil { - return err - } - handlers, err := r.createStaticHandlers(ctx, vmID, factory) - if err != nil { - return err - } - - // all static endpoints go to the vm endpoint, defaulting to the vm id - defaultEndpoint := path.Join(constants.VMAliasPrefix, vmID.String()) - - if err := r.createStaticEndpoints(pathAdder, handlers, defaultEndpoint); err != nil { - return err - } - urlAliases, err := r.getURLAliases(vmID, defaultEndpoint) - if err != nil { - return err - } - return pathAdder.AddAliases(defaultEndpoint, urlAliases...) -} - -// Creates a dedicated VM instance for the sole purpose of serving the static -// handlers. 
-func (r *vmRegisterer) createStaticHandlers( - ctx context.Context, - vmID ids.ID, - factory vms.Factory, -) (map[string]http.Handler, error) { - vm, err := factory.New(r.config.VMFactoryLog) - if err != nil { - return nil, err - } - - commonVM, ok := vm.(common.VM) - if !ok { - return nil, fmt.Errorf("%s is %w", vmID, errNotVM) - } - - handlers, err := commonVM.CreateStaticHandlers(ctx) - if err != nil { - r.config.Log.Error("failed to create static API endpoints", - zap.Stringer("vmID", vmID), - zap.Error(err), - ) - - if err := commonVM.Shutdown(ctx); err != nil { - return nil, fmt.Errorf("shutting down VM errored with: %w", err) - } - return nil, err - } - return handlers, nil -} - -func (r *vmRegisterer) createStaticEndpoints(pathAdder server.PathAdder, handlers map[string]http.Handler, defaultEndpoint string) error { - // register the static endpoints - for extension, service := range handlers { - r.config.Log.Verbo("adding static API endpoint", - zap.String("endpoint", defaultEndpoint), - zap.String("extension", extension), - ) - if err := pathAdder.AddRoute(service, defaultEndpoint, extension); err != nil { - return fmt.Errorf( - "failed to add static API endpoint %s%s: %w", - defaultEndpoint, - extension, - err, - ) - } - } - return nil -} - -func (r vmRegisterer) getURLAliases(vmID ids.ID, defaultEndpoint string) ([]string, error) { - aliases, err := r.config.VMManager.Aliases(vmID) - if err != nil { - return nil, err - } - - var urlAliases []string - for _, alias := range aliases { - urlAlias := path.Join(constants.VMAliasPrefix, alias) - if urlAlias != defaultEndpoint { - urlAliases = append(urlAliases, urlAlias) - } - } - return urlAliases, err -} - -type readRegisterer struct { - registerer VMRegisterer -} - -func (r readRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.registerer.RegisterWithReadLock(ctx, vmID, factory) -} diff --git a/vms/registry/vm_registerer_test.go b/vms/registry/vm_registerer_test.go deleted file mode 100644 index baaa569a89ba..000000000000 --- a/vms/registry/vm_registerer_test.go +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package registry - -import ( - "context" - "net/http" - "path" - "testing" - - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" - - "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" -) - -var id = ids.GenerateTestID() - -// Register should succeed even if we can't register a VM -func TestRegisterRegisterVMFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - - // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register if a VM doesn't actually implement VM. -func TestRegisterBadVM(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := "this is not a vm..." 
- - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - // Since this factory produces a bad vm, we should get an error. - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errNotVM) -} - -// Tests Register if creating endpoints for a VM fails + shutdown fails -func TestRegisterCreateHandlersAndShutdownFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register if creating endpoints for a VM fails + shutdown succeeds -func TestRegisterCreateHandlersFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register if we fail to register the new endpoint on the server. -func TestRegisterAddRouteFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // We fail to create an endpoint for the handler - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(errTest) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register we can't find the alias for the newly registered vm -func TestRegisterAliasLookupFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // Registering the route fails - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). 
- Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register if adding aliases for the newly registered vm fails -func TestRegisterAddAliasesFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - // Adding aliases fails - resources.mockServer.EXPECT(). - AddAliases( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Return(errTest) - - err := resources.registerer.Register(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests Register if no errors are thrown -func TestRegisterHappyCase(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - resources.mockServer.EXPECT(). - AddAliases( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Times(1). - Return(nil) - - require.NoError(t, resources.registerer.Register(context.Background(), id, vmFactory)) -} - -// RegisterWithReadLock should succeed even if we can't register a VM -func TestRegisterWithReadLockRegisterVMFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - - // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock if a VM doesn't actually implement VM. -func TestRegisterWithReadLockBadVM(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := "this is not a vm..." - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - // Since this factory produces a bad vm, we should get an error. 
- vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errNotVM) -} - -// Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown fails -func TestRegisterWithReadLockCreateHandlersAndShutdownFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown succeeds -func TestRegisterWithReadLockCreateHandlersFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock if we fail to register the new endpoint on the server. -func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // We fail to create an endpoint for the handler - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(errTest) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock we can't find the alias for the newly registered vm -func TestRegisterWithReadLockAliasLookupFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // RegisterWithReadLocking the route fails - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). 
- Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock if adding aliases for the newly registered vm fails -func TestRegisterWithReadLockAddAliasesFails(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - // Adding aliases fails - resources.mockServer.EXPECT(). - AddAliasesWithReadLock( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Return(errTest) - - err := resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory) - require.ErrorIs(t, err, errTest) -} - -// Tests RegisterWithReadLock if no errors are thrown -func TestRegisterWithReadLockHappyCase(t *testing.T) { - resources := initRegistererTest(t) - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]http.Handler{ - "foo": nil, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - resources.mockServer.EXPECT(). - AddAliasesWithReadLock( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Times(1). 
- Return(nil) - - require.NoError(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) -} - -type vmRegistererTestResources struct { - ctrl *gomock.Controller - mockManager *vms.MockManager - mockServer *server.MockServer - mockLogger *logging.MockLogger - registerer VMRegisterer -} - -func initRegistererTest(t *testing.T) *vmRegistererTestResources { - ctrl := gomock.NewController(t) - - mockManager := vms.NewMockManager(ctrl) - mockServer := server.NewMockServer(ctrl) - mockLog := logging.NewMockLogger(ctrl) - - registerer := NewVMRegisterer(VMRegistererConfig{ - APIServer: mockServer, - Log: mockLog, - VMFactoryLog: logging.NoLog{}, - VMManager: mockManager, - }) - - mockLog.EXPECT().Error(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Warn(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Trace(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Verbo(gomock.Any(), gomock.Any()).AnyTimes() - - return &vmRegistererTestResources{ - ctrl: ctrl, - mockManager: mockManager, - mockServer: mockServer, - mockLogger: mockLog, - registerer: registerer, - } -} diff --git a/vms/registry/vm_registry.go b/vms/registry/vm_registry.go index dd6f96d4e719..1374c4d46b8e 100644 --- a/vms/registry/vm_registry.go +++ b/vms/registry/vm_registry.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -7,6 +7,7 @@ import ( "context" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms" ) var _ VMRegistry = (*vmRegistry)(nil) @@ -16,15 +17,12 @@ var _ VMRegistry = (*vmRegistry)(nil) type VMRegistry interface { // Reload installs all non-installed vms on the node. Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) - // ReloadWithReadLock installs all non-installed vms on the node assuming - // the http read lock is currently held. 
- ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) } // VMRegistryConfig defines configurations for VMRegistry type VMRegistryConfig struct { - VMGetter VMGetter - VMRegisterer VMRegisterer + VMGetter VMGetter + VMManager vms.Manager } type vmRegistry struct { @@ -39,16 +37,6 @@ func NewVMRegistry(config VMRegistryConfig) VMRegistry { } func (r *vmRegistry) Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { - return r.reload(ctx, r.config.VMRegisterer) -} - -func (r *vmRegistry) ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { - return r.reload(ctx, readRegisterer{ - registerer: r.config.VMRegisterer, - }) -} - -func (r *vmRegistry) reload(ctx context.Context, registerer registerer) ([]ids.ID, map[ids.ID]error, error) { _, unregisteredVMs, err := r.config.VMGetter.Get() if err != nil { return nil, nil, err @@ -58,7 +46,7 @@ func (r *vmRegistry) reload(ctx context.Context, registerer registerer) ([]ids.I failedVMs := make(map[ids.ID]error) for vmID, factory := range unregisteredVMs { - if err := registerer.Register(ctx, vmID, factory); err != nil { + if err := r.config.VMManager.RegisterFactory(ctx, vmID, factory); err != nil { failedVMs[vmID] = err continue } diff --git a/vms/registry/vm_registry_test.go b/vms/registry/vm_registry_test.go index ecda1c4f5546..12e39a7c29c9 100644 --- a/vms/registry/vm_registry_test.go +++ b/vms/registry/vm_registry_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -47,12 +47,12 @@ func TestReload_Success(t *testing.T) { Get(). Times(1). Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id3, factory3). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id3, factory3). Times(1). Return(nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id4, factory4). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id4, factory4). Times(1). Return(nil) @@ -101,12 +101,12 @@ func TestReload_PartialRegisterFailure(t *testing.T) { Get(). Times(1). Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id3, factory3). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id3, factory3). Times(1). Return(errTest) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id4, factory4). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id4, factory4). Times(1). Return(nil) @@ -118,126 +118,30 @@ func TestReload_PartialRegisterFailure(t *testing.T) { require.Equal(id4, installedVMs[0]) } -// Tests the happy case where Reload succeeds. -func TestReloadWithReadLock_Success(t *testing.T) { - require := require.New(t) - - resources := initVMRegistryTest(t) - - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) - - registeredVms := map[ids.ID]vms.Factory{ - id1: factory1, - id2: factory2, - } - - unregisteredVms := map[ids.ID]vms.Factory{ - id3: factory3, - id4: factory4, - } - - resources.mockVMGetter.EXPECT(). - Get(). - Times(1). - Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id3, factory3). - Times(1). 
- Return(nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id4, factory4). - Times(1). - Return(nil) - - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.NoError(err) - require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) - require.Empty(failedVMs) -} - -// Tests that we fail if we're not able to get the vms on disk -func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) { - require := require.New(t) - - resources := initVMRegistryTest(t) - - resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) - - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.ErrorIs(err, errTest) - require.Empty(installedVMs) - require.Empty(failedVMs) -} - -// Tests that if we fail to register a VM, we fail. -func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { - require := require.New(t) - - resources := initVMRegistryTest(t) - - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) - - registeredVms := map[ids.ID]vms.Factory{ - id1: factory1, - id2: factory2, - } - - unregisteredVms := map[ids.ID]vms.Factory{ - id3: factory3, - id4: factory4, - } - - resources.mockVMGetter.EXPECT(). - Get(). - Times(1). - Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id3, factory3). - Times(1). - Return(errTest) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id4, factory4). - Times(1). - Return(nil) - - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.NoError(err) - require.Len(failedVMs, 1) - require.ErrorIs(failedVMs[id3], errTest) - require.Len(installedVMs, 1) - require.Equal(id4, installedVMs[0]) -} - type registryTestResources struct { - ctrl *gomock.Controller - mockVMGetter *MockVMGetter - mockVMRegisterer *MockVMRegisterer - vmRegistry VMRegistry + ctrl *gomock.Controller + mockVMGetter *MockVMGetter + mockVMManager *vms.MockManager + vmRegistry VMRegistry } func initVMRegistryTest(t *testing.T) *registryTestResources { ctrl := gomock.NewController(t) mockVMGetter := NewMockVMGetter(ctrl) - mockVMRegisterer := NewMockVMRegisterer(ctrl) + mockVMManager := vms.NewMockManager(ctrl) vmRegistry := NewVMRegistry( VMRegistryConfig{ - VMGetter: mockVMGetter, - VMRegisterer: mockVMRegisterer, + VMGetter: mockVMGetter, + VMManager: mockVMManager, }, ) return ®istryTestResources{ - ctrl: ctrl, - mockVMGetter: mockVMGetter, - mockVMRegisterer: mockVMRegisterer, - vmRegistry: vmRegistry, + ctrl: ctrl, + mockVMGetter: mockVMGetter, + mockVMManager: mockVMManager, + vmRegistry: vmRegistry, } } diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index 817037dc6e3e..f74785ebc5bc 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -14,11 +14,10 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/chain" ) @@ -42,7 +41,7 @@ func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) blo // create mock ctrl := gomock.NewController(t) - vm := mocks.NewMockChainVM(ctrl) + vm := block.NewMockChainVM(ctrl) if loadExpectations { blk1 := snowman.NewMockBlock(ctrl) @@ -86,7 +85,7 @@ func TestBatchedParseBlockCaching(t *testing.T) { vm, stopper := buildClientHelper(require, testKey) defer stopper.Stop(context.Background()) - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.CChainID) require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil, nil)) diff --git a/vms/rpcchainvm/errors.go b/vms/rpcchainvm/errors.go index 3795024378c4..4b434b51d425 100644 --- a/vms/rpcchainvm/errors.go +++ b/vms/rpcchainvm/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index f7ef19749ad1..d61c41d11af8 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm diff --git a/vms/rpcchainvm/ghttp/gconn/conn_client.go b/vms/rpcchainvm/ghttp/gconn/conn_client.go index b4bc5a5a4de0..cfa3094bfefe 100644 --- a/vms/rpcchainvm/ghttp/gconn/conn_client.go +++ b/vms/rpcchainvm/ghttp/gconn/conn_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn diff --git a/vms/rpcchainvm/ghttp/gconn/conn_server.go b/vms/rpcchainvm/ghttp/gconn/conn_server.go index 07ca0f5a2b3a..57f1cfdb064b 100644 --- a/vms/rpcchainvm/ghttp/gconn/conn_server.go +++ b/vms/rpcchainvm/ghttp/gconn/conn_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn diff --git a/vms/rpcchainvm/ghttp/greader/reader_client.go b/vms/rpcchainvm/ghttp/greader/reader_client.go index c06bdce9ba01..be0f2a1a7ee6 100644 --- a/vms/rpcchainvm/ghttp/greader/reader_client.go +++ b/vms/rpcchainvm/ghttp/greader/reader_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package greader diff --git a/vms/rpcchainvm/ghttp/greader/reader_server.go b/vms/rpcchainvm/ghttp/greader/reader_server.go index a5f8f5d76f30..4d85f674ffc6 100644 --- a/vms/rpcchainvm/ghttp/greader/reader_server.go +++ b/vms/rpcchainvm/ghttp/greader/reader_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
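Two test-infrastructure changes recur throughout the rpcchainvm test hunks: mocks are now generated into `snow/engine/snowman/block` itself (the old `block/mocks` package goes away), and test contexts come from `snowtest.Context` rather than `snow.DefaultContextTest()`. A hedged sketch of the resulting setup boilerplate; the helper name and the gomock import path are assumptions, only the calls shown in the hunks are taken from the diff:

```go
package rpcchainvm

import (
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/ava-labs/avalanchego/snow"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
	"github.com/ava-labs/avalanchego/snow/snowtest"
)

// newMockVMAndContext is a hypothetical helper showing the new imports in use.
func newMockVMAndContext(t *testing.T) (*block.MockChainVM, *snow.Context) {
	ctrl := gomock.NewController(t)

	// The mock now lives in the block package rather than block/mocks.
	vm := block.NewMockChainVM(ctrl)

	// snowtest.Context builds a ready-to-use test context for the C-chain.
	ctx := snowtest.Context(t, snowtest.CChainID)
	return vm, ctx
}
```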
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package greader diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go b/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go index c89eb5099cc6..40528dc79d2c 100644 --- a/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go +++ b/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go b/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go index 769d8edce555..1c45567097cf 100644 --- a/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go +++ b/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go b/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go index a78e6b002913..b73d24f21024 100644 --- a/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go +++ b/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter diff --git a/vms/rpcchainvm/ghttp/gwriter/writer_client.go b/vms/rpcchainvm/ghttp/gwriter/writer_client.go index d9a561f2dd4e..f68cefa7c2a6 100644 --- a/vms/rpcchainvm/ghttp/gwriter/writer_client.go +++ b/vms/rpcchainvm/ghttp/gwriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwriter diff --git a/vms/rpcchainvm/ghttp/gwriter/writer_server.go b/vms/rpcchainvm/ghttp/gwriter/writer_server.go index ce85aaced16e..1b216dc2a4ee 100644 --- a/vms/rpcchainvm/ghttp/gwriter/writer_server.go +++ b/vms/rpcchainvm/ghttp/gwriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwriter diff --git a/vms/rpcchainvm/ghttp/http_client.go b/vms/rpcchainvm/ghttp/http_client.go index 62a6b705a338..cd06c46ca156 100644 --- a/vms/rpcchainvm/ghttp/http_client.go +++ b/vms/rpcchainvm/ghttp/http_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ghttp diff --git a/vms/rpcchainvm/ghttp/http_server.go b/vms/rpcchainvm/ghttp/http_server.go index adece6f93679..c602965323fd 100644 --- a/vms/rpcchainvm/ghttp/http_server.go +++ b/vms/rpcchainvm/ghttp/http_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ghttp diff --git a/vms/rpcchainvm/ghttp/http_test.go b/vms/rpcchainvm/ghttp/http_test.go index 2bcf5f3150d8..22d5095d6c6d 100644 --- a/vms/rpcchainvm/ghttp/http_test.go +++ b/vms/rpcchainvm/ghttp/http_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ghttp diff --git a/vms/rpcchainvm/grpcutils/client.go b/vms/rpcchainvm/grpcutils/client.go index 0a9dfcffef6c..eb9501019768 100644 --- a/vms/rpcchainvm/grpcutils/client.go +++ b/vms/rpcchainvm/grpcutils/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/vms/rpcchainvm/grpcutils/client_test.go b/vms/rpcchainvm/grpcutils/client_test.go index 9ef2fa1d6731..e02552995295 100644 --- a/vms/rpcchainvm/grpcutils/client_test.go +++ b/vms/rpcchainvm/grpcutils/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/vms/rpcchainvm/grpcutils/server.go b/vms/rpcchainvm/grpcutils/server.go index a6746207e427..dbcc439c9ef1 100644 --- a/vms/rpcchainvm/grpcutils/server.go +++ b/vms/rpcchainvm/grpcutils/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/vms/rpcchainvm/grpcutils/server_closer.go b/vms/rpcchainvm/grpcutils/server_closer.go index 35ca2b735a86..67a4141ddfc3 100644 --- a/vms/rpcchainvm/grpcutils/server_closer.go +++ b/vms/rpcchainvm/grpcutils/server_closer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/vms/rpcchainvm/grpcutils/util.go b/vms/rpcchainvm/grpcutils/util.go index 8ad042ea55a9..880faf4d2a63 100644 --- a/vms/rpcchainvm/grpcutils/util.go +++ b/vms/rpcchainvm/grpcutils/util.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/vms/rpcchainvm/gruntime/runtime_client.go b/vms/rpcchainvm/gruntime/runtime_client.go index 67a1e9864908..8db4adbeb204 100644 --- a/vms/rpcchainvm/gruntime/runtime_client.go +++ b/vms/rpcchainvm/gruntime/runtime_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gruntime diff --git a/vms/rpcchainvm/gruntime/runtime_server.go b/vms/rpcchainvm/gruntime/runtime_server.go index 882c62f5ca02..09be6c121eef 100644 --- a/vms/rpcchainvm/gruntime/runtime_server.go +++ b/vms/rpcchainvm/gruntime/runtime_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gruntime diff --git a/vms/rpcchainvm/messenger/messenger_client.go b/vms/rpcchainvm/messenger/messenger_client.go index e7910eb05d2e..d392b9af79c6 100644 --- a/vms/rpcchainvm/messenger/messenger_client.go +++ b/vms/rpcchainvm/messenger/messenger_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package messenger diff --git a/vms/rpcchainvm/messenger/messenger_server.go b/vms/rpcchainvm/messenger/messenger_server.go index 273ffdfd25b0..fc28a0757bb2 100644 --- a/vms/rpcchainvm/messenger/messenger_server.go +++ b/vms/rpcchainvm/messenger/messenger_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package messenger diff --git a/vms/rpcchainvm/runtime/README.md b/vms/rpcchainvm/runtime/README.md index 6e09e41f8ace..1a2fe264bc1c 100644 --- a/vms/rpcchainvm/runtime/README.md +++ b/vms/rpcchainvm/runtime/README.md @@ -17,7 +17,7 @@ It works by starting the VM's as a subprocess of AvalancheGo by `os.Exec`. ## Workflow - `VMRegistry` calls the RPC Chain VM `Factory`. -- Factory Starts an instanace of a `VMRE` server that consumes a `runtime.Initializer` interface implementation. +- Factory Starts an instance of a `VMRE` server that consumes a `runtime.Initializer` interface implementation. - The address of this server is passed as a ENV variable `AVALANCHE_VM_RUNTIME_ENGINE_ADDR` via `os.Exec` which starts the VM binary. - The VM uses the address of the `VMRE` server to create a client. - Client sends a `Initialize` RPC informing the server of the `Protocol Version` and future `Address` of the RPC Chain VM server allowing it to perform a validation `Handshake`. diff --git a/vms/rpcchainvm/runtime/manager.go b/vms/rpcchainvm/runtime/manager.go index 3e1a9eaac903..425faa731850 100644 --- a/vms/rpcchainvm/runtime/manager.go +++ b/vms/rpcchainvm/runtime/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package runtime diff --git a/vms/rpcchainvm/runtime/runtime.go b/vms/rpcchainvm/runtime/runtime.go index d5be95d96471..1a1a198acbda 100644 --- a/vms/rpcchainvm/runtime/runtime.go +++ b/vms/rpcchainvm/runtime/runtime.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package runtime diff --git a/vms/rpcchainvm/runtime/subprocess/initializer.go b/vms/rpcchainvm/runtime/subprocess/initializer.go index 5ade5f619bd1..bc8d4e41c63a 100644 --- a/vms/rpcchainvm/runtime/subprocess/initializer.go +++ b/vms/rpcchainvm/runtime/subprocess/initializer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess diff --git a/vms/rpcchainvm/runtime/subprocess/linux_stopper.go b/vms/rpcchainvm/runtime/subprocess/linux_stopper.go index 80a47fa7da3c..5205ea40596f 100644 --- a/vms/rpcchainvm/runtime/subprocess/linux_stopper.go +++ b/vms/rpcchainvm/runtime/subprocess/linux_stopper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build linux diff --git a/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go b/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go index 8c3ce6a138e9..c1a590e31fe7 100644 --- a/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go +++ b/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build !linux diff --git a/vms/rpcchainvm/runtime/subprocess/runtime.go b/vms/rpcchainvm/runtime/subprocess/runtime.go index 7711d377127c..2cd92a00b04e 100644 --- a/vms/rpcchainvm/runtime/subprocess/runtime.go +++ b/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess diff --git a/vms/rpcchainvm/runtime/subprocess/stopper.go b/vms/rpcchainvm/runtime/subprocess/stopper.go index b4d026590421..4dfd33c24caa 100644 --- a/vms/rpcchainvm/runtime/subprocess/stopper.go +++ b/vms/rpcchainvm/runtime/subprocess/stopper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index 241062616c9b..45512bbec04d 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" @@ -66,8 +66,8 @@ var ( ) type StateSyncEnabledMock struct { - *mocks.MockChainVM - *mocks.MockStateSyncableVM + *block.MockChainVM + *block.MockStateSyncableVM } func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { @@ -76,8 +76,8 @@ func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) block.Chain // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -98,8 +98,8 @@ func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) b // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -119,8 +119,8 @@ func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.Ch // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -140,8 +140,8 @@ func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.Chai // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -162,8 +162,8 @@ func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainV // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -183,8 +183,8 @@ func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.Cha // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -229,8 +229,8 @@ func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpecta // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), 
} if loadExpectations { @@ -470,7 +470,7 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { defer stopper.Stop(context.Background()) // Step 1: initialize VM and check initial LastAcceptedBlock - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.CChainID) require.NoError(vm.Initialize(context.Background(), ctx, prefixdb.New([]byte{}, memdb.New()), nil, nil, nil, nil, nil, nil)) diff --git a/vms/rpcchainvm/vm.go b/vms/rpcchainvm/vm.go index e2e57f0284d3..ee2869989575 100644 --- a/vms/rpcchainvm/vm.go +++ b/vms/rpcchainvm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 9a7823979348..3ca090011dd3 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -375,36 +375,20 @@ func (vm *VMClient) CreateHandlers(ctx context.Context) (map[string]http.Handler return handlers, nil } -func (vm *VMClient) CreateStaticHandlers(ctx context.Context) (map[string]http.Handler, error) { - resp, err := vm.client.CreateStaticHandlers(ctx, &emptypb.Empty{}) - if err != nil { - return nil, err - } - - handlers := make(map[string]http.Handler, len(resp.Handlers)) - for _, handler := range resp.Handlers { - clientConn, err := grpcutils.Dial(handler.ServerAddr) - if err != nil { - return nil, err - } - - vm.conns = append(vm.conns, clientConn) - handlers[handler.Prefix] = ghttp.NewClient(httppb.NewHTTPClient(clientConn)) - } - return handlers, nil -} - func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { _, err := vm.client.Connected(ctx, &vmpb.ConnectedRequest{ - NodeId: nodeID[:], - Version: nodeVersion.String(), + NodeId: nodeID.Bytes(), + Name: nodeVersion.Name, + Major: uint32(nodeVersion.Major), + Minor: uint32(nodeVersion.Minor), + Patch: uint32(nodeVersion.Patch), }) return err } func (vm *VMClient) Disconnected(ctx context.Context, nodeID ids.NodeID) error { _, err := vm.client.Disconnected(ctx, &vmpb.DisconnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), }) return err } @@ -541,14 +525,15 @@ func (vm *VMClient) CrossChainAppRequest(ctx context.Context, chainID ids.ID, re return err } -func (vm *VMClient) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { - _, err := vm.client.CrossChainAppRequestFailed( - ctx, - &vmpb.CrossChainAppRequestFailedMsg{ - ChainId: chainID[:], - RequestId: requestID, - }, - ) +func (vm *VMClient) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + msg := &vmpb.CrossChainAppRequestFailedMsg{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: appErr.Code, + ErrorMessage: appErr.Message, + } + + _, err := vm.client.CrossChainAppRequestFailed(ctx, msg) return err } @@ -568,7 +553,7 @@ func (vm *VMClient) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID _, err := vm.client.AppRequest( ctx, &vmpb.AppRequestMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Request: request, Deadline: grpcutils.TimestampFromTime(deadline), @@ -581,7 +566,7 @@ func (vm *VMClient) 
AppResponse(ctx context.Context, nodeID ids.NodeID, requestI _, err := vm.client.AppResponse( ctx, &vmpb.AppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -589,14 +574,15 @@ func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestI return err } -func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - _, err := vm.client.AppRequestFailed( - ctx, - &vmpb.AppRequestFailedMsg{ - NodeId: nodeID[:], - RequestId: requestID, - }, - ) +func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + msg := &vmpb.AppRequestFailedMsg{ + NodeId: nodeID.Bytes(), + RequestId: requestID, + ErrorCode: appErr.Code, + ErrorMessage: appErr.Message, + } + + _, err := vm.client.AppRequestFailed(ctx, msg) return err } @@ -604,7 +590,7 @@ func (vm *VMClient) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte _, err := vm.client.AppGossip( ctx, &vmpb.AppGossipMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Msg: msg, }, ) diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 7ee82a241506..5c0c22de1ae9 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -337,43 +337,18 @@ func (vm *VMServer) CreateHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb return resp, nil } -func (vm *VMServer) CreateStaticHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb.CreateStaticHandlersResponse, error) { - handlers, err := vm.vm.CreateStaticHandlers(ctx) - if err != nil { - return nil, err - } - resp := &vmpb.CreateStaticHandlersResponse{} - for prefix, handler := range handlers { - serverListener, err := grpcutils.NewListener() - if err != nil { - return nil, err - } - server := grpcutils.NewServer() - vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler)) - - // Start HTTP service - go grpcutils.Serve(serverListener, server) - - resp.Handlers = append(resp.Handlers, &vmpb.Handler{ - Prefix: prefix, - ServerAddr: serverListener.Addr().String(), - }) - } - return resp, nil -} - func (vm *VMServer) Connected(ctx context.Context, req *vmpb.ConnectedRequest) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - peerVersion, err := version.ParseApplication(req.Version) - if err != nil { - return nil, err + peerVersion := &version.Application{ + Name: req.Name, + Major: int(req.Major), + Minor: int(req.Minor), + Patch: int(req.Patch), } - return &emptypb.Empty{}, vm.vm.Connected(ctx, nodeID, peerVersion) } @@ -536,7 +511,12 @@ func (vm *VMServer) CrossChainAppRequestFailed(ctx context.Context, msg *vmpb.Cr if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.CrossChainAppRequestFailed(ctx, chainID, msg.RequestId) + + appErr := &common.AppError{ + Code: msg.ErrorCode, + Message: msg.ErrorMessage, + } + return &emptypb.Empty{}, vm.vm.CrossChainAppRequestFailed(ctx, chainID, msg.RequestId, appErr) } func (vm *VMServer) CrossChainAppResponse(ctx context.Context, msg *vmpb.CrossChainAppResponseMsg) (*emptypb.Empty, error) { @@ -564,7 +544,12 @@ func (vm *VMServer) AppRequestFailed(ctx context.Context, req *vmpb.AppRequestFa if err != nil { return nil, err } - return 
&emptypb.Empty{}, vm.vm.AppRequestFailed(ctx, nodeID, req.RequestId) + + appErr := &common.AppError{ + Code: req.ErrorCode, + Message: req.ErrorMessage, + } + return &emptypb.Empty{}, vm.vm.AppRequestFailed(ctx, nodeID, req.RequestId, appErr) } func (vm *VMServer) AppResponse(ctx context.Context, req *vmpb.AppResponseMsg) (*emptypb.Empty, error) { diff --git a/vms/rpcchainvm/vm_test.go b/vms/rpcchainvm/vm_test.go index 1e299c35ee5e..b21cd503a74a 100644 --- a/vms/rpcchainvm/vm_test.go +++ b/vms/rpcchainvm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -19,7 +19,6 @@ import ( "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" @@ -172,7 +171,7 @@ func TestRuntimeSubprocessBootstrap(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - vm := mocks.NewMockChainVM(ctrl) + vm := block.NewMockChainVM(ctrl) listener, err := grpcutils.NewListener() require.NoError(err) diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index 65d1e4396964..8796ff60941b 100644 --- a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -14,10 +14,9 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" ) var ( @@ -37,13 +36,13 @@ var ( ) type ContextEnabledVMMock struct { - *mocks.MockChainVM - *mocks.MockBuildBlockWithContextChainVM + *block.MockChainVM + *block.MockBuildBlockWithContextChainVM } type ContextEnabledBlockMock struct { *snowman.MockBlock - *mocks.MockWithVerifyContext + *block.MockWithVerifyContext } func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { @@ -52,14 +51,14 @@ func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM // create mock ctrl := gomock.NewController(t) ctxVM := ContextEnabledVMMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockBuildBlockWithContextChainVM: mocks.NewMockBuildBlockWithContextChainVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockBuildBlockWithContextChainVM: block.NewMockBuildBlockWithContextChainVM(ctrl), } if loadExpectations { ctxBlock := ContextEnabledBlockMock{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } gomock.InOrder( // Initialize @@ -98,7 +97,7 @@ func TestContextVMSummary(t *testing.T) { vm, stopper := buildClientHelper(require, testKey) defer stopper.Stop(context.Background()) - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.CChainID) require.NoError(vm.Initialize(context.Background(), ctx, 
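Both `AppRequestFailed` and `CrossChainAppRequestFailed` now carry a structured application error across the gRPC boundary: the client serializes `Code`/`Message`, and the server rebuilds a `*common.AppError` and hands it to the VM. A small, self-contained sketch of a VM-side handler consuming it; the VM type and the printed output are illustrative only, the handler signature comes from the hunks above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/engine/common"
)

// exampleVM is a hypothetical VM used only to show the new handler shape.
type exampleVM struct{}

// AppRequestFailed now receives the error that travelled over the wire as
// ErrorCode/ErrorMessage in vmpb.AppRequestFailedMsg.
func (*exampleVM) AppRequestFailed(
	_ context.Context,
	nodeID ids.NodeID,
	requestID uint32,
	appErr *common.AppError,
) error {
	// VMs can branch on the structured code instead of parsing error strings.
	fmt.Printf("request %d from %s failed: code=%d msg=%q\n",
		requestID, nodeID, appErr.Code, appErr.Message)
	return nil
}

func main() {}
```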
memdb.New(), nil, nil, nil, nil, nil, nil)) diff --git a/vms/secp256k1fx/camino_credential_test.go b/vms/secp256k1fx/camino_credential_test.go index 89e698430a10..d69ee7b70980 100644 --- a/vms/secp256k1fx/camino_credential_test.go +++ b/vms/secp256k1fx/camino_credential_test.go @@ -5,6 +5,7 @@ package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -29,7 +30,7 @@ func TestMultisigCredentialUnordered(t *testing.T) { func TestMultisigCredentialSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/camino_fx_test.go b/vms/secp256k1fx/camino_fx_test.go index 491b87daa483..89ec12634b97 100644 --- a/vms/secp256k1fx/camino_fx_test.go +++ b/vms/secp256k1fx/camino_fx_test.go @@ -313,7 +313,7 @@ func TestCollectMultisigAliases(t *testing.T) { func defaultFx(t *testing.T) *Fx { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) diff --git a/vms/secp256k1fx/camino_transfer_output_test.go b/vms/secp256k1fx/camino_transfer_output_test.go index e39ffb1d867e..a6ea8de92d7f 100644 --- a/vms/secp256k1fx/camino_transfer_output_test.go +++ b/vms/secp256k1fx/camino_transfer_output_test.go @@ -5,6 +5,7 @@ package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -54,7 +55,7 @@ func TestCrossOutputVerifyEmpty(t *testing.T) { func TestCrossOutputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/credential.go b/vms/secp256k1fx/credential.go index 707a6b3f43d8..0367c9af96af 100644 --- a/vms/secp256k1fx/credential.go +++ b/vms/secp256k1fx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/credential_test.go b/vms/secp256k1fx/credential_test.go index 15496e1d7fac..e69b98b286e7 100644 --- a/vms/secp256k1fx/credential_test.go +++ b/vms/secp256k1fx/credential_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -27,7 +28,7 @@ func TestCredentialVerifyNil(t *testing.T) { func TestCredentialSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/factory.go b/vms/secp256k1fx/factory.go index ae2463a19deb..9630795ea378 100644 --- a/vms/secp256k1fx/factory.go +++ b/vms/secp256k1fx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
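These secp256k1fx test hunks, and the fx_test.go ones that follow, all make the same mechanical change: `linearcodec.NewDefault` now takes an activation time, and the tests pass the zero time. A minimal sketch of the updated codec setup the tests share; the comment on what the parameter means is an interpretation, not something stated in the diff:

```go
package main

import (
	"time"

	"github.com/ava-labs/avalanchego/codec"
	"github.com/ava-labs/avalanchego/codec/linearcodec"
)

func newTestCodecManager() (codec.Manager, error) {
	// NewDefault now requires a time argument; the tests pass the zero
	// value, which keeps all registered types usable from the start.
	c := linearcodec.NewDefault(time.Time{})
	m := codec.NewDefaultManager()
	return m, m.RegisterCodec(0, c)
}

func main() {
	if _, err := newTestCodecManager(); err != nil {
		panic(err)
	}
}
```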
package secp256k1fx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "secp256k1fx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/secp256k1fx/factory_test.go b/vms/secp256k1fx/factory_test.go index 435164998581..d7653d361f59 100644 --- a/vms/secp256k1fx/factory_test.go +++ b/vms/secp256k1fx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -7,14 +7,10 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go index c969c9593976..8c1cfa53bf9a 100644 --- a/vms/secp256k1fx/fx.go +++ b/vms/secp256k1fx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index c0e2663e6a23..dcdf78385404 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
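The factory hunk above also changes the construction contract: `Factory.New` drops the logger argument and the error return, so it now satisfies a leaner `fx.Factory` interface. That interface is not shown in this section; a plausible sketch based on the new method set (an assumption, not the verbatim `vms/fx` source):

```go
// Sketch of the single-method interface that
// `_ fx.Factory = (*Factory)(nil)` in the hunk above appears to assert.
type Factory interface {
	// New returns a fresh fx instance; the old
	// New(logging.Logger) (interface{}, error) shape is gone.
	New() any
}
```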
package secp256k1fx @@ -53,7 +53,7 @@ func init() { func TestFxInitialize(t *testing.T) { vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} @@ -69,7 +69,7 @@ func TestFxInitializeInvalid(t *testing.T) { func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -107,7 +107,7 @@ func TestFxVerifyTransfer(t *testing.T) { func TestFxVerifyTransferNilTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -143,7 +143,7 @@ func TestFxVerifyTransferNilTx(t *testing.T) { func TestFxVerifyTransferNilOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -170,7 +170,7 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { func TestFxVerifyTransferNilInput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -201,7 +201,7 @@ func TestFxVerifyTransferNilInput(t *testing.T) { func TestFxVerifyTransferNilCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -233,7 +233,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { func TestFxVerifyTransferInvalidOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -270,7 +270,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { func TestFxVerifyTransferWrongAmounts(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -307,7 +307,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { func TestFxVerifyTransferTimelocked(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -344,7 +344,7 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { func TestFxVerifyTransferTooManySigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -382,7 +382,7 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { func TestFxVerifyTransferTooFewSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, 
time.UTC) @@ -417,7 +417,7 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { func TestFxVerifyTransferMismatchedSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -455,7 +455,7 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { func TestFxVerifyTransferInvalidSignature(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -495,7 +495,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { func TestFxVerifyTransferWrongSigner(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -535,7 +535,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { func TestFxVerifyTransferSigIndexOOB(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -575,7 +575,7 @@ func TestFxVerifyTransferSigIndexOOB(t *testing.T) { func TestFxVerifyOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -627,7 +627,7 @@ func TestFxVerifyOperation(t *testing.T) { func TestFxVerifyOperationUnknownTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -679,7 +679,7 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -709,7 +709,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { func TestFxVerifyOperationUnknownCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -757,7 +757,7 @@ func TestFxVerifyOperationUnknownCredential(t *testing.T) { func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -810,7 +810,7 @@ func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -855,7 +855,7 @@ func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { func 
TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -905,7 +905,7 @@ func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -952,7 +952,7 @@ func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { func TestVerifyPermission(t *testing.T) { vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} diff --git a/vms/secp256k1fx/input.go b/vms/secp256k1fx/input.go index d5943a6b686e..7e11556be582 100644 --- a/vms/secp256k1fx/input.go +++ b/vms/secp256k1fx/input.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/input_test.go b/vms/secp256k1fx/input_test.go index 632004799e91..f80b824d7711 100644 --- a/vms/secp256k1fx/input_test.go +++ b/vms/secp256k1fx/input_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/keychain.go b/vms/secp256k1fx/keychain.go index 3246ef95722d..ecb42f209970 100644 --- a/vms/secp256k1fx/keychain.go +++ b/vms/secp256k1fx/keychain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/keychain_test.go b/vms/secp256k1fx/keychain_test.go index 3fcb064b18a4..cbd4d992d209 100644 --- a/vms/secp256k1fx/keychain_test.go +++ b/vms/secp256k1fx/keychain_test.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/mint_operation.go b/vms/secp256k1fx/mint_operation.go index a21f3061290b..80728ca7588e 100644 --- a/vms/secp256k1fx/mint_operation.go +++ b/vms/secp256k1fx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/mint_operation_test.go b/vms/secp256k1fx/mint_operation_test.go index 60b60b25f10a..3b751c8dcb33 100644 --- a/vms/secp256k1fx/mint_operation_test.go +++ b/vms/secp256k1fx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx diff --git a/vms/secp256k1fx/mint_output.go b/vms/secp256k1fx/mint_output.go index 996f05171bbe..e52ba47025ae 100644 --- a/vms/secp256k1fx/mint_output.go +++ b/vms/secp256k1fx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/mint_output_test.go b/vms/secp256k1fx/mint_output_test.go index 7d092a6772ea..60a72dfc95c4 100644 --- a/vms/secp256k1fx/mint_output_test.go +++ b/vms/secp256k1fx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/mock_alias_getter.go b/vms/secp256k1fx/mock_alias_getter.go index 9c49c53717b0..247ca869ebb7 100644 --- a/vms/secp256k1fx/mock_alias_getter.go +++ b/vms/secp256k1fx/mock_alias_getter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/secp256k1fx (interfaces: AliasGetter) +// +// Generated by this command: +// +// mockgen -package=secp256k1fx -destination=vms/secp256k1fx/mock_alias_getter.go github.com/ava-labs/avalanchego/vms/secp256k1fx AliasGetter +// // Package secp256k1fx is a generated GoMock package. package secp256k1fx @@ -48,7 +50,7 @@ func (m *MockAliasGetter) GetMultisigAlias(arg0 ids.ShortID) (*multisig.AliasWit } // GetMultisigAlias indicates an expected call of GetMultisigAlias. -func (mr *MockAliasGetterMockRecorder) GetMultisigAlias(arg0 interface{}) *gomock.Call { +func (mr *MockAliasGetterMockRecorder) GetMultisigAlias(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultisigAlias", reflect.TypeOf((*MockAliasGetter)(nil).GetMultisigAlias), arg0) } diff --git a/vms/secp256k1fx/output_owners.go b/vms/secp256k1fx/output_owners.go index d7978d9e3f3f..78409743c742 100644 --- a/vms/secp256k1fx/output_owners.go +++ b/vms/secp256k1fx/output_owners.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -21,7 +21,6 @@ var ( ErrOutputUnspendable = errors.New("output is unspendable") ErrOutputUnoptimized = errors.New("output representation should be optimized") ErrAddrsNotSortedUnique = errors.New("addresses not sorted and unique") - ErrMarshal = errors.New("cannot marshal without ctx") ) type OutputOwners struct { @@ -37,8 +36,8 @@ type OutputOwners struct { ctx *snow.Context } -// InitCtx assigns the OutputOwners.ctx object to given [ctx] object -// Must be called at least once for MarshalJSON to work successfully +// InitCtx allows addresses to be formatted into their human readable format +// during json marshalling. 
func (out *OutputOwners) InitCtx(ctx *snow.Context) { out.ctx = ctx } @@ -59,14 +58,7 @@ func (out *OutputOwners) MarshalJSON() ([]byte, error) { // Fields returns JSON keys in a map that can be used with marshal JSON // to serialize OutputOwners struct func (out *OutputOwners) Fields() (map[string]interface{}, error) { - addrsLen := len(out.Addrs) - - // we need out.ctx to do this, if its absent, throw error - if addrsLen > 0 && out.ctx == nil { - return nil, ErrMarshal - } - - addresses := make([]string, addrsLen) + addresses := make([]string, len(out.Addrs)) for i, addr := range out.Addrs { // for each [addr] in [Addrs] we attempt to format it given // the [out.ctx] object @@ -142,8 +134,13 @@ func (out *OutputOwners) Sort() { } // formatAddress formats a given [addr] into human readable format using -// [ChainID] and [NetworkID] from the provided [ctx]. +// [ChainID] and [NetworkID] if a non-nil [ctx] is provided. If [ctx] is not +// provided, the address will be returned in cb58 format. func formatAddress(ctx *snow.Context, addr ids.ShortID) (string, error) { + if ctx == nil { + return addr.String(), nil + } + chainIDAlias, err := ctx.BCLookup.PrimaryAlias(ctx.ChainID) if err != nil { return "", err diff --git a/vms/secp256k1fx/output_owners_test.go b/vms/secp256k1fx/output_owners_test.go index b09e28bea923..e042726bce64 100644 --- a/vms/secp256k1fx/output_owners_test.go +++ b/vms/secp256k1fx/output_owners_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -149,31 +149,20 @@ func TestOutputOwnerEquals(t *testing.T) { } } -func TestMarshalJSONRequiresCtxWhenAddrsArePresent(t *testing.T) { +func TestMarshalJSONDoesNotRequireCtx(t *testing.T) { require := require.New(t) out := &OutputOwners{ Threshold: 1, + Locktime: 2, Addrs: []ids.ShortID{ {1}, {0}, }, } - _, err := out.MarshalJSON() - require.ErrorIs(err, ErrMarshal) -} - -func TestMarshalJSONDoesNotRequireCtxWhenAddrsAreAbsent(t *testing.T) { - require := require.New(t) - out := &OutputOwners{ - Threshold: 1, - Locktime: 2, - Addrs: []ids.ShortID{}, - } - b, err := out.MarshalJSON() require.NoError(err) jsonData := string(b) - require.Equal(jsonData, "{\"addresses\":[],\"locktime\":2,\"threshold\":1}") + require.Equal(jsonData, `{"addresses":["6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt","111111111111111111116DBWJs"],"locktime":2,"threshold":1}`) } diff --git a/vms/secp256k1fx/transfer_input.go b/vms/secp256k1fx/transfer_input.go index 6dadd558fc49..1659820c26f7 100644 --- a/vms/secp256k1fx/transfer_input.go +++ b/vms/secp256k1fx/transfer_input.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/transfer_input_test.go b/vms/secp256k1fx/transfer_input_test.go index c3019a25a12f..c155d848e559 100644 --- a/vms/secp256k1fx/transfer_input_test.go +++ b/vms/secp256k1fx/transfer_input_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
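With the nil-context fallback above, `OutputOwners` can always be marshalled: if `InitCtx` was never called, addresses are rendered in cb58 rather than failing with the now-removed `ErrMarshal`. A small usage sketch that matches the updated test expectation:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/secp256k1fx"
)

func main() {
	out := &secp256k1fx.OutputOwners{
		Threshold: 1,
		Locktime:  2,
		Addrs:     []ids.ShortID{{1}, {0}},
	}

	// No InitCtx call: formatAddress falls back to cb58-encoded addresses
	// instead of returning an error.
	b, err := json.Marshal(out)
	fmt.Println(string(b), err)
}
```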
package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -80,7 +81,7 @@ func TestTransferInputVerifyUnsorted(t *testing.T) { func TestTransferInputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/transfer_output.go b/vms/secp256k1fx/transfer_output.go index 234cc1f68dab..ee4c5796c413 100644 --- a/vms/secp256k1fx/transfer_output.go +++ b/vms/secp256k1fx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/transfer_output_test.go b/vms/secp256k1fx/transfer_output_test.go index 767f93925b7c..864fb85b9ff0 100644 --- a/vms/secp256k1fx/transfer_output_test.go +++ b/vms/secp256k1fx/transfer_output_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -135,7 +136,7 @@ func TestOutputVerifyDuplicated(t *testing.T) { func TestOutputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/tx.go b/vms/secp256k1fx/tx.go index 81f4ee4d11e0..5cc483c7c3fc 100644 --- a/vms/secp256k1fx/tx.go +++ b/vms/secp256k1fx/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/secp256k1fx/vm.go b/vms/secp256k1fx/vm.go index ba8a9f1d6c0a..07da2d394236 100644 --- a/vms/secp256k1fx/vm.go +++ b/vms/secp256k1fx/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/vms/tracedvm/batched_vm.go b/vms/tracedvm/batched_vm.go index 47f81c9fa114..22dfb212ec6d 100644 --- a/vms/tracedvm/batched_vm.go +++ b/vms/tracedvm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/tracedvm/block.go b/vms/tracedvm/block.go index a90a110302fe..81949d777225 100644 --- a/vms/tracedvm/block.go +++ b/vms/tracedvm/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/tracedvm/block_vm.go b/vms/tracedvm/block_vm.go index 969a6bc09637..10931bf8c287 100644 --- a/vms/tracedvm/block_vm.go +++ b/vms/tracedvm/block_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracedvm diff --git a/vms/tracedvm/build_block_with_context_vm.go b/vms/tracedvm/build_block_with_context_vm.go index 1d9e9319605e..b069b471f26b 100644 --- a/vms/tracedvm/build_block_with_context_vm.go +++ b/vms/tracedvm/build_block_with_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/tracedvm/state_syncable_vm.go b/vms/tracedvm/state_syncable_vm.go index 75738462368b..e31507d55735 100644 --- a/vms/tracedvm/state_syncable_vm.go +++ b/vms/tracedvm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/tracedvm/tx.go b/vms/tracedvm/tx.go index 7e18efcb23b4..638ecd8f5914 100644 --- a/vms/tracedvm/tx.go +++ b/vms/tracedvm/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/tracedvm/vertex_vm.go b/vms/tracedvm/vertex_vm.go index 53189f5cee70..4bc162c6c6ae 100644 --- a/vms/tracedvm/vertex_vm.go +++ b/vms/tracedvm/vertex_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm diff --git a/vms/types/blob_data.go b/vms/types/blob_data.go index 1cbf7743eca9..df13cc31276e 100644 --- a/vms/types/blob_data.go +++ b/vms/types/blob_data.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package types diff --git a/wallet/chain/c/backend.go b/wallet/chain/c/backend.go index 0a735116b646..cefe6befcf6a 100644 --- a/wallet/chain/c/backend.go +++ b/wallet/chain/c/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c diff --git a/wallet/chain/c/builder.go b/wallet/chain/c/builder.go index d2d088e88a53..3e387ba3c27b 100644 --- a/wallet/chain/c/builder.go +++ b/wallet/chain/c/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package c @@ -201,6 +201,7 @@ func (b *builder) NewImportTx( importedInputs = append(importedInputs, &avax.TransferableInput{ UTXOID: utxo.UTXOID, Asset: utxo.Asset, + FxID: secp256k1fx.ID, In: &secp256k1fx.TransferInput{ Amt: amount, Input: secp256k1fx.Input{ @@ -267,6 +268,7 @@ func (b *builder) NewExportTx( for i, output := range outputs { exportedOutputs[i] = &avax.TransferableOutput{ Asset: avax.Asset{ID: avaxAssetID}, + FxID: secp256k1fx.ID, Out: output, } @@ -376,6 +378,14 @@ func (b *builder) NewExportTx( utils.Sort(inputs) tx.Ins = inputs + + snowCtx, err := newSnowContext(b.backend) + if err != nil { + return nil, err + } + for _, out := range tx.ExportedOutputs { + out.InitCtx(snowCtx) + } return tx, nil } diff --git a/wallet/chain/c/builder_with_options.go b/wallet/chain/c/builder_with_options.go index 8416dddf9928..fa98725450a6 100644 --- a/wallet/chain/c/builder_with_options.go +++ b/wallet/chain/c/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c diff --git a/wallet/chain/c/context.go b/wallet/chain/c/context.go index d506b42f81fa..9e4712b8f7c9 100644 --- a/wallet/chain/c/context.go +++ b/wallet/chain/c/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c @@ -8,9 +8,14 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" ) +const Alias = "C" + var _ Context = (*context)(nil) type Context interface { @@ -41,7 +46,7 @@ func NewContextFromClients( return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, "C") + chainID, err := infoClient.GetBlockchainID(ctx, Alias) if err != nil { return nil, err } @@ -81,3 +86,17 @@ func (c *context) BlockchainID() ids.ID { func (c *context) AVAXAssetID() ids.ID { return c.avaxAssetID } + +func newSnowContext(c Context) (*snow.Context, error) { + chainID := c.BlockchainID() + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + CChainID: chainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(chainID, Alias) +} diff --git a/wallet/chain/c/signer.go b/wallet/chain/c/signer.go index 4fd85ed3b532..7be1a149fb36 100644 --- a/wallet/chain/c/signer.go +++ b/wallet/chain/c/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c diff --git a/wallet/chain/c/wallet.go b/wallet/chain/c/wallet.go index fb1a83d53dad..304fbe4cf7c8 100644 --- a/wallet/chain/c/wallet.go +++ b/wallet/chain/c/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package c diff --git a/wallet/chain/c/wallet_with_options.go b/wallet/chain/c/wallet_with_options.go index 7d6193683d49..a0a1a60d85a8 100644 --- a/wallet/chain/c/wallet_with_options.go +++ b/wallet/chain/c/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package c diff --git a/wallet/chain/p/backend.go b/wallet/chain/p/backend.go index bb75692f3908..d06c7a1f9cf9 100644 --- a/wallet/chain/p/backend.go +++ b/wallet/chain/p/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/p/backend_visitor.go b/wallet/chain/p/backend_visitor.go index 57d602354428..1a0c8e39e8da 100644 --- a/wallet/chain/p/backend_visitor.go +++ b/wallet/chain/p/backend_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/p/builder.go b/wallet/chain/p/builder.go index f890790dad26..9a8189c4db6b 100644 --- a/wallet/chain/p/builder.go +++ b/wallet/chain/p/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -311,7 +311,7 @@ func (b *builder) NewBaseTx( outputs = append(outputs, changeOutputs...) avax.SortTransferableOutputs(outputs, txs.Codec) // sort the outputs - return &txs.CreateSubnetTx{ + tx := &txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -320,7 +320,8 @@ func (b *builder) NewBaseTx( Memo: ops.Memo(), }}, Owner: &secp256k1fx.OutputOwners{}, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddValidatorTx( @@ -343,7 +344,7 @@ func (b *builder) NewAddValidatorTx( } utils.Sort(rewardsOwner.Addrs) - return &txs.AddValidatorTx{ + tx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -355,7 +356,8 @@ func (b *builder) NewAddValidatorTx( StakeOuts: stakeOutputs, RewardsOwner: rewardsOwner, DelegationShares: shares, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddSubnetValidatorTx( @@ -377,7 +379,7 @@ func (b *builder) NewAddSubnetValidatorTx( return nil, err } - return &txs.AddSubnetValidatorTx{ + tx := &txs.AddSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -387,7 +389,8 @@ func (b *builder) NewAddSubnetValidatorTx( }}, SubnetValidator: *vdr, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewRemoveSubnetValidatorTx( @@ -410,7 +413,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( return nil, err } - return &txs.RemoveSubnetValidatorTx{ + tx := &txs.RemoveSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -421,7 +424,8 @@ func (b *builder) NewRemoveSubnetValidatorTx( Subnet: subnetID, NodeID: nodeID, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddDelegatorTx( @@ -443,7 +447,7 @@ 
func (b *builder) NewAddDelegatorTx( } utils.Sort(rewardsOwner.Addrs) - return &txs.AddDelegatorTx{ + tx := &txs.AddDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -454,7 +458,8 @@ func (b *builder) NewAddDelegatorTx( Validator: *vdr, StakeOuts: stakeOutputs, DelegationRewardsOwner: rewardsOwner, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewCreateChainTx( @@ -481,7 +486,7 @@ func (b *builder) NewCreateChainTx( } utils.Sort(fxIDs) - return &txs.CreateChainTx{ + tx := &txs.CreateChainTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -495,7 +500,8 @@ func (b *builder) NewCreateChainTx( FxIDs: fxIDs, GenesisData: genesis, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewCreateSubnetTx( @@ -513,7 +519,7 @@ func (b *builder) NewCreateSubnetTx( } utils.Sort(owner.Addrs) - return &txs.CreateSubnetTx{ + tx := &txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -522,7 +528,8 @@ func (b *builder) NewCreateSubnetTx( Memo: ops.Memo(), }}, Owner: owner, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewImportTx( @@ -618,7 +625,7 @@ func (b *builder) NewImportTx( } avax.SortTransferableOutputs(outputs, txs.Codec) // sort imported outputs - return &txs.ImportTx{ + tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -628,7 +635,8 @@ func (b *builder) NewImportTx( }}, SourceChain: sourceChainID, ImportedInputs: importedInputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewExportTx( @@ -656,7 +664,7 @@ func (b *builder) NewExportTx( } avax.SortTransferableOutputs(outputs, txs.Codec) // sort exported outputs - return &txs.ExportTx{ + tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -666,7 +674,8 @@ func (b *builder) NewExportTx( }}, DestinationChain: chainID, ExportedOutputs: outputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewTransformSubnetTx( @@ -702,7 +711,7 @@ func (b *builder) NewTransformSubnetTx( return nil, err } - return &txs.TransformSubnetTx{ + tx := &txs.TransformSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -725,7 +734,8 @@ func (b *builder) NewTransformSubnetTx( MaxValidatorWeightFactor: maxValidatorWeightFactor, UptimeRequirement: uptimeRequirement, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddPermissionlessValidatorTx( @@ -755,7 +765,7 @@ func (b *builder) NewAddPermissionlessValidatorTx( utils.Sort(validationRewardsOwner.Addrs) utils.Sort(delegationRewardsOwner.Addrs) - return &txs.AddPermissionlessValidatorTx{ + tx := &txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -770,7 +780,8 @@ func (b *builder) NewAddPermissionlessValidatorTx( ValidatorRewardsOwner: validationRewardsOwner, DelegatorRewardsOwner: delegationRewardsOwner, DelegationShares: shares, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddPermissionlessDelegatorTx( @@ -796,7 +807,7 @@ func (b *builder) NewAddPermissionlessDelegatorTx( } utils.Sort(rewardsOwner.Addrs) - 
return &txs.AddPermissionlessDelegatorTx{ + tx := &txs.AddPermissionlessDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -808,7 +819,8 @@ func (b *builder) NewAddPermissionlessDelegatorTx( Subnet: vdr.Subnet, StakeOuts: stakeOutputs, DelegationRewardsOwner: rewardsOwner, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) getBalance( @@ -1117,3 +1129,13 @@ func (b *builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*se SigIndices: inputSigIndices, }, nil } + +func (b *builder) initCtx(tx txs.UnsignedTx) error { + ctx, err := newSnowContext(b.backend) + if err != nil { + return err + } + + tx.InitCtx(ctx) + return nil +} diff --git a/wallet/chain/p/builder_with_options.go b/wallet/chain/p/builder_with_options.go index 9060d7639410..46deab976577 100644 --- a/wallet/chain/p/builder_with_options.go +++ b/wallet/chain/p/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/p/context.go b/wallet/chain/p/context.go index 5c6e520c3b13..9eb404a35f02 100644 --- a/wallet/chain/p/context.go +++ b/wallet/chain/p/context.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -18,10 +18,14 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" ) +const Alias = "P" + var _ Context = (*context)(nil) type Context interface { @@ -155,3 +159,15 @@ func (c *context) AddSubnetValidatorFee() uint64 { func (c *context) AddSubnetDelegatorFee() uint64 { return c.addSubnetDelegatorFee } + +func newSnowContext(c Context) (*snow.Context, error) { + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: constants.PlatformChainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(constants.PlatformChainID, Alias) +} diff --git a/wallet/chain/p/signer.go b/wallet/chain/p/signer.go index a795dd63c539..be2db8ddd2c0 100644 --- a/wallet/chain/p/signer.go +++ b/wallet/chain/p/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/p/signer_visitor.go b/wallet/chain/p/signer_visitor.go index 9dd6018ea2e3..c5c444a97f13 100644 --- a/wallet/chain/p/signer_visitor.go +++ b/wallet/chain/p/signer_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -284,7 +284,7 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri // TODO: remove [signHash] after the ledger supports signing all transactions. 
func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { - unsignedBytes, err := txs.Codec.Marshal(txs.Version, &tx.Unsigned) + unsignedBytes, err := txs.Codec.Marshal(txs.CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal unsigned tx: %w", err) } @@ -345,7 +345,7 @@ func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { } } - signedBytes, err := txs.Codec.Marshal(txs.Version, tx) + signedBytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal tx: %w", err) } diff --git a/wallet/chain/p/wallet.go b/wallet/chain/p/wallet.go index c4d5545818ae..e982a204a9f8 100644 --- a/wallet/chain/p/wallet.go +++ b/wallet/chain/p/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/p/wallet_with_options.go b/wallet/chain/p/wallet_with_options.go index 8135db0ecfbc..33faa1a86bb0 100644 --- a/wallet/chain/p/wallet_with_options.go +++ b/wallet/chain/p/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p diff --git a/wallet/chain/x/backend.go b/wallet/chain/x/backend.go index 56ade31be1b7..6c2f81365daf 100644 --- a/wallet/chain/x/backend.go +++ b/wallet/chain/x/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/wallet/chain/x/backend_visitor.go b/wallet/chain/x/backend_visitor.go index d617638434c6..7ce9aa2acd00 100644 --- a/wallet/chain/x/backend_visitor.go +++ b/wallet/chain/x/backend_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/wallet/chain/x/builder.go b/wallet/chain/x/builder.go index 0b639a7776ad..27932019e1f4 100644 --- a/wallet/chain/x/builder.go +++ b/wallet/chain/x/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -26,6 +26,12 @@ var ( errNoChangeAddress = errors.New("no possible change address") errInsufficientFunds = errors.New("insufficient funds") + fxIndexToID = map[uint32]ids.ID{ + 0: secp256k1fx.ID, + 1: nftfx.ID, + 2: propertyfx.ID, + } + _ Builder = (*builder)(nil) ) @@ -213,13 +219,14 @@ func (b *builder) NewBaseTx( outputs = append(outputs, changeOutputs...) 
avax.SortTransferableOutputs(outputs, Parser.Codec()) // sort the outputs - return &txs.BaseTx{BaseTx: avax.BaseTx{ + tx := &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), Ins: inputs, Outs: outputs, Memo: ops.Memo(), - }}, nil + }} + return tx, b.initCtx(tx) } func (b *builder) NewCreateAssetTx( @@ -243,12 +250,14 @@ func (b *builder) NewCreateAssetTx( for fxIndex, outs := range initialState { state := &txs.InitialState{ FxIndex: fxIndex, + FxID: fxIndexToID[fxIndex], Outs: outs, } state.Sort(codec) // sort the outputs states = append(states, state) } + utils.Sort(states) // sort the initial states tx := &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -262,8 +271,7 @@ func (b *builder) NewCreateAssetTx( Denomination: denomination, States: states, } - utils.Sort(tx.States) // sort the initial states - return tx, nil + return tx, b.initCtx(tx) } func (b *builder) NewOperationTx( @@ -280,7 +288,7 @@ func (b *builder) NewOperationTx( } txs.SortOperations(operations, Parser.Codec()) - return &txs.OperationTx{ + tx := &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -289,7 +297,8 @@ func (b *builder) NewOperationTx( Memo: ops.Memo(), }}, Ops: operations, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewOperationTxMintFT( @@ -380,6 +389,7 @@ func (b *builder) NewImportTx( importedInputs = append(importedInputs, &avax.TransferableInput{ UTXOID: utxo.UTXOID, Asset: utxo.Asset, + FxID: secp256k1fx.ID, In: &secp256k1fx.TransferInput{ Amt: out.Amt, Input: secp256k1fx.Input{ @@ -428,6 +438,7 @@ func (b *builder) NewImportTx( for assetID, amount := range importedAmounts { outputs = append(outputs, &avax.TransferableOutput{ Asset: avax.Asset{ID: assetID}, + FxID: secp256k1fx.ID, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: *to, @@ -436,7 +447,7 @@ func (b *builder) NewImportTx( } avax.SortTransferableOutputs(outputs, Parser.Codec()) - return &txs.ImportTx{ + tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -446,7 +457,8 @@ func (b *builder) NewImportTx( }}, SourceChain: chainID, ImportedIns: importedInputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewExportTx( @@ -473,7 +485,7 @@ func (b *builder) NewExportTx( } avax.SortTransferableOutputs(outputs, Parser.Codec()) - return &txs.ExportTx{ + tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -483,7 +495,8 @@ func (b *builder) NewExportTx( }}, DestinationChain: chainID, ExportedOuts: outputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) getBalance( @@ -578,6 +591,7 @@ func (b *builder) spend( inputs = append(inputs, &avax.TransferableInput{ UTXOID: utxo.UTXOID, Asset: utxo.Asset, + FxID: secp256k1fx.ID, In: &secp256k1fx.TransferInput{ Amt: out.Amt, Input: secp256k1fx.Input{ @@ -596,6 +610,7 @@ func (b *builder) spend( // This input had extra value, so some of it must be returned outputs = append(outputs, &avax.TransferableOutput{ Asset: utxo.Asset, + FxID: secp256k1fx.ID, Out: &secp256k1fx.TransferOutput{ Amt: remainingAmount, OutputOwners: *changeOwner, @@ -656,6 +671,7 @@ func (b *builder) mintFTs( operations = append(operations, &txs.Operation{ Asset: utxo.Asset, UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + FxID: secp256k1fx.ID, 
Op: &secp256k1fx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -719,6 +735,7 @@ func (b *builder) mintNFTs( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: nftfx.ID, Op: &nftfx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -775,6 +792,7 @@ func (b *builder) mintProperty( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: propertyfx.ID, Op: &propertyfx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -831,6 +849,7 @@ func (b *builder) burnProperty( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: propertyfx.ID, Op: &propertyfx.BurnOperation{ Input: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -847,3 +866,13 @@ func (b *builder) burnProperty( } return operations, nil } + +func (b *builder) initCtx(tx txs.UnsignedTx) error { + ctx, err := newSnowContext(b.backend) + if err != nil { + return err + } + + tx.InitCtx(ctx) + return nil +} diff --git a/wallet/chain/x/builder_with_options.go b/wallet/chain/x/builder_with_options.go index 63d554009fff..c2b65b05a630 100644 --- a/wallet/chain/x/builder_with_options.go +++ b/wallet/chain/x/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/wallet/chain/x/constants.go b/wallet/chain/x/constants.go index ed43fc07a84d..47efbfc2ff6d 100644 --- a/wallet/chain/x/constants.go +++ b/wallet/chain/x/constants.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( + "time" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -22,11 +24,14 @@ var Parser block.Parser func init() { var err error - Parser, err = block.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }) + Parser, err = block.NewParser( + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) if err != nil { panic(err) } diff --git a/wallet/chain/x/context.go b/wallet/chain/x/context.go index 4fa6af8b6e85..743b8c351648 100644 --- a/wallet/chain/x/context.go +++ b/wallet/chain/x/context.go @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********************************************************** -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -18,10 +18,14 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" ) +const Alias = "X" + var _ Context = (*context)(nil) type Context interface { @@ -42,7 +46,7 @@ type context struct { func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { infoClient := info.NewClient(uri) - xChainClient := avm.NewClient(uri, "X") + xChainClient := avm.NewClient(uri, Alias) return NewContextFromClients(ctx, infoClient, xChainClient) } @@ -56,7 +60,7 @@ func NewContextFromClients( return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, "X") + chainID, err := infoClient.GetBlockchainID(ctx, Alias) if err != nil { return nil, err } @@ -115,3 +119,17 @@ func (c *context) BaseTxFee() uint64 { func (c *context) CreateAssetTxFee() uint64 { return c.createAssetTxFee } + +func newSnowContext(c Context) (*snow.Context, error) { + chainID := c.BlockchainID() + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + XChainID: chainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(chainID, Alias) +} diff --git a/wallet/chain/x/signer.go b/wallet/chain/x/signer.go index 98d52d83218d..2c8268199a44 100644 --- a/wallet/chain/x/signer.go +++ b/wallet/chain/x/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/wallet/chain/x/signer_visitor.go b/wallet/chain/x/signer_visitor.go index 11e611b8134e..961463ec0256 100644 --- a/wallet/chain/x/signer_visitor.go +++ b/wallet/chain/x/signer_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -245,10 +245,13 @@ func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) var cred *secp256k1fx.Credential switch credImpl := credIntf.(type) { case *secp256k1fx.Credential: + fxCred.FxID = secp256k1fx.ID cred = credImpl case *nftfx.Credential: + fxCred.FxID = nftfx.ID cred = &credImpl.Credential case *propertyfx.Credential: + fxCred.FxID = propertyfx.ID cred = &credImpl.Credential default: return errUnknownCredentialType diff --git a/wallet/chain/x/wallet.go b/wallet/chain/x/wallet.go index 4e187d58220f..75b3914e199f 100644 --- a/wallet/chain/x/wallet.go +++ b/wallet/chain/x/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/wallet/chain/x/wallet_with_options.go b/wallet/chain/x/wallet_with_options.go index 810f4e94b32a..d62d02efdd40 100644 --- a/wallet/chain/x/wallet_with_options.go +++ b/wallet/chain/x/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x diff --git a/wallet/subnet/primary/api.go b/wallet/subnet/primary/api.go index 3ac72c217884..445c518aba25 100644 --- a/wallet/subnet/primary/api.go +++ b/wallet/subnet/primary/api.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package primary diff --git a/wallet/subnet/primary/common/options.go b/wallet/subnet/primary/common/options.go index 0c15be3c8b6b..03cc1c7b5f0a 100644 --- a/wallet/subnet/primary/common/options.go +++ b/wallet/subnet/primary/common/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/wallet/subnet/primary/common/spend.go b/wallet/subnet/primary/common/spend.go index d7511317c4bd..42c7fc02fc34 100644 --- a/wallet/subnet/primary/common/spend.go +++ b/wallet/subnet/primary/common/spend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/wallet/subnet/primary/common/utxos.go b/wallet/subnet/primary/common/utxos.go index 36a86bc1f126..23762a0dd5d7 100644 --- a/wallet/subnet/primary/common/utxos.go +++ b/wallet/subnet/primary/common/utxos.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/wallet/subnet/primary/example_test.go b/wallet/subnet/primary/example_test.go index 483c049d4ac0..2b8d8b8eeec8 100644 --- a/wallet/subnet/primary/example_test.go +++ b/wallet/subnet/primary/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package primary diff --git a/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go b/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go index d5e8ce422307..33695b35f649 100644 --- a/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go +++ b/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/add-primary-validator/main.go b/wallet/subnet/primary/examples/add-primary-validator/main.go index a56dae23db3a..987229d1ec22 100644 --- a/wallet/subnet/primary/examples/add-primary-validator/main.go +++ b/wallet/subnet/primary/examples/add-primary-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/c-chain-export/main.go b/wallet/subnet/primary/examples/c-chain-export/main.go index fec55c899feb..41ecb5ca814e 100644 --- a/wallet/subnet/primary/examples/c-chain-export/main.go +++ b/wallet/subnet/primary/examples/c-chain-export/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/c-chain-import/main.go b/wallet/subnet/primary/examples/c-chain-import/main.go index b4dc4e603eb3..387d435db4df 100644 --- a/wallet/subnet/primary/examples/c-chain-import/main.go +++ b/wallet/subnet/primary/examples/c-chain-import/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/create-asset/main.go b/wallet/subnet/primary/examples/create-asset/main.go index 30804f083df6..54015dda239d 100644 --- a/wallet/subnet/primary/examples/create-asset/main.go +++ b/wallet/subnet/primary/examples/create-asset/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/create-chain/main.go b/wallet/subnet/primary/examples/create-chain/main.go index 5e6898a1b649..ea98579f6f21 100644 --- a/wallet/subnet/primary/examples/create-chain/main.go +++ b/wallet/subnet/primary/examples/create-chain/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/create-locked-stakeable/main.go b/wallet/subnet/primary/examples/create-locked-stakeable/main.go index e688968e9e8a..32cdcf983ba0 100644 --- a/wallet/subnet/primary/examples/create-locked-stakeable/main.go +++ b/wallet/subnet/primary/examples/create-locked-stakeable/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/create-subnet/main.go b/wallet/subnet/primary/examples/create-subnet/main.go index add98ea7931c..e471e68f5be9 100644 --- a/wallet/subnet/primary/examples/create-subnet/main.go +++ b/wallet/subnet/primary/examples/create-subnet/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/get-p-chain-balance/main.go b/wallet/subnet/primary/examples/get-p-chain-balance/main.go index a19b3d6eae76..fd14eb4ea588 100644 --- a/wallet/subnet/primary/examples/get-p-chain-balance/main.go +++ b/wallet/subnet/primary/examples/get-p-chain-balance/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/get-x-chain-balance/main.go b/wallet/subnet/primary/examples/get-x-chain-balance/main.go index a5474f7e1095..c43d4c9dd229 100644 --- a/wallet/subnet/primary/examples/get-x-chain-balance/main.go +++ b/wallet/subnet/primary/examples/get-x-chain-balance/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/examples/remove-subnet-validator/main.go b/wallet/subnet/primary/examples/remove-subnet-validator/main.go index 2842c7c0a790..50639943b630 100644 --- a/wallet/subnet/primary/examples/remove-subnet-validator/main.go +++ b/wallet/subnet/primary/examples/remove-subnet-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main diff --git a/wallet/subnet/primary/utxos.go b/wallet/subnet/primary/utxos.go index f8c9ce20a694..71f7629856e1 100644 --- a/wallet/subnet/primary/utxos.go +++ b/wallet/subnet/primary/utxos.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package primary diff --git a/wallet/subnet/primary/wallet.go b/wallet/subnet/primary/wallet.go index 54de390d029c..3bb3e9965684 100644 --- a/wallet/subnet/primary/wallet.go +++ b/wallet/subnet/primary/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package primary diff --git a/x/archivedb/batch.go b/x/archivedb/batch.go index dc7502fafd2e..720ed6f9d5d3 100644 --- a/x/archivedb/batch.go +++ b/x/archivedb/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/db.go b/x/archivedb/db.go index ca638b982cf4..74b658a31736 100644 --- a/x/archivedb/db.go +++ b/x/archivedb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/db_test.go b/x/archivedb/db_test.go index a22b7768c812..2b1fbea2a46f 100644 --- a/x/archivedb/db_test.go +++ b/x/archivedb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/key.go b/x/archivedb/key.go index c90b02761402..86a884cb6c0f 100644 --- a/x/archivedb/key.go +++ b/x/archivedb/key.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/key_test.go b/x/archivedb/key_test.go index d56dca5f37fc..e5ea0ff3ced3 100644 --- a/x/archivedb/key_test.go +++ b/x/archivedb/key_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package archivedb @@ -21,9 +21,7 @@ func TestNaturalDescSortingForSameKey(t *testing.T) { entry := [][]byte{key0, key1, key2, key3} expected := [][]byte{key3, key2, key1, key0} - slices.SortFunc(entry, func(i, j []byte) bool { - return bytes.Compare(i, j) < 0 - }) + slices.SortFunc(entry, bytes.Compare) require.Equal(t, expected, entry) } @@ -37,9 +35,7 @@ func TestSortingDifferentPrefix(t *testing.T) { entry := [][]byte{key0, key1, key2, key3} expected := [][]byte{key1, key0, key3, key2} - slices.SortFunc(entry, func(i, j []byte) bool { - return bytes.Compare(i, j) < 0 - }) + slices.SortFunc(entry, bytes.Compare) require.Equal(t, expected, entry) } diff --git a/x/archivedb/prefix_test.go b/x/archivedb/prefix_test.go index 8c6362d745f4..8558b592bf99 100644 --- a/x/archivedb/prefix_test.go +++ b/x/archivedb/prefix_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/reader.go b/x/archivedb/reader.go index 0186cbc12712..abac3d854741 100644 --- a/x/archivedb/reader.go +++ b/x/archivedb/reader.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/archivedb/value.go b/x/archivedb/value.go index 2f7ff3e1f0f3..5f5861e23f43 100644 --- a/x/archivedb/value.go +++ b/x/archivedb/value.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package archivedb diff --git a/x/merkledb/README.md b/x/merkledb/README.md index 467a60e19b08..29c9f0a73247 100644 --- a/x/merkledb/README.md +++ b/x/merkledb/README.md @@ -1,44 +1,230 @@ -# Path Based Merkelized Radix Trie +# MerkleDB -## TODOs +## Structure -- [ ] Remove special casing around the root node from the physical structure of the hashed tree. -- [ ] Analyze performance of using database snapshots rather than in-memory history -- [ ] Improve intermediate node regeneration after ungraceful shutdown by reusing successfully written subtrees +A _Merkle radix trie_ is a data structure that is both a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree) and a [radix trie](https://en.wikipedia.org/wiki/Radix_tree). MerkleDB is an implementation of a persisted key-value store (sometimes just called "a store") using a Merkle radix trie. We sometimes use "Merkle radix trie" and "MerkleDB instance" interchangeably below, but the two are not the same. MerkleDB maintains data in a Merkle radix trie, but not all Merkle radix tries implement a key-value store. + +Like all tries, a MerkleDB instance is composed of nodes. Conceptually, a node has: + * A unique _key_ which identifies its position in the trie. A node's key is a prefix of its children's keys. + * A unique _ID_, which is the hash of the node. + * A _children_ array, where each element is the ID of the child at that index. A child at a lower index is to the "left" of children at higher indices. + * An optional value. If a node has a value, then the node's key maps to its value in the key-value store. Otherwise the key isn't present in the store. + +and looks like this: +``` +Node ++--------------------------------------------+ +| ID: 32 bytes | +| Key: ?
bytes | +| Value: Some(value) | None | +| Children: | +| 0: Some(child0ID) | None | +| 1: Some(child2ID) | None | +| ... | +| BranchFactor-1: Some(child15ID) | None | ++--------------------------------------------+ +``` + +This conceptual picture differs slightly from the implementation of the `node` in MerkleDB but is still useful in understanding how MerkleDB works. + +## Root IDs and Revisions + +The ID of the root node is called the _root ID_, or sometimes just the _root_ of the trie. If any node in a MerkleDB instance changes, the root ID will change. This follows from the fact that changing a node changes its ID, which changes its parent's reference to it, which changes the parent, which changes the parent's ID, and so on until the root. + +The root ID also serves as a unique identifier of a given state; instances with the same key-value mappings always have the same root ID, and instances with different key-value mappings always have different root IDs. We call a state with a given root ID a _revision_, and we sometimes say that a MerkleDB instance is "at" a given revision or root ID. The two are equivalent. + +## Views + +A _view_ is a proposal to modify a MerkleDB. If a view is _committed_, its changes are written to the MerkleDB. It can be queried, and when it is, it returns the state that the MerkleDB will contain if the view is committed. A view is immutable after creation. Namely, none of its key-value pairs can be modified. + +A view can be built atop the MerkleDB itself, or it can be built atop another view. Views can be chained together. For example, we might have: + +``` + db + / \ +view1 view2 + | +view3 +``` + +where `view1` and `view2` are built atop MerkleDB instance `db` and `view3` is built atop `view1`. Equivalently, we say that `db` is the parent of `view1` and `view2`, and `view3` is a child of `view1`. `view1` and `view2` are _siblings_. + +`view1` contains all the key-value pairs in `db`, except those modified by `view1`. That is, if `db` has key-value pair `(k,v)`, and `view1` doesn't modify that pair, then `view1` will return `v` when queried for the value of `k`. If `db` has `(k,v)` but `view1` modifies the pair to `(k, v')` then it will return `v'` when queried for the value of `k`. Similar for `view2`. + +`view3` has all of the key-value pairs as `view1`, except those modified in `view3`. That is, it has the state after the changes in `view1` are applied to `db`, followed by those in `view3`. -## Introduction +A view can be committed only if its parent is the MerkleDB (and not another view). A view can only be committed once. In the above diagram, `view3` can't be committed until `view1` is committed. -The Merkle Trie is a data structure that allows efficient and secure verification of the contents. It is a combination of a [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree) and a [Radix Trie](https://en.wikipedia.org/wiki/Radix_tree). +When a view is created, we don't apply changes to the trie's structure or calculate the new IDs of nodes because this requires expensive hashing. Instead, we lazily apply changes and calculate node IDs (including the root ID) when necessary. -The trie contains `Merkle Nodes`, which store key/value and children information. +### Validity + +When a view is committed, its siblings and all of their descendants are _invalidated_. An invalid view can't be read or committed. Method calls on it will return `ErrInvalid`. + +In the diagram above, if `view1` were committed, `view2` would be invalidated. 
If `view2` were committed, `view1` and `view3` would be invalidated. + +## Proofs + +### Simple Proofs + +MerkleDB instances can produce _merkle proofs_, sometimes just called "proofs." A merkle proof uses cryptography to prove that a given key-value pair is or isn't in the key-value store with a given root. That is, a MerkleDB instance with root ID `r` can create a proof that shows that it has a key-value pair `(k,v)`, or that `k` is not present. + +Proofs can be useful to a client fetching data in a Byzantine environment. Suppose there are one or more servers, which may be Byzantine, serving a distributed key-value store using MerkleDB, and a client that wants to retrieve key-value pairs. Suppose also that the client can learn a "trusted" root ID, perhaps because it's posted on a blockchain. The client can request a key-value pair from a server, and use the returned proof to verify that the returned key-value pair is actually in the key-value store with that root (or isn't, as it were). + +```mermaid +flowchart TD + A[Client] -->|"ProofRequest(k,r)"| B(Server) + B --> |"Proof(k,r)"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pair from proof) + C --> |Proof Invalid| E(Client doesn't trust key-value pair from proof) +``` + +`ProofRequest(k,r)` is a request for the value that `k` maps to in the MerkleDB instance with root `r` and a proof for that data's correctness. -Each `Merkle Node` represents a key path into the trie. It stores the key, the value (if one exists), its ID, and the IDs of its children nodes. The children have keys that contain the current node's key path as a prefix, and the index of each child indicates the next nibble in that child's key. For example, if we have two nodes, Node 1 with key path `0x91A` and Node 2 with key path `0x91A4`, Node 2 is stored in index `0x4` of Node 1's children (since 0x4 is the first value after the common prefix). +`Proof(k,r)` is a proof that purports to show either that key-value pair `(k,v)` exists in the revision at `r`, or that `k` isn't in the revision. -To reduce the depth of nodes in the trie, a `Merkle Node` utilizes path compression. Instead of having a long chain of nodes each containing only a single nibble of the key, we can "compress" the path by recording additional key information with each of a node's children. For example, if we have three nodes, Node 1 with key path `0x91A`, Node 2 with key path `0x91A4`, and Node 3 with key path `0x91A5132`, then Node 1 has a key of `0x91A`. Node 2 is stored at index `0x4` of Node 1's children since `4` is the next nibble in Node 2's key after skipping the common nibbles from Node 1's key. Node 3 is stored at index `0x5` of Node 1's children. Rather than have extra nodes for the remainder of Node 3's key, we instead store the rest of the key (`132`) in Node 1's children info. +#### Verification +A proof is represented as: + +```go +type Proof struct { + // Nodes in the proof path from root --> target key + // (or node that would be where key is if it doesn't exist). + // Always contains at least the root. + Path []ProofNode + + // This is a proof that [key] exists/doesn't exist. + Key Key + + // Nothing if [Key] isn't in the trie. + // Otherwise, the value corresponding to [Key]. + Value maybe.Maybe[[]byte] +} + +type ProofNode struct { + Key Key + // Nothing if this is an intermediate node. + // The value in this node if its length < [HashLen]. + // The hash of the value in this node otherwise.
+ ValueOrHash maybe.Maybe[[]byte] + Children map[byte]ids.ID +} ``` -+-----------------------------------+ -| Merkle Node | -| | -| ID: 0x0131 | an id representing the current node, derived from the node's value and all children ids -| Key: 0x91 | prefix of the key, representing the location of the node in the trie -| Value: 0x00 | the value, if one exists, that is stored at the key (keyPrefix + compressedKey) -| Children: | a map of children node ids for any nodes in the trie that have this node's key as a prefix -| 0: [:0x00542F] | child 0 represents a node with key 0x910 with ID 0x00542F -| 1: [0x432:0xA0561C] | child 1 represents a node with key 0x911432 with ID 0xA0561C -| ... | -| 15: [0x9A67B:0x02FB093] | child 15 represents a node with key 0x91F9A67B with ID 0x02FB093 -+-----------------------------------+ + +For an inclusion proof, the last node in `Path` should be the one containing `Key`. +For an exclusion proof, the last node is either: +* The node that would be the parent of `Key`, if such node has no child at the index `Key` would be at. +* The node at the same child index `Key` would be at, otherwise. + +In other words, the last node of a proof says either, "the key is in the trie, and this node contains it," or, "the key isn't in the trie, and this node's existence precludes the existence of the key." + +The prover can't simply trust that such a node exists, though. It has to verify this. The prover creates an empty trie and inserts the nodes in `Path`. If the root ID of this trie matches the `r`, the verifier can trust that the last node really does exist in the trie. If the last node _didn't_ really exist, the proof creator couldn't create `Path` such that its nodes both imply the existence of the ("fake") last node and also result in the correct root ID. This follows from the one-way property of hashing. + +### Range Proofs + +MerkleDB instances can also produce _range proofs_. A range proof proves that a contiguous set of key-value pairs is or isn't in the key-value store with a given root. This is similar to the merkle proofs described above, except for multiple key-value pairs. + +```mermaid +flowchart TD + A[Client] -->|"RangeProofRequest(start,end,r)"| B(Server) + B --> |"RangeProof(start,end,r)"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pairs) + C --> |Proof Invalid| E(Client doesn't trust key-value pairs) ``` +`RangeProofRequest(start,end,r)` is a request for all of the key-value pairs, in order, between keys `start` and `end` at revision `r`. + +`RangeProof(start,end,r)` contains a list of key-value pairs `kvs`, sorted by increasing key. It purports to show that, at revision `r`: +* Each element of `kvs` is a key-value pair in the store. +* There are no keys at/after `start` but before the first key in `kvs`. +* For adjacent key-value pairs `(k1,v1)` and `(k2,v2)` in `kvs`, there doesn't exist a key-value pair `(k3,v3)` in the store such that `k1 < k3 < k2`. In other words, `kvs` is a contiguous set of key-value pairs. + +Clients can use range proofs to efficiently download many key-value pairs at a time from a MerkleDB instance, as opposed to getting a proof for each key-value pair individually. + +#### Verification + +Like simple proofs, range proofs can be verified without any additional context or knowledge of the contents of the key-value store. + +A range proof is represented as: + +```go +type RangeProof struct { + // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. 
+ + // A proof that the smallest key in the requested range does/doesn't exist. + // Note that this may not be an entire proof -- nodes are omitted if + // they are also in [EndProof]. + StartProof []ProofNode + + // If no upper range bound was given and [KeyValues] is empty, this is empty. + // + // If no upper range bound was given and [KeyValues] is non-empty, this is + // a proof for the largest key in [KeyValues]. + // + // Otherwise this is a proof for the upper range bound. + EndProof []ProofNode + + // This proof proves that the key-value pairs in [KeyValues] are in the trie. + // Sorted by increasing key. + KeyValues []KeyValue +} +``` + +The prover creates an empty trie and adds to it all of the key-value pairs in `KeyValues`. + +Then, it inserts: +* The nodes in `StartProof` +* The nodes in `EndProof` + +For each node in `StartProof`, the prover only populates `Children` entries whose key is before `start`. +For each node in `EndProof`, it populates only `Children` entries whose key is after `end`, where `end` is the largest key proven by the range proof. + +Then, it calculates the root ID of this trie and compares it to the expected one. + +If the proof: +* Omits any key-values in the range +* Includes additional key-values that aren't really in the range +* Provides an incorrect value for a key in the range + +then the actual root ID won't match the expected root ID. + +Like simple proofs, range proof verification relies on the fact that the proof generator can't forge data such that it results in a trie with both incorrect data and the correct root ID. + +### Change Proofs + +Finally, MerkleDB instances can produce and verify _change proofs_. A change proof proves that a set of key-value changes were applied to a MerkleDB instance in the process of changing its root from `r` to `r'`. For example, a client can ask a server for the changes that take an instance from root `r` to root `r'`: + +```mermaid +flowchart TD + A[Client] -->|"ChangeProofRequest(start,end,r,r')"| B(Server) + B --> |"ChangeProof(start,end,r,r')"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pair changes) + C --> |Proof Invalid| E(Client doesn't trust key-value changes) +``` + +`ChangeProofRequest(start,end,r,r')` is a request for all key-value pairs, in order, between keys `start` and `end`, that occurred after the root was `r` and before the root was `r'`. + +`ChangeProof(start,end,r,r')` contains a set of key-value pairs `kvs`. It purports to show that: +* Each element of `kvs` is a key-value pair in the store at revision `r'` but not at revision `r`. +* There are no key-value changes between `r` and `r'` such that the key is at/after `start` but before the first key in `kvs`. +* For adjacent key-value changes `(k1,v1)` and `(k2,v2)` in `kvs`, there doesn't exist a key-value change `(k3,v3)` between `r` and `r'` such that `k1 < k3 < k2`. In other words, `kvs` is a contiguous set of key-value changes. + +Change proofs are useful for applying changes between revisions. For example, suppose a client has a MerkleDB instance at revision `r`. The client learns that the state has been updated and that the new root is `r'`. The client can request a change proof from a server at revision `r'`, and apply the changes in the change proof to change its state from `r` to `r'`. Note that `r` and `r'` need not be "consecutive" revisions. For example, it's possible that the state goes from revision `r` to `r1` to `r2` to `r'`. The client can apply changes to get directly from `r` to `r'`, without ever needing to be at revision `r1` or `r2`. 
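The client flow described above can be sketched in Go. This is a minimal, hypothetical illustration of syncing a local replica from its current revision to a trusted root using a single change proof; the `Fetcher`, `Replica`, `ChangeProof`, and `SyncToRoot` names are assumptions made for this sketch, not the actual merkledb or sync package APIs.

```go
package changesync

import (
	"context"
	"fmt"
)

// KeyChange is one key-value change between two revisions.
type KeyChange struct {
	Key   []byte
	Value []byte // nil means the key was deleted at the newer revision
}

// ChangeProof stands in for the server's response: the ordered list of
// changes plus whatever proof material is needed to verify them (elided).
type ChangeProof struct {
	Changes []KeyChange
}

// Fetcher is a client of an untrusted server.
type Fetcher interface {
	// GetChangeProof asks for the changes between oldRoot and newRoot over
	// the key range [start, end]; nil bounds mean "unbounded".
	GetChangeProof(ctx context.Context, start, end, oldRoot, newRoot []byte) (*ChangeProof, error)
}

// Replica is a local store currently at some revision.
type Replica interface {
	RootID() []byte
	// VerifyAndApply checks the proof against the replica's current root and
	// the trusted newRoot, and applies the changes only if the proof is valid.
	VerifyAndApply(ctx context.Context, proof *ChangeProof, newRoot []byte) error
}

// SyncToRoot moves the local replica from its current revision to the
// trusted revision newRoot using a single change proof.
func SyncToRoot(ctx context.Context, f Fetcher, r Replica, newRoot []byte) error {
	proof, err := f.GetChangeProof(ctx, nil, nil, r.RootID(), newRoot)
	if err != nil {
		return fmt.Errorf("fetching change proof: %w", err)
	}
	if err := r.VerifyAndApply(ctx, proof, newRoot); err != nil {
		return fmt.Errorf("change proof rejected: %w", err)
	}
	return nil
}
```

In practice a client would usually fetch the range in bounded chunks and verify each chunk before requesting the next, but the trust model is the same: the client only needs a trusted root ID, not a trusted server.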
+ +#### Verification + +Unlike simple proofs and range proofs, change proofs require additional context to verify. Namely, the prover must have the trie at the start root `r`. + +The verification algorithm is similar to range proofs, except that instead of inserting the key-value changes, start proof and end proof into an empty trie, they are added to the trie at revision `r`. + ## Serialization ### Node -Nodes are persisted in an underlying database. In order to persist nodes, we must first serialize them. -Serialization is done by the `encoder` interface defined in `codec.go`. +Nodes are persisted in an underlying database. In order to persist nodes, we must first serialize them. Serialization is done by the `encoder` interface defined in `codec.go`. -The node serialization format is as follows: +The node serialization format is: ``` +----------------------------------------------------+ @@ -76,8 +262,8 @@ The node serialization format is as follows: Where: * `Value existence flag` is `1` if this node has a value, otherwise `0`. -* `Value length` is the length of the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. -* `Value` is the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. +* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. * `Number of children` is the number of children this node has. * `Child index` is the index of a child node within the list of the node's children. * `Child compressed key length` is the length of the child node's compressed key. @@ -91,9 +277,9 @@ For each child of the node, we have an additional: +----------------------------------------------------+ | Child index (varint) | +----------------------------------------------------+ -| Child compressed key length (varint) | +| Child compressed key length (varint) | +----------------------------------------------------+ -| Child compressed key (variable length bytes) | +| Child compressed key (variable length bytes) | +----------------------------------------------------+ | Child ID (32 bytes) | +----------------------------------------------------+ @@ -134,10 +320,10 @@ The second is at child index `14`, has compressed key `0x0F0F0F` and ID (in hex) | Child index (varint) | | 0x00 | +--------------------------------------------------------------------+ -| Child compressed key length (varint) | +| Child compressed key length (varint) | | 0x02 | +--------------------------------------------------------------------+ -| Child compressed key (variable length bytes) | +| Child compressed key (variable length bytes) | | 0x10 | +--------------------------------------------------------------------+ | Child ID (32 bytes) | @@ -146,10 +332,10 @@ The second is at child index `14`, has compressed key `0x0F0F0F` and ID (in hex) | Child index (varint) | | 0x0E | +--------------------------------------------------------------------+ -| Child compressed key length (varint) | +| Child compressed key length (varint) | | 0x06 | +--------------------------------------------------------------------+ -| Child compressed key (variable length bytes) | +| Child compressed key (variable length bytes) | | 0xFFF0 | +--------------------------------------------------------------------+ | Child ID (32 bytes) | @@ -164,6 +350,13 @@ Each node must have a unique ID that identifies it. 
This ID is calculated by has * The node's value digest * The node's key +The node's value digest is: +* Nothing, if the node has no value +* The node's value, if it has a value < 32 bytes +* The hash of the node's value otherwise + +We use the node's value digest rather than its value when hashing so that when we send proofs, each `ProofNode` doesn't need to contain the node's value, which could be very large. By using the value digest, we allow a proof verifier to calculate a node's ID while limiting the size of the data sent to the verifier. + Specifically, we encode these values in the following way: ``` @@ -197,8 +390,8 @@ Where: * `Child index` is the index of a child node within the list of the node's children. * `Child ID` is the child node's ID. * `Value existence flag` is `1` if this node has a value, otherwise `0`. -* `Value length` is the length of the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. -* `Value` is the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. +* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. * `Key length` is the number of nibbles in this node's key. * `Key` is the node's key. @@ -216,39 +409,40 @@ Bytes are encoded by simply copying them onto the buffer. ## Design choices ### []byte copying -Nodes contain a []byte which represents its value. This slice should never be edited internally. This allows usage without having to make copies of it for safety. -Anytime these values leave the library, for example in `Get`, `GetValue`, `GetProof`, `GetRangeProof`, etc, they need to be copied into a new slice to prevent -edits made outside the library from being reflected in the DB/TrieViews. + +A node may contain a value, which is represented in Go as a `[]byte`. This slice is never edited, allowing it to be used without copying it first in many places. When a value leaves the library, for example when returned in `Get`, `GetValue`, `GetProof`, `GetRangeProof`, etc., the value is copied to prevent edits made outside the library from being reflected in the database. ### Split Node Storage -The nodes are stored under two different prefixes depending on if the node contains a value. -If it does contain a value it is stored within the ValueNodeDB and if it doesn't it is stored in the IntermediateNodeDB. -By splitting the nodes up by value, it allows better key/value iteration and a more compact key format. -### Single node type +Nodes with values ("value nodes") are persisted under one database prefix, while nodes without values ("intermediate nodes") are persisted under another database prefix. This separation allows for easy iteration over all key-value pairs in the database, as this is simply iterating over the database prefix containing value nodes. -A `Merkle Node` holds the IDs of its children, its value, as well as any key extension. This simplifies some logic and allows all of the data about a node to be loaded in a single database read. This trades off a small amount of storage efficiency (some fields may be `nil` but are still stored for every node). +### Single Node Type -### Validity +MerkleDB uses one type to represent nodes, rather than having multiple types (e.g. branch nodes, value nodes, extension nodes) as other Merkle Trie implementations do. 
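+
+For illustration, the single node type looks roughly like the following sketch. Field names mirror those used in the `merkledb` package, but the details are simplified and should not be read as the exact definitions.
+
+```go
+// Simplified sketch of the single node type; every trie node, whether it acts
+// as a leaf, a branch, or the root, uses this one shape.
+type child struct {
+	compressedKey Key    // key segment between this node and the child
+	id            ids.ID // the child's ID
+	hasValue      bool
+}
+
+type dbNode struct {
+	value    maybe.Maybe[[]byte] // Nothing for intermediate (valueless) nodes
+	children map[byte]*child     // child index -> child entry
+}
+
+type node struct {
+	dbNode
+	key         Key                 // this node's full key
+	valueDigest maybe.Maybe[[]byte] // the value, or its hash if the value is large
+}
+```
+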
-A `trieView` is built atop another trie, and there may be other `trieView`s built atop the same trie. We call these *siblings*. If one sibling is committed to database, we *invalidate* all other siblings and their descendants. Operations on an invalid trie return `ErrInvalid`. The children of the committed `trieView` are updated so that their new `parentTrie` is the database. +Not using extension nodes results in worse storage efficiency (some nodes may have mostly empty children) but simpler code. ### Locking `merkleDB` has a `RWMutex` named `lock`. Its read operations don't store data in a map, so a read lock suffices for read operations. `merkleDB` has a `Mutex` named `commitLock`. It enforces that only a single view/batch is attempting to commit to the database at one time. `lock` is insufficient because there is a period of view preparation where read access should still be allowed, followed by a period where a full write lock is needed. The `commitLock` ensures that only a single goroutine makes the transition from read => write. -A `trieView` is built atop another trie, which may be the underlying `merkleDB` or another `trieView`. +A `view` is built atop another trie, which may be the underlying `merkleDB` or another `view`. We use locking to guarantee atomicity/consistency of trie operations. -`trieView` has a `RWMutex` named `commitLock` which ensures that we don't create a view atop the `trieView` while it's being committed. -It also has a `RWMutex` named `validityTrackingLock` that is held during methods that change the view's validity, tracking of child views' validity, or of the `trieView` parent trie. This lock ensures that writing/reading from `trieView` or any of its descendants is safe. -The `CommitToDB` method grabs the `merkleDB`'s `commitLock`. This is the only `trieView` method that modifies the underlying `merkleDB`. +`view` has a `RWMutex` named `commitLock` which ensures that we don't create a view atop the `view` while it's being committed. +It also has a `RWMutex` named `validityTrackingLock` that is held during methods that change the view's validity, tracking of child views' validity, or of the `view` parent trie. This lock ensures that writing/reading from `view` or any of its descendants is safe. +The `CommitToDB` method grabs the `merkleDB`'s `commitLock`. This is the only `view` method that modifies the underlying `merkleDB`. -In some of `merkleDB`'s methods, we create a `trieView` and call unexported methods on it without locking it. +In some of `merkleDB`'s methods, we create a `view` and call unexported methods on it without locking it. We do so because the exported counterpart of the method read locks the `merkleDB`, which is already locked. This pattern is safe because the `merkleDB` is locked, so no data under the view is changing, and nobody else has a reference to the view, so there can't be any concurrent access. -To prevent deadlocks, `trieView` and `merkleDB` never acquire the `commitLock` of descendant views. +To prevent deadlocks, `view` and `merkleDB` never acquire the `commitLock` of descendant views. That is, locking is always done from a view toward to the underlying `merkleDB`, never the other way around. The `validityTrackingLock` goes the opposite way. A view can lock the `validityTrackingLock` of its children, but not its ancestors. Because of this, any function that takes the `validityTrackingLock` must not take the `commitLock` as this may cause a deadlock. 
Keeping `commitLock` solely in the ancestor direction and `validityTrackingLock` solely in the descendant direction prevents deadlocks from occurring. + +## TODOs + +- [ ] Analyze performance of using database snapshots rather than in-memory history +- [ ] Improve intermediate node regeneration after ungraceful shutdown by reusing successfully written subtrees diff --git a/x/merkledb/batch.go b/x/merkledb/batch.go index 82ff3533aaaf..033200409b79 100644 --- a/x/merkledb/batch.go +++ b/x/merkledb/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go index 57d674ed63ef..ee2e7f0b2713 100644 --- a/x/merkledb/cache.go +++ b/x/merkledb/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -25,6 +25,7 @@ type onEvictCache[K comparable, V any] struct { onEviction func(K, V) error } +// [size] must always return a positive number. func newOnEvictCache[K comparable, V any]( maxSize int, size func(K, V) int, @@ -48,7 +49,7 @@ func (c *onEvictCache[K, V]) Get(key K) (V, bool) { // Put an element into this cache. If this causes an element // to be evicted, calls [c.onEviction] on the evicted element -// and returns the error from [c.onEviction]. Otherwise returns nil. +// and returns the error from [c.onEviction]. Otherwise, returns nil. func (c *onEvictCache[K, V]) Put(key K, value V) error { c.lock.Lock() defer c.lock.Unlock() diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go index e0939df9451d..9883c23af38c 100644 --- a/x/merkledb/cache_test.go +++ b/x/merkledb/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index e7ef1eddb7f5..eae205631192 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -9,6 +9,7 @@ import ( "errors" "io" "math" + "math/bits" "sync" "golang.org/x/exp/maps" @@ -29,11 +30,8 @@ const ( minDBNodeLen = minMaybeByteSliceLen + minVarIntLen minChildLen = minVarIntLen + minKeyLen + ids.IDLen + boolLen - estimatedKeyLen = 64 - estimatedValueLen = 64 - estimatedCompressedKeyLen = 8 - // Child index, child compressed key, child ID, child has value - estimatedNodeChildLen = minVarIntLen + estimatedCompressedKeyLen + ids.IDLen + boolLen + estimatedKeyLen = 64 + estimatedValueLen = 64 // Child index, child ID hashValuesChildLen = minVarIntLen + ids.IDLen ) @@ -44,7 +42,6 @@ var ( trueBytes = []byte{trueByte} falseBytes = []byte{falseByte} - errTooManyChildren = errors.New("length of children list is larger than branching factor") errChildIndexTooLarge = errors.New("invalid child index. Must be less than branching factor") errLeadingZeroes = errors.New("varint has leading zeroes") errInvalidBool = errors.New("decoded bool is neither true nor false") @@ -63,13 +60,18 @@ type encoderDecoder interface { type encoder interface { // Assumes [n] is non-nil. 
encodeDBNode(n *dbNode) []byte - // Assumes [hv] is non-nil. - encodeHashValues(hv *hashValues) []byte + encodedDBNodeSize(n *dbNode) int + + // Returns the bytes that will be hashed to generate [n]'s ID. + // Assumes [n] is non-nil. + encodeHashValues(n *node) []byte + encodeKey(key Key) []byte } type decoder interface { // Assumes [n] is non-nil. - decodeDBNode(bytes []byte, n *dbNode, factor BranchFactor) error + decodeDBNode(bytes []byte, n *dbNode) error + decodeKey(bytes []byte) (Key, error) } func newCodec() encoderDecoder { @@ -82,7 +84,7 @@ func newCodec() encoderDecoder { } } -// Note that bytes.Buffer.Write always returns nil so we +// Note that bytes.Buffer.Write always returns nil, so we // can ignore its return values in [codecImpl] methods. type codecImpl struct { // Invariant: Every byte slice returned by [varIntPool] has @@ -90,16 +92,47 @@ type codecImpl struct { varIntPool sync.Pool } -func (c *codecImpl) encodeDBNode(n *dbNode) []byte { - var ( - numChildren = len(n.children) - // Estimate size of [n] to prevent memory allocations - estimatedLen = estimatedValueLen + minVarIntLen + estimatedNodeChildLen*numChildren - buf = bytes.NewBuffer(make([]byte, 0, estimatedLen)) - ) +func (c *codecImpl) childSize(index byte, childEntry *child) int { + // * index + // * child ID + // * child key + // * bool indicating whether the child has a value + return c.uintSize(uint64(index)) + ids.IDLen + c.keySize(childEntry.compressedKey) + boolLen +} +// based on the current implementation of codecImpl.encodeUint which uses binary.PutUvarint +func (*codecImpl) uintSize(value uint64) int { + if value == 0 { + return 1 + } + return (bits.Len64(value) + 6) / 7 +} + +func (c *codecImpl) keySize(p Key) int { + return c.uintSize(uint64(p.length)) + bytesNeeded(p.length) +} + +func (c *codecImpl) encodedDBNodeSize(n *dbNode) int { + // * number of children + // * bool indicating whether [n] has a value + // * the value (optional) + // * children + size := c.uintSize(uint64(len(n.children))) + boolLen + if n.value.HasValue() { + valueLen := len(n.value.Value()) + size += c.uintSize(uint64(valueLen)) + valueLen + } + // for each non-nil entry, we add the additional size of the child entry + for index, entry := range n.children { + size += c.childSize(index, entry) + } + return size +} + +func (c *codecImpl) encodeDBNode(n *dbNode) []byte { + buf := bytes.NewBuffer(make([]byte, 0, c.encodedDBNodeSize(n))) c.encodeMaybeByteSlice(buf, n.value) - c.encodeUint(buf, uint64(numChildren)) + c.encodeUint(buf, uint64(len(n.children))) // Note we insert children in order of increasing index // for determinism. 
keys := maps.Keys(n.children) @@ -107,16 +140,16 @@ func (c *codecImpl) encodeDBNode(n *dbNode) []byte { for _, index := range keys { entry := n.children[index] c.encodeUint(buf, uint64(index)) - c.encodeKey(buf, entry.compressedKey) + c.encodeKeyToBuffer(buf, entry.compressedKey) _, _ = buf.Write(entry.id[:]) c.encodeBool(buf, entry.hasValue) } return buf.Bytes() } -func (c *codecImpl) encodeHashValues(hv *hashValues) []byte { +func (c *codecImpl) encodeHashValues(n *node) []byte { var ( - numChildren = len(hv.Children) + numChildren = len(n.children) // Estimate size [hv] to prevent memory allocations estimatedLen = minVarIntLen + numChildren*hashValuesChildLen + estimatedValueLen + estimatedKeyLen buf = bytes.NewBuffer(make([]byte, 0, estimatedLen)) @@ -125,19 +158,20 @@ func (c *codecImpl) encodeHashValues(hv *hashValues) []byte { c.encodeUint(buf, uint64(numChildren)) // ensure that the order of entries is consistent - for index := 0; BranchFactor(index) < hv.Key.branchFactor; index++ { - if entry, ok := hv.Children[byte(index)]; ok { - c.encodeUint(buf, uint64(index)) - _, _ = buf.Write(entry.id[:]) - } + keys := maps.Keys(n.children) + slices.Sort(keys) + for _, index := range keys { + entry := n.children[index] + c.encodeUint(buf, uint64(index)) + _, _ = buf.Write(entry.id[:]) } - c.encodeMaybeByteSlice(buf, hv.Value) - c.encodeKey(buf, hv.Key) + c.encodeMaybeByteSlice(buf, n.valueDigest) + c.encodeKeyToBuffer(buf, n.key) return buf.Bytes() } -func (c *codecImpl) decodeDBNode(b []byte, n *dbNode, branchFactor BranchFactor) error { +func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) error { if minDBNodeLen > len(b) { return io.ErrUnexpectedEOF } @@ -154,25 +188,23 @@ func (c *codecImpl) decodeDBNode(b []byte, n *dbNode, branchFactor BranchFactor) switch { case err != nil: return err - case numChildren > uint64(branchFactor): - return errTooManyChildren case numChildren > uint64(src.Len()/minChildLen): return io.ErrUnexpectedEOF } - n.children = make(map[byte]child, branchFactor) + n.children = make(map[byte]*child, numChildren) var previousChild uint64 for i := uint64(0); i < numChildren; i++ { index, err := c.decodeUint(src) if err != nil { return err } - if index >= uint64(branchFactor) || (i != 0 && index <= previousChild) { + if (i != 0 && index <= previousChild) || index > math.MaxUint8 { return errChildIndexTooLarge } previousChild = index - compressedKey, err := c.decodeKey(src, branchFactor) + compressedKey, err := c.decodeKeyFromReader(src) if err != nil { return err } @@ -184,7 +216,7 @@ func (c *codecImpl) decodeDBNode(b []byte, n *dbNode, branchFactor BranchFactor) if err != nil { return err } - n.children[byte(index)] = child{ + n.children[byte(index)] = &child{ compressedKey: compressedKey, id: childID, hasValue: hasValue, @@ -277,12 +309,12 @@ func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (maybe.Maybe[[]byte] return maybe.Nothing[[]byte](), err } - bytes, err := c.decodeByteSlice(src) + rawBytes, err := c.decodeByteSlice(src) if err != nil { return maybe.Nothing[[]byte](), err } - return maybe.Some(bytes), nil + return maybe.Some(rawBytes), nil } func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { @@ -330,12 +362,31 @@ func (*codecImpl) decodeID(src *bytes.Reader) (ids.ID, error) { return id, err } -func (c *codecImpl) encodeKey(dst *bytes.Buffer, key Key) { - c.encodeUint(dst, uint64(key.tokenLength)) +func (c *codecImpl) encodeKey(key Key) []byte { + estimatedLen := binary.MaxVarintLen64 + len(key.Bytes()) + dst := 
bytes.NewBuffer(make([]byte, 0, estimatedLen)) + c.encodeKeyToBuffer(dst, key) + return dst.Bytes() +} + +func (c *codecImpl) encodeKeyToBuffer(dst *bytes.Buffer, key Key) { + c.encodeUint(dst, uint64(key.length)) _, _ = dst.Write(key.Bytes()) } -func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key, error) { +func (c *codecImpl) decodeKey(b []byte) (Key, error) { + src := bytes.NewReader(b) + key, err := c.decodeKeyFromReader(src) + if err != nil { + return Key{}, err + } + if src.Len() != 0 { + return Key{}, errExtraSpace + } + return key, err +} + +func (c *codecImpl) decodeKeyFromReader(src *bytes.Reader) (Key, error) { if minKeyLen > src.Len() { return Key{}, io.ErrUnexpectedEOF } @@ -347,9 +398,10 @@ func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key if length > math.MaxInt { return Key{}, errIntOverflow } - result := emptyKey(branchFactor) - result.tokenLength = int(length) - keyBytesLen := result.bytesNeeded(result.tokenLength) + result := Key{ + length: int(length), + } + keyBytesLen := bytesNeeded(result.length) if keyBytesLen > src.Len() { return Key{}, io.ErrUnexpectedEOF } @@ -363,8 +415,8 @@ func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key if result.hasPartialByte() { // Confirm that the padding bits in the partial byte are 0. // We want to only look at the bits to the right of the last token, which is at index length-1. - // Generate a mask with (8-bitsToShift) 0s followed by bitsToShift 1s. - paddingMask := byte(0xFF >> (8 - result.bitsToShift(result.tokenLength-1))) + // Generate a mask where the (result.length % 8) left bits are 0. + paddingMask := byte(0xFF >> (result.length % 8)) if buffer[keyBytesLen-1]&paddingMask != 0 { return Key{}, errNonZeroKeyPadding } diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index cb83e1ce582c..455b75e1bed1 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -80,24 +80,15 @@ func FuzzCodecKey(f *testing.F) { b []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - codec := codec.(*codecImpl) - reader := bytes.NewReader(b) - startLen := reader.Len() - got, err := codec.decodeKey(reader, branchFactor) - if err != nil { - t.SkipNow() - } - endLen := reader.Len() - numRead := startLen - endLen - - // Encoding [got] should be the same as [b]. - var buf bytes.Buffer - codec.encodeKey(&buf, got) - bufBytes := buf.Bytes() - require.Len(bufBytes, numRead) - require.Equal(b[:numRead], bufBytes) + codec := codec.(*codecImpl) + got, err := codec.decodeKey(b) + if err != nil { + t.SkipNow() } + + // Encoding [got] should be the same as [b]. + gotBytes := codec.encodeKey(got) + require.Equal(b, gotBytes) }, ) } @@ -109,17 +100,15 @@ func FuzzCodecDBNodeCanonical(f *testing.F) { b []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - codec := codec.(*codecImpl) - node := &dbNode{} - if err := codec.decodeDBNode(b, node, branchFactor); err != nil { - t.SkipNow() - } - - // Encoding [node] should be the same as [b]. - buf := codec.encodeDBNode(node) - require.Equal(b, buf) + codec := codec.(*codecImpl) + node := &dbNode{} + if err := codec.decodeDBNode(b, node); err != nil { + t.SkipNow() } + + // Encoding [node] should be the same as [b]. 
+ buf := codec.encodeDBNode(node) + require.Equal(b, buf) }, ) } @@ -133,7 +122,7 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { valueBytes []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { + for _, bf := range validBranchFactors { r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 value := maybe.Nothing[[]byte]() @@ -148,9 +137,9 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { value = maybe.Some(valueBytes) } - numChildren := r.Intn(int(branchFactor)) // #nosec G404 + numChildren := r.Intn(int(bf)) // #nosec G404 - children := map[byte]child{} + children := map[byte]*child{} for i := 0; i < numChildren; i++ { var childID ids.ID _, _ = r.Read(childID[:]) // #nosec G404 @@ -158,8 +147,8 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { childKeyBytes := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(childKeyBytes) // #nosec G404 - children[byte(i)] = child{ - compressedKey: ToKey(childKeyBytes, branchFactor), + children[byte(i)] = &child{ + compressedKey: ToKey(childKeyBytes), id: childID, } } @@ -169,9 +158,9 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { } nodeBytes := codec.encodeDBNode(&node) - + require.Len(nodeBytes, codec.encodedDBNodeSize(&node)) var gotNode dbNode - require.NoError(codec.decodeDBNode(nodeBytes, &gotNode, branchFactor)) + require.NoError(codec.decodeDBNode(nodeBytes, &gotNode)) require.Equal(node, gotNode) nodeBytes2 := codec.encodeDBNode(&gotNode) @@ -181,31 +170,15 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { ) } -func TestCodecDecodeDBNode(t *testing.T) { +func TestCodecDecodeDBNode_TooShort(t *testing.T) { require := require.New(t) var ( parsedDBNode dbNode tooShortBytes = make([]byte, minDBNodeLen-1) ) - err := codec.decodeDBNode(tooShortBytes, &parsedDBNode, BranchFactor16) + err := codec.decodeDBNode(tooShortBytes, &parsedDBNode) require.ErrorIs(err, io.ErrUnexpectedEOF) - - proof := dbNode{ - value: maybe.Some([]byte{1}), - children: map[byte]child{}, - } - - nodeBytes := codec.encodeDBNode(&proof) - // Remove num children (0) from end - nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] - proofBytesBuf := bytes.NewBuffer(nodeBytes) - - // Put num children > branch factor - codec.(*codecImpl).encodeUint(proofBytesBuf, uint64(BranchFactor16+1)) - - err = codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode, BranchFactor16) - require.ErrorIs(err, errTooManyChildren) } // Ensure that encodeHashValues is deterministic @@ -219,18 +192,18 @@ func FuzzEncodeHashValues(f *testing.F) { randSeed int, ) { require := require.New(t) - for _, branchFactor := range branchFactors { // Create a random *hashValues + for _, bf := range validBranchFactors { // Create a random node r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - children := map[byte]child{} - numChildren := r.Intn(int(branchFactor)) // #nosec G404 + children := map[byte]*child{} + numChildren := r.Intn(int(bf)) // #nosec G404 for i := 0; i < numChildren; i++ { compressedKeyLen := r.Intn(32) // #nosec G404 compressedKeyBytes := make([]byte, compressedKeyLen) _, _ = r.Read(compressedKeyBytes) // #nosec G404 - children[byte(i)] = child{ - compressedKey: ToKey(compressedKeyBytes, branchFactor), + children[byte(i)] = &child{ + compressedKey: ToKey(compressedKeyBytes), id: ids.GenerateTestID(), hasValue: r.Intn(2) == 1, // #nosec G404 } @@ -247,13 +220,15 @@ func FuzzEncodeHashValues(f *testing.F) { key := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(key) // #nosec G404 - hv := &hashValues{ - Children: 
children, - Value: value, - Key: ToKey(key, branchFactor), + hv := &node{ + key: ToKey(key), + dbNode: dbNode{ + children: children, + value: value, + }, } - // Serialize the *hashValues with both codecs + // Serialize hv with both codecs hvBytes1 := codec1.encodeHashValues(hv) hvBytes2 := codec2.encodeHashValues(hv) @@ -266,7 +241,28 @@ func FuzzEncodeHashValues(f *testing.F) { func TestCodecDecodeKeyLengthOverflowRegression(t *testing.T) { codec := codec.(*codecImpl) - bytes := bytes.NewReader(binary.AppendUvarint(nil, math.MaxInt)) - _, err := codec.decodeKey(bytes, BranchFactor16) + _, err := codec.decodeKey(binary.AppendUvarint(nil, math.MaxInt)) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } + +func TestUintSize(t *testing.T) { + c := codec.(*codecImpl) + + // Test lower bound + expectedSize := c.uintSize(0) + actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), 0) + require.Equal(t, expectedSize, actualSize) + + // Test upper bound + expectedSize = c.uintSize(math.MaxUint64) + actualSize = binary.PutUvarint(make([]byte, binary.MaxVarintLen64), math.MaxUint64) + require.Equal(t, expectedSize, actualSize) + + // Test powers of 2 + for power := 0; power < 64; power++ { + n := uint64(1) << uint(power) + expectedSize := c.uintSize(n) + actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), n) + require.Equal(t, expectedSize, actualSize, power) + } +} diff --git a/x/merkledb/db.go b/x/merkledb/db.go index 87439010b1f0..021ebc12d7d8 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -32,17 +32,17 @@ import ( ) const ( - // TODO: name better rebuildViewSizeFractionOfCacheSize = 50 minRebuildViewSizePerCommit = 1000 + clearBatchSize = units.MiB rebuildIntermediateDeletionWriteSize = units.MiB valueNodePrefixLen = 1 + cacheEntryOverHead = 8 ) var ( - rootKey []byte - _ MerkleDB = (*merkleDB)(nil) + _ MerkleDB = (*merkleDB)(nil) codec = newCodec() @@ -51,11 +51,11 @@ var ( intermediateNodePrefix = []byte{2} cleanShutdownKey = []byte(string(metadataPrefix) + "cleanShutdown") + rootDBKey = []byte(string(metadataPrefix) + "root") hadCleanShutdown = []byte{1} didNotHaveCleanShutdown = []byte{0} - errSameRoot = errors.New("start and end root are the same") - errNoNewRoot = errors.New("there was no updated root in change list") + errSameRoot = errors.New("start and end root are the same") ) type ChangeProofer interface { @@ -64,6 +64,11 @@ type ChangeProofer interface { // Returns at most [maxLength] key/value pairs. // Returns [ErrInsufficientHistory] if this node has insufficient history // to generate the proof. + // Returns ErrEmptyProof if [endRootID] is ids.Empty. + // Note that [endRootID] == ids.Empty means the trie is empty + // (i.e. we don't need a change proof.) + // Returns [ErrNoEndRoot], which wraps [ErrInsufficientHistory], if the + // history doesn't contain the [endRootID]. GetChangeProof( ctx context.Context, startRootID ids.ID, @@ -73,7 +78,7 @@ type ChangeProofer interface { maxLength int, ) (*ChangeProof, error) - // Returns nil iff all of the following hold: + // Returns nil iff all the following hold: // - [start] <= [end]. // - [proof] is non-empty. // - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. @@ -100,6 +105,9 @@ type RangeProofer interface { // [start, end] when the root of the trie was [rootID]. 
// If [start] is Nothing, there's no lower bound on the range. // If [end] is Nothing, there's no upper bound on the range. + // Returns ErrEmptyProof if [rootID] is ids.Empty. + // Note that [rootID] == ids.Empty means the trie is empty + // (i.e. we don't need a range proof.) GetRangeProofAtRoot( ctx context.Context, rootID ids.ID, @@ -114,6 +122,12 @@ type RangeProofer interface { CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error } +type Clearer interface { + // Deletes all key/value pairs from the database + // and clears the change history. + Clear() error +} + type Prefetcher interface { // PrefetchPath attempts to load all trie nodes on the path of [key] // into the cache. @@ -129,6 +143,7 @@ type Prefetcher interface { type MerkleDB interface { database.Database + Clearer Trie MerkleRootGetter ProofGetter @@ -146,16 +161,19 @@ type Config struct { // // If 0 is specified, [runtime.NumCPU] will be used. RootGenConcurrency uint - // The number of bytes to write to disk when intermediate nodes are evicted - // from their cache and written to disk. - EvictionBatchSize uint + // The number of changes to the database that we store in memory in order to // serve change proofs. HistoryLength uint - // The number of bytes to cache nodes with values. + // The number of bytes used to cache nodes with values. ValueNodeCacheSize uint - // The number of bytes to cache nodes without values. + // The number of bytes used to cache nodes without values. IntermediateNodeCacheSize uint + // The number of bytes used to store nodes without values in memory before forcing them onto disk. + IntermediateWriteBufferSize uint + // The number of bytes to write to disk when intermediate nodes are evicted + // from the write buffer and written to disk. + IntermediateWriteBatchSize uint // If [Reg] is nil, metrics are collected locally but not exported through // Prometheus. // This may be useful for testing. @@ -164,7 +182,7 @@ type Config struct { Tracer trace.Tracer } -// merkleDB can only be edited by committing changes from a trieView. +// merkleDB can only be edited by committing changes from a view. type merkleDB struct { // Must be held when reading/writing fields. lock sync.RWMutex @@ -175,7 +193,7 @@ type merkleDB struct { // Should be held before taking [db.lock] commitLock sync.RWMutex - // Contains all of the key-value pairs stored by this database, + // Contains all the key-value pairs stored by this database, // including metadata, intermediate nodes and value nodes. baseDB database.Database @@ -195,17 +213,19 @@ type merkleDB struct { infoTracer trace.Tracer // The root of this trie. - root *node + // Nothing if the trie is empty. + root maybe.Maybe[*node] + + rootID ids.ID // Valid children of this trie. - childViews []*trieView + childViews []*view // calculateNodeIDsSema controls the number of goroutines inside // [calculateNodeIDsHelper] at any given time. calculateNodeIDsSema *semaphore.Weighted - toKey func(p []byte) Key - rootKey Key + tokenSize int } // New returns a new merkle database. 
@@ -223,17 +243,13 @@ func newDatabase( config Config, metrics merkleMetrics, ) (*merkleDB, error) { - rootGenConcurrency := uint(runtime.NumCPU()) - if config.RootGenConcurrency != 0 { - rootGenConcurrency = config.RootGenConcurrency - } - if err := config.BranchFactor.Valid(); err != nil { return nil, err } - toKey := func(b []byte) Key { - return ToKey(b, config.BranchFactor) + rootGenConcurrency := uint(runtime.NumCPU()) + if config.RootGenConcurrency != 0 { + rootGenConcurrency = config.RootGenConcurrency } // Share a sync.Pool of []byte between the intermediateNodeDB and valueNodeDB @@ -243,28 +259,40 @@ func newDatabase( return make([]byte, 0, defaultBufferLength) }, } + trieDB := &merkleDB{ - metrics: metrics, - baseDB: db, - valueNodeDB: newValueNodeDB(db, bufferPool, metrics, int(config.ValueNodeCacheSize), config.BranchFactor), - intermediateNodeDB: newIntermediateNodeDB(db, bufferPool, metrics, int(config.IntermediateNodeCacheSize), int(config.EvictionBatchSize)), - history: newTrieHistory(int(config.HistoryLength), toKey), + metrics: metrics, + baseDB: db, + intermediateNodeDB: newIntermediateNodeDB( + db, + bufferPool, + metrics, + int(config.IntermediateNodeCacheSize), + int(config.IntermediateWriteBufferSize), + int(config.IntermediateWriteBatchSize), + BranchFactorToTokenSize[config.BranchFactor]), + valueNodeDB: newValueNodeDB(db, + bufferPool, + metrics, + int(config.ValueNodeCacheSize)), + history: newTrieHistory(int(config.HistoryLength)), debugTracer: getTracerIfEnabled(config.TraceLevel, DebugTrace, config.Tracer), infoTracer: getTracerIfEnabled(config.TraceLevel, InfoTrace, config.Tracer), - childViews: make([]*trieView, 0, defaultPreallocationSize), + childViews: make([]*view, 0, defaultPreallocationSize), calculateNodeIDsSema: semaphore.NewWeighted(int64(rootGenConcurrency)), - toKey: toKey, - rootKey: toKey(rootKey), + tokenSize: BranchFactorToTokenSize[config.BranchFactor], } - root, err := trieDB.initializeRootIfNeeded() - if err != nil { + if err := trieDB.initializeRoot(); err != nil { return nil, err } // add current root to history (has no changes) trieDB.history.record(&changeSummary{ - rootID: root, + rootID: trieDB.rootID, + rootChange: change[maybe.Maybe[*node]]{ + after: trieDB.root, + }, values: map[Key]*change[maybe.Maybe[[]byte]]{}, nodes: map[Key]*change[*node]{}, }) @@ -292,7 +320,8 @@ func newDatabase( // Deletes every intermediate node and rebuilds them by re-adding every key/value. // TODO: make this more efficient by only clearing out the stale portions of the trie. func (db *merkleDB) rebuild(ctx context.Context, cacheSize int) error { - db.root = newNode(nil, db.rootKey) + db.root = maybe.Nothing[*node]() + db.rootID = ids.Empty // Delete intermediate nodes. 
if err := database.ClearPrefix(db.baseDB, intermediateNodePrefix, rebuildIntermediateDeletionWriteSize); err != nil { @@ -306,10 +335,11 @@ func (db *merkleDB) rebuild(ctx context.Context, cacheSize int) error { ) currentOps := make([]database.BatchOp, 0, opsSizeLimit) valueIt := db.NewIterator() - defer valueIt.Release() + // ensure valueIt is captured and release gets called on the latest copy of valueIt + defer func() { valueIt.Release() }() for valueIt.Next() { if len(currentOps) >= opsSizeLimit { - view, err := newTrieView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) + view, err := newView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) if err != nil { return err } @@ -332,7 +362,7 @@ func (db *merkleDB) rebuild(ctx context.Context, cacheSize int) error { if err := valueIt.Error(); err != nil { return err } - view, err := newTrieView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) + view, err := newView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) if err != nil { return err } @@ -358,7 +388,7 @@ func (db *merkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) e } } - view, err := newTrieView(db, db, ViewChanges{BatchOps: ops}) + view, err := newView(db, db, ViewChanges{BatchOps: ops}) if err != nil { return err } @@ -399,7 +429,7 @@ func (db *merkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe } // Don't need to lock [view] because nobody else has a reference to it. - view, err := newTrieView(db, db, ViewChanges{BatchOps: ops}) + view, err := newView(db, db, ViewChanges{BatchOps: ops}) if err != nil { return err } @@ -425,6 +455,9 @@ func (db *merkleDB) Close() error { return database.ErrClosed } + // mark all children as no longer valid because the db has closed + db.invalidateChildrenExcept(nil) + db.closed = true db.valueNodeDB.Close() // Flush intermediary nodes to disk. 
@@ -444,13 +477,8 @@ func (db *merkleDB) PrefetchPaths(keys [][]byte) error { return database.ErrClosed } - // reuse the view so that it can keep repeated nodes in memory - tempView, err := newTrieView(db, db, ViewChanges{}) - if err != nil { - return err - } for _, key := range keys { - if err := db.prefetchPath(tempView, key); err != nil { + if err := db.prefetchPath(key); err != nil { return err } } @@ -465,21 +493,16 @@ func (db *merkleDB) PrefetchPath(key []byte) error { if db.closed { return database.ErrClosed } - tempView, err := newTrieView(db, db, ViewChanges{}) - if err != nil { - return err - } - - return db.prefetchPath(tempView, key) + return db.prefetchPath(key) } -func (db *merkleDB) prefetchPath(view *trieView, keyBytes []byte) error { - return view.visitPathToKey(db.toKey(keyBytes), func(n *node) error { - if !n.hasValue() { - return db.intermediateNodeDB.nodeCache.Put(n.key, n) +func (db *merkleDB) prefetchPath(keyBytes []byte) error { + return visitPathToKey(db, ToKey(keyBytes), func(n *node) error { + if n.hasValue() { + db.valueNodeDB.nodeCache.Put(n.key, n) + } else { + db.intermediateNodeDB.nodeCache.Put(n.key, n) } - - db.valueNodeDB.nodeCache.Put(n.key, n) return nil }) } @@ -501,11 +524,11 @@ func (db *merkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []e defer db.lock.RUnlock() values := make([][]byte, len(keys)) - errors := make([]error, len(keys)) + getErrors := make([]error, len(keys)) for i, key := range keys { - values[i], errors[i] = db.getValueCopy(db.toKey(key)) + values[i], getErrors[i] = db.getValueCopy(ToKey(key)) } - return values, errors + return values, getErrors } // GetValue returns the value associated with [key]. @@ -517,7 +540,7 @@ func (db *merkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() - return db.getValueCopy(db.toKey(key)) + return db.getValueCopy(ToKey(key)) } // getValueCopy returns a copy of the value for the given [key]. @@ -573,31 +596,23 @@ func (db *merkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { return db.getMerkleRoot(), nil } -// Assumes [db.lock] is read locked. +// Assumes [db.lock] or [db.commitLock] is read locked. func (db *merkleDB) getMerkleRoot() ids.ID { - return db.root.id + return db.rootID } func (db *merkleDB) GetProof(ctx context.Context, key []byte) (*Proof, error) { db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getProof(ctx, key) -} + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetProof") + defer span.End() -// Assumes [db.commitLock] is read locked. -// Assumes [db.lock] is not held -func (db *merkleDB) getProof(ctx context.Context, key []byte) (*Proof, error) { if db.closed { return nil, database.ErrClosed } - view, err := newTrieView(db, db, ViewChanges{}) - if err != nil { - return nil, err - } - // Don't need to lock [view] because nobody else has a reference to it. 
- return view.getProof(ctx, key) + return getProof(db, key) } func (db *merkleDB) GetRangeProof( @@ -609,7 +624,14 @@ func (db *merkleDB) GetRangeProof( db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getRangeProofAtRoot(ctx, db.getMerkleRoot(), start, end, maxLength) + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetRangeProof") + defer span.End() + + if db.closed { + return nil, database.ErrClosed + } + + return getRangeProof(db, start, end, maxLength) } func (db *merkleDB) GetRangeProofAtRoot( @@ -622,30 +644,23 @@ func (db *merkleDB) GetRangeProofAtRoot( db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getRangeProofAtRoot(ctx, rootID, start, end, maxLength) -} + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetRangeProofAtRoot") + defer span.End() -// Assumes [db.commitLock] is read locked. -// Assumes [db.lock] is not held -func (db *merkleDB) getRangeProofAtRoot( - ctx context.Context, - rootID ids.ID, - start maybe.Maybe[[]byte], - end maybe.Maybe[[]byte], - maxLength int, -) (*RangeProof, error) { - if db.closed { + switch { + case db.closed: return nil, database.ErrClosed - } - if maxLength <= 0 { + case maxLength <= 0: return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + case rootID == ids.Empty: + return nil, ErrEmptyProof } - historicalView, err := db.getHistoricalViewForRange(rootID, start, end) + historicalTrie, err := db.getTrieAtRootForRange(rootID, start, end) if err != nil { return nil, err } - return historicalView.GetRangeProof(ctx, start, end, maxLength) + return getRangeProof(historicalTrie, start, end, maxLength) } func (db *merkleDB) GetChangeProof( @@ -656,11 +671,16 @@ func (db *merkleDB) GetChangeProof( end maybe.Maybe[[]byte], maxLength int, ) (*ChangeProof, error) { - if start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1 { + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetChangeProof") + defer span.End() + + switch { + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1: return nil, ErrStartAfterEnd - } - if startRootID == endRootID { + case startRootID == endRootID: return nil, errSameRoot + case endRootID == ids.Empty: + return nil, ErrEmptyProof } db.commitLock.RLock() @@ -702,13 +722,13 @@ func (db *merkleDB) GetChangeProof( // Since we hold [db.commitlock] we must still have sufficient // history to recreate the trie at [endRootID]. 
- historicalView, err := db.getHistoricalViewForRange(endRootID, start, largestKey) + historicalTrie, err := db.getTrieAtRootForRange(endRootID, start, largestKey) if err != nil { return nil, err } if largestKey.HasValue() { - endProof, err := historicalView.getProof(ctx, largestKey.Value()) + endProof, err := getProof(historicalTrie, largestKey.Value()) if err != nil { return nil, err } @@ -716,7 +736,7 @@ func (db *merkleDB) GetChangeProof( } if start.HasValue() { - startProof, err := historicalView.getProof(ctx, start.Value()) + startProof, err := getProof(historicalTrie, start.Value()) if err != nil { return nil, err } @@ -753,7 +773,7 @@ func (db *merkleDB) GetChangeProof( func (db *merkleDB) NewView( _ context.Context, changes ViewChanges, -) (TrieView, error) { +) (View, error) { // ensure the db doesn't change while creating the new view db.commitLock.RLock() defer db.commitLock.RUnlock() @@ -762,7 +782,7 @@ func (db *merkleDB) NewView( return nil, database.ErrClosed } - newView, err := newTrieView(db, db, changes) + newView, err := newView(db, db, changes) if err != nil { return nil, err } @@ -783,8 +803,8 @@ func (db *merkleDB) Has(k []byte) (bool, error) { return false, database.ErrClosed } - _, err := db.getValueWithoutLock(db.toKey(k)) - if err == database.ErrNotFound { + _, err := db.getValueWithoutLock(ToKey(k)) + if errors.Is(err, database.ErrNotFound) { return false, nil } return err == nil, err @@ -835,7 +855,7 @@ func (db *merkleDB) PutContext(ctx context.Context, k, v []byte) error { return database.ErrClosed } - view, err := newTrieView(db, db, ViewChanges{BatchOps: []database.BatchOp{{Key: k, Value: v}}}) + view, err := newView(db, db, ViewChanges{BatchOps: []database.BatchOp{{Key: k, Value: v}}}) if err != nil { return err } @@ -854,7 +874,7 @@ func (db *merkleDB) DeleteContext(ctx context.Context, key []byte) error { return database.ErrClosed } - view, err := newTrieView(db, db, + view, err := newView(db, db, ViewChanges{ BatchOps: []database.BatchOp{{ Key: key, @@ -868,7 +888,7 @@ func (db *merkleDB) DeleteContext(ctx context.Context, key []byte) error { return view.commitToDB(ctx) } -// Assumes values inside of [ops] are safe to reference after the function +// Assumes values inside [ops] are safe to reference after the function // returns. Assumes [db.lock] isn't held. func (db *merkleDB) commitBatch(ops []database.BatchOp) error { db.commitLock.Lock() @@ -878,7 +898,7 @@ func (db *merkleDB) commitBatch(ops []database.BatchOp) error { return database.ErrClosed } - view, err := newTrieView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) + view, err := newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) if err != nil { return err } @@ -887,7 +907,8 @@ func (db *merkleDB) commitBatch(ops []database.BatchOp) error { // commitChanges commits the changes in [trieToCommit] to [db]. // Assumes [trieToCommit]'s node IDs have been calculated. -func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *trieView) error { +// Assumes [db.commitLock] is held. 
+func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error { db.lock.Lock() defer db.lock.Unlock() @@ -921,13 +942,7 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *trieView) e return nil } - rootChange, ok := changes.nodes[db.rootKey] - if !ok { - return errNoNewRoot - } - currentValueNodeBatch := db.valueNodeDB.NewBatch() - _, nodesSpan := db.infoTracer.Start(ctx, "MerkleDB.commitChanges.writeNodes") for key, nodeChange := range changes.nodes { shouldAddIntermediate := nodeChange.after != nil && !nodeChange.after.hasValue() @@ -963,16 +978,23 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *trieView) e return err } - // Only modify in-memory state after the commit succeeds - // so that we don't need to clean up on error. - db.root = rootChange.after db.history.record(changes) - return nil + + // Update root in database. + db.root = changes.rootChange.after + db.rootID = changes.rootID + + if db.root.IsNothing() { + return db.baseDB.Delete(rootDBKey) + } + + rootKey := codec.encodeKey(db.root.Value().key) + return db.baseDB.Put(rootDBKey, rootKey) } // moveChildViewsToDB removes any child views from the trieToCommit and moves them to the db // assumes [db.lock] is held -func (db *merkleDB) moveChildViewsToDB(trieToCommit *trieView) { +func (db *merkleDB) moveChildViewsToDB(trieToCommit *view) { trieToCommit.validityTrackingLock.Lock() defer trieToCommit.validityTrackingLock.Unlock() @@ -980,11 +1002,11 @@ func (db *merkleDB) moveChildViewsToDB(trieToCommit *trieView) { childView.updateParent(db) db.childViews = append(db.childViews, childView) } - trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize) + trieToCommit.childViews = make([]*view, 0, defaultPreallocationSize) } // CommitToDB is a no-op for db since it is already in sync with itself. -// This exists to satisfy the TrieView interface. +// This exists to satisfy the View interface. func (*merkleDB) CommitToDB(context.Context) error { return nil } @@ -1003,7 +1025,7 @@ func (db *merkleDB) VerifyChangeProof( case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: return ErrStartAfterEnd case proof.Empty(): - return ErrNoMerkleProof + return ErrEmptyProof case end.HasValue() && len(proof.KeyChanges) == 0 && len(proof.EndProof) == 0: // We requested an end proof but didn't get one. return ErrNoEndProof @@ -1020,7 +1042,7 @@ func (db *merkleDB) VerifyChangeProof( return err } - smallestKey := maybe.Bind(start, db.toKey) + smallestKey := maybe.Bind(start, ToKey) // Make sure the start proof, if given, is well-formed. if err := verifyProofPath(proof.StartProof, smallestKey); err != nil { @@ -1030,12 +1052,12 @@ func (db *merkleDB) VerifyChangeProof( // Find the greatest key in [proof.KeyChanges] // Note that [proof.EndProof] is a proof for this key. // [largestKey] is also used when we add children of proof nodes to [trie] below. - largestKey := maybe.Bind(end, db.toKey) + largestKey := maybe.Bind(end, ToKey) if len(proof.KeyChanges) > 0 { // If [proof] has key-value pairs, we should insert children // greater than [end] to ancestors of the node containing [end] // so that we get the expected root ID. - largestKey = maybe.Some(db.toKey(proof.KeyChanges[len(proof.KeyChanges)-1].Key)) + largestKey = maybe.Some(ToKey(proof.KeyChanges[len(proof.KeyChanges)-1].Key)) } // Make sure the end proof, if given, is well-formed. 
@@ -1045,7 +1067,7 @@ func (db *merkleDB) VerifyChangeProof( keyValues := make(map[Key]maybe.Maybe[[]byte], len(proof.KeyChanges)) for _, keyValue := range proof.KeyChanges { - keyValues[db.toKey(keyValue.Key)] = keyValue.Value + keyValues[ToKey(keyValue.Key)] = keyValue.Value } // want to prevent commit writes to DB, but not prevent DB reads @@ -1089,7 +1111,7 @@ func (db *merkleDB) VerifyChangeProof( } // Don't need to lock [view] because nobody else has a reference to it. - view, err := newTrieView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) + view, err := newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) if err != nil { return err } @@ -1129,7 +1151,7 @@ func (db *merkleDB) VerifyChangeProof( // Invalidates and removes any child views that aren't [exception]. // Assumes [db.lock] is held. -func (db *merkleDB) invalidateChildrenExcept(exception *trieView) { +func (db *merkleDB) invalidateChildrenExcept(exception *view) { isTrackedView := false for _, childView := range db.childViews { @@ -1139,65 +1161,69 @@ func (db *merkleDB) invalidateChildrenExcept(exception *trieView) { isTrackedView = true } } - db.childViews = make([]*trieView, 0, defaultPreallocationSize) + db.childViews = make([]*view, 0, defaultPreallocationSize) if isTrackedView { db.childViews = append(db.childViews, exception) } } -func (db *merkleDB) initializeRootIfNeeded() (ids.ID, error) { - // not sure if the root exists or had a value or not - // check under both prefixes - var err error - db.root, err = db.intermediateNodeDB.Get(db.rootKey) - if err == database.ErrNotFound { - db.root, err = db.valueNodeDB.Get(db.rootKey) - } - if err == nil { - // Root already exists, so calculate its id - db.root.calculateID(db.metrics) - return db.root.id, nil - } - if err != database.ErrNotFound { - return ids.Empty, err +// If the root is on disk, set [db.root] to it. +// Otherwise leave [db.root] as Nothing. +func (db *merkleDB) initializeRoot() error { + rootKeyBytes, err := db.baseDB.Get(rootDBKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return err + } + // Root isn't on disk. + return nil } - // Root doesn't exist; make a new one. - db.root = newNode(nil, db.rootKey) + // Root is on disk. + rootKey, err := codec.decodeKey(rootKeyBytes) + if err != nil { + return err + } - // update its ID - db.root.calculateID(db.metrics) + // First, see if root is an intermediate node. + var root *node + root, err = db.getEditableNode(rootKey, false /* hasValue */) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return err + } - if err := db.intermediateNodeDB.Put(db.rootKey, db.root); err != nil { - return ids.Empty, err + // The root must be a value node. + root, err = db.getEditableNode(rootKey, true /* hasValue */) + if err != nil { + return err + } } - return db.root.id, nil + db.rootID = root.calculateID(db.metrics) + db.root = maybe.Some(root) + return nil } // Returns a view of the trie as it was when it had root [rootID] for keys within range [start, end]. // If [start] is Nothing, there's no lower bound on the range. // If [end] is Nothing, there's no upper bound on the range. // Assumes [db.commitLock] is read locked. -// Assumes [db.lock] isn't held. 
-func (db *merkleDB) getHistoricalViewForRange( +func (db *merkleDB) getTrieAtRootForRange( rootID ids.ID, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], -) (*trieView, error) { - currentRootID := db.getMerkleRoot() - +) (Trie, error) { // looking for the trie's current root id, so return the trie unmodified - if currentRootID == rootID { - // create an empty trie - return newTrieView(db, db, ViewChanges{}) + if rootID == db.getMerkleRoot() { + return db, nil } changeHistory, err := db.history.getChangesToGetToRoot(rootID, start, end) if err != nil { return nil, err } - return newHistoricalTrieView(db, changeHistory) + return newViewWithChanges(db, changeHistory) } // Returns all keys in range [start, end] that aren't in [keySet]. @@ -1248,12 +1274,51 @@ func (db *merkleDB) getNode(key Key, hasValue bool) (*node, error) { switch { case db.closed: return nil, database.ErrClosed - case key == db.rootKey: - return db.root, nil + case db.root.HasValue() && key == db.root.Value().key: + return db.root.Value(), nil case hasValue: return db.valueNodeDB.Get(key) + default: + return db.intermediateNodeDB.Get(key) } - return db.intermediateNodeDB.Get(key) +} + +// Assumes [db.lock] or [db.commitLock] is read locked. +func (db *merkleDB) getRoot() maybe.Maybe[*node] { + return db.root +} + +func (db *merkleDB) Clear() error { + db.commitLock.Lock() + defer db.commitLock.Unlock() + + db.lock.Lock() + defer db.lock.Unlock() + + // Clear nodes from disk and caches + if err := db.valueNodeDB.Clear(); err != nil { + return err + } + if err := db.intermediateNodeDB.Clear(); err != nil { + return err + } + + // Clear root + db.root = maybe.Nothing[*node]() + db.rootID = ids.Empty + + // Clear history + db.history = newTrieHistory(db.history.maxHistoryLen) + db.history.record(&changeSummary{ + rootID: db.rootID, + values: map[Key]*change[maybe.Maybe[[]byte]]{}, + nodes: map[Key]*change[*node]{}, + }) + return nil +} + +func (db *merkleDB) getTokenSize() int { + return db.tokenSize } // Returns [key] prefixed by [prefix]. @@ -1284,11 +1349,10 @@ func getBufferFromPool(bufferPool *sync.Pool, size int) []byte { return buffer } -// cacheEntrySize returns a rough approximation of the memory consumed by storing the key and node +// cacheEntrySize returns a rough approximation of the memory consumed by storing the key and node. func cacheEntrySize(key Key, n *node) int { if n == nil { - return len(key.Bytes()) + return cacheEntryOverHead + len(key.Bytes()) } - // nodes cache their bytes representation so the total memory consumed is roughly twice that - return len(key.Bytes()) + 2*len(n.bytes()) + return cacheEntryOverHead + len(key.Bytes()) + codec.encodedDBNodeSize(&n.dbNode) } diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index d4f09803cdaf..48703556b72a 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -13,6 +13,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -41,13 +42,14 @@ func newDB(ctx context.Context, db database.Database, config Config) (*merkleDB, func newDefaultConfig() Config { return Config{ - EvictionBatchSize: 10, - HistoryLength: defaultHistoryLength, - ValueNodeCacheSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - Reg: prometheus.NewRegistry(), - Tracer: trace.Noop, - BranchFactor: BranchFactor16, + IntermediateWriteBatchSize: 10, + HistoryLength: defaultHistoryLength, + ValueNodeCacheSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + IntermediateWriteBufferSize: units.KiB, + Reg: prometheus.NewRegistry(), + Tracer: trace.Noop, + BranchFactor: BranchFactor16, } } @@ -63,7 +65,7 @@ func Test_MerkleDB_Get_Safety(t *testing.T) { val, err := db.Get(keyBytes) require.NoError(err) - n, err := db.getNode(ToKey(keyBytes, BranchFactor16), true) + n, err := db.getNode(ToKey(keyBytes), true) require.NoError(err) // node's value shouldn't be affected by the edit @@ -96,11 +98,13 @@ func Test_MerkleDB_GetValues_Safety(t *testing.T) { } func Test_MerkleDB_DB_Interface(t *testing.T) { - for _, bf := range branchFactors { - for _, test := range database.Tests { - db, err := getBasicDBWithBranchFactor(bf) - require.NoError(t, err) - test(t, db) + for _, bf := range validBranchFactors { + for name, test := range database.Tests { + t.Run(fmt.Sprintf("%s_%d", name, bf), func(t *testing.T) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(t, err) + test(t, db) + }) } } } @@ -108,11 +112,13 @@ func Test_MerkleDB_DB_Interface(t *testing.T) { func Benchmark_MerkleDB_DBInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bf := range branchFactors { - for _, bench := range database.Benchmarks { - db, err := getBasicDBWithBranchFactor(bf) - require.NoError(b, err) - bench(b, db, fmt.Sprintf("merkledb_%d", bf), keys, values) + for _, bf := range validBranchFactors { + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("merkledb_%d_%d_pairs_%d_keys_%d_values_%s", bf, size[0], size[1], size[2], name), func(b *testing.B) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(b, err) + bench(b, db, keys, values) + }) } } } @@ -150,7 +156,7 @@ func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) { require.NoError(db.Close()) - // reloading the db, should set the root back to the one that was saved to [baseDB] + // reloading the db should set the root back to the one that was saved to [baseDB] db, err = New( context.Background(), baseDB, @@ -296,15 +302,15 @@ func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) { sibling2, err := dbTrie.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.False(sibling1.(*trieView).isInvalid()) - require.False(sibling2.(*trieView).isInvalid()) + require.False(sibling1.(*view).isInvalid()) + require.False(sibling2.(*view).isInvalid()) // Committing viewToCommit should invalidate siblings require.NoError(viewToCommit.CommitToDB(context.Background())) - require.True(sibling1.(*trieView).isInvalid()) - require.True(sibling2.(*trieView).isInvalid()) - require.False(viewToCommit.(*trieView).isInvalid()) + require.True(sibling1.(*view).isInvalid()) + require.True(sibling2.(*view).isInvalid()) + require.False(viewToCommit.(*view).isInvalid()) } func 
Test_MerkleDB_CommitRangeProof_DeletesValuesInRange(t *testing.T) { @@ -500,7 +506,7 @@ func TestDatabaseNewUntrackedView(t *testing.T) { require.NoError(err) // Create a new untracked view. - view, err := newTrieView( + view, err := newView( db, db, ViewChanges{ @@ -560,7 +566,7 @@ func TestDatabaseCommitChanges(t *testing.T) { // Committing an invalid view should fail. invalidView, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - invalidView.(*trieView).invalidate() + invalidView.(*view).invalidate() err = invalidView.CommitToDB(context.Background()) require.ErrorIs(err, ErrInvalid) @@ -581,22 +587,22 @@ func TestDatabaseCommitChanges(t *testing.T) { }, ) require.NoError(err) - require.IsType(&trieView{}, view1Intf) - view1 := view1Intf.(*trieView) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) view1Root, err := view1.GetMerkleRoot(context.Background()) require.NoError(err) // Make a second view view2Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view2Intf) - view2 := view2Intf.(*trieView) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // Make a view atop a view view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view3Intf) - view3 := view3Intf.(*trieView) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -645,18 +651,18 @@ func TestDatabaseInvalidateChildrenExcept(t *testing.T) { // Create children view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view1Intf) - view1 := view1Intf.(*trieView) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) view2Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view2Intf) - view2 := view2Intf.(*trieView) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) view3Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view3Intf) - view3 := view3Intf.(*trieView) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) db.invalidateChildrenExcept(view1) @@ -773,6 +779,49 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { } } +func TestMerkleDBClear(t *testing.T) { + require := require.New(t) + + // Make a database and insert some key-value pairs. + db, err := getBasicDB() + require.NoError(err) + + emptyRootID := db.getMerkleRoot() + + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + insertRandomKeyValues( + require, + r, + []database.Database{db}, + 1_000, + 0.25, + ) + + // Clear the database. + require.NoError(db.Clear()) + + // Assert that the database is empty. + iter := db.NewIterator() + defer iter.Release() + require.False(iter.Next()) + require.Equal(ids.Empty, db.getMerkleRoot()) + require.True(db.root.IsNothing()) + + // Assert caches are empty. + require.Zero(db.valueNodeDB.nodeCache.Len()) + require.Zero(db.intermediateNodeDB.writeBuffer.currentSize) + + // Assert history has only the clearing change. 
+ require.Len(db.history.lastChanges, 1) + change, ok := db.history.lastChanges[emptyRootID] + require.True(ok) + require.Empty(change.nodes) + require.Empty(change.values) +} + func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { f.Fuzz( func( @@ -785,7 +834,7 @@ func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { } require := require.New(t) r := rand.New(rand.NewSource(randSeed)) // #nosec G404 - for _, bf := range branchFactors { + for _, ts := range validTokenSizes { runRandDBTest( require, r, @@ -795,7 +844,7 @@ func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { size, 0.01, /*checkHashProbability*/ ), - bf, + ts, ) } }) @@ -813,7 +862,7 @@ func FuzzMerkleDBInitialValuesRandomizedActions(f *testing.F) { } require := require.New(t) r := rand.New(rand.NewSource(randSeed)) // #nosec G404 - for _, bf := range branchFactors { + for _, ts := range validTokenSizes { runRandDBTest( require, r, @@ -824,7 +873,7 @@ func FuzzMerkleDBInitialValuesRandomizedActions(f *testing.F) { numSteps, 0.001, /*checkHashProbability*/ ), - bf, + ts, ) } }) @@ -851,8 +900,8 @@ const ( opMax // boundary value, not an actual op ) -func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf BranchFactor) { - db, err := getBasicDBWithBranchFactor(bf) +func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, tokenSize int) { + db, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) const ( @@ -877,13 +926,13 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br case opUpdate: require.NoError(currentBatch.Put(step.key, step.value)) - uncommittedKeyValues[ToKey(step.key, bf)] = step.value - uncommittedDeletes.Remove(ToKey(step.key, bf)) + uncommittedKeyValues[ToKey(step.key)] = step.value + uncommittedDeletes.Remove(ToKey(step.key)) case opDelete: require.NoError(currentBatch.Delete(step.key)) - uncommittedDeletes.Add(ToKey(step.key, bf)) - delete(uncommittedKeyValues, ToKey(step.key, bf)) + uncommittedDeletes.Add(ToKey(step.key)) + delete(uncommittedKeyValues, ToKey(step.key)) case opGenerateRangeProof: root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) @@ -902,6 +951,10 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br } rangeProof, err := db.GetRangeProofAtRoot(context.Background(), root, start, end, maxProofLen) + if root == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + continue + } require.NoError(err) require.LessOrEqual(len(rangeProof.KeyValues), maxProofLen) @@ -910,6 +963,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br start, end, root, + tokenSize, )) case opGenerateChangeProof: root, err := db.GetMerkleRoot(context.Background()) @@ -934,10 +988,14 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.ErrorIs(err, errSameRoot) continue } + if root == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + continue + } require.NoError(err) require.LessOrEqual(len(changeProof.KeyChanges), maxProofLen) - changeProofDB, err := getBasicDBWithBranchFactor(bf) + changeProofDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) require.NoError(changeProofDB.VerifyChangeProof( @@ -984,10 +1042,10 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.ErrorIs(err, database.ErrNotFound) } - want := values[ToKey(step.key, bf)] + want := values[ToKey(step.key)] require.True(bytes.Equal(want, v)) // 
Use bytes.Equal so nil treated equal to []byte{} - trieValue, err := getNodeValueWithBranchFactor(db, string(step.key), bf) + trieValue, err := getNodeValue(db, string(step.key)) if err != nil { require.ErrorIs(err, database.ErrNotFound) } @@ -995,7 +1053,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.True(bytes.Equal(want, trieValue)) // Use bytes.Equal so nil treated equal to []byte{} case opCheckhash: // Create a view with the same key-values as [db] - newDB, err := getBasicDBWithBranchFactor(bf) + newDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) ops := make([]database.BatchOp, 0, len(values)) @@ -1093,7 +1151,7 @@ func generateRandTestWithKeys( step.value = genEnd(step.key) case opCheckhash: // this gets really expensive so control how often it happens - if r.Float64() < checkHashProbability { + if r.Float64() > checkHashProbability { continue } } @@ -1195,3 +1253,40 @@ func insertRandomKeyValues( } } } + +func TestGetRangeProofAtRootEmptyRootID(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + _, err = db.GetRangeProofAtRoot( + context.Background(), + ids.Empty, + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 10, + ) + require.ErrorIs(err, ErrEmptyProof) +} + +func TestGetChangeProofEmptyRootID(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + require.NoError(db.Put([]byte("key"), []byte("value"))) + + rootID := db.getMerkleRoot() + + _, err = db.GetChangeProof( + context.Background(), + rootID, + ids.Empty, + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 10, + ) + require.ErrorIs(err, ErrEmptyProof) +} diff --git a/x/merkledb/helpers_test.go b/x/merkledb/helpers_test.go index 3cd84ce11e7c..acb620aba5f1 100644 --- a/x/merkledb/helpers_test.go +++ b/x/merkledb/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -52,13 +52,13 @@ func writeBasicBatch(t *testing.T, db *merkleDB) { func newRandomProofNode(r *rand.Rand) ProofNode { key := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(key) // #nosec G404 - serializedKey := ToKey(key, BranchFactor16) + serializedKey := ToKey(key) val := make([]byte, r.Intn(64)) // #nosec G404 _, _ = r.Read(val) // #nosec G404 children := map[byte]ids.ID{} - for j := 0; j < int(BranchFactor16); j++ { + for j := 0; j < 16; j++ { if r.Float64() < 0.5 { var childID ids.ID _, _ = r.Read(childID[:]) // #nosec G404 diff --git a/x/merkledb/history.go b/x/merkledb/history.go index c82fbb1e5f78..22d87cd1cb48 100644 --- a/x/merkledb/history.go +++ b/x/merkledb/history.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -15,7 +15,10 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -var ErrInsufficientHistory = errors.New("insufficient history to generate proof") +var ( + ErrInsufficientHistory = errors.New("insufficient history to generate proof") + ErrNoEndRoot = fmt.Errorf("%w: end root not found", ErrInsufficientHistory) +) // stores previous trie states type trieHistory struct { @@ -32,8 +35,6 @@ type trieHistory struct { // Each change is tagged with this monotonic increasing number. 
nextInsertNumber uint64 - - toKey func([]byte) Key } // Tracks the beginning and ending state of a value. @@ -51,26 +52,30 @@ type changeSummaryAndInsertNumber struct { insertNumber uint64 } -// Tracks all of the node and value changes that resulted in the rootID. +// Tracks all the node and value changes that resulted in the rootID. type changeSummary struct { + // The ID of the trie after these changes. rootID ids.ID - nodes map[Key]*change[*node] - values map[Key]*change[maybe.Maybe[[]byte]] + // The root before/after this change. + // Set in [calculateNodeIDs]. + rootChange change[maybe.Maybe[*node]] + nodes map[Key]*change[*node] + values map[Key]*change[maybe.Maybe[[]byte]] } func newChangeSummary(estimatedSize int) *changeSummary { return &changeSummary{ - nodes: make(map[Key]*change[*node], estimatedSize), - values: make(map[Key]*change[maybe.Maybe[[]byte]], estimatedSize), + nodes: make(map[Key]*change[*node], estimatedSize), + values: make(map[Key]*change[maybe.Maybe[[]byte]], estimatedSize), + rootChange: change[maybe.Maybe[*node]]{}, } } -func newTrieHistory(maxHistoryLookback int, toKey func([]byte) Key) *trieHistory { +func newTrieHistory(maxHistoryLookback int) *trieHistory { return &trieHistory{ maxHistoryLen: maxHistoryLookback, history: buffer.NewUnboundedDeque[*changeSummaryAndInsertNumber](maxHistoryLookback), lastChanges: make(map[ids.ID]*changeSummaryAndInsertNumber), - toKey: toKey, } } @@ -80,6 +85,8 @@ func newTrieHistory(maxHistoryLookback int, toKey func([]byte) Key) *trieHistory // If [end] is Nothing, there's no upper bound on the range. // Returns [ErrInsufficientHistory] if the history is insufficient // to generate the proof. +// Returns [ErrNoEndRoot], which wraps [ErrInsufficientHistory], if +// the [endRoot] isn't in the history. func (th *trieHistory) getValueChanges( startRoot ids.ID, endRoot ids.ID, @@ -96,13 +103,9 @@ func (th *trieHistory) getValueChanges( } // [endRootChanges] is the last change in the history resulting in [endRoot]. - // TODO when we update to minimum go version 1.20.X, make this return another - // wrapped error ErrNoEndRoot. In NetworkServer.HandleChangeProofRequest, if we return - // that error, we know we shouldn't try to generate a range proof since we - // lack the necessary history. endRootChanges, ok := th.lastChanges[endRoot] if !ok { - return nil, fmt.Errorf("%w: end root %s not found", ErrInsufficientHistory, endRoot) + return nil, fmt.Errorf("%w: %s", ErrNoEndRoot, endRoot) } // Confirm there's a change resulting in [startRoot] before @@ -158,8 +161,8 @@ func (th *trieHistory) getValueChanges( // in order to stay within the [maxLength] limit if necessary. 
changedKeys = set.Set[Key]{} - startKey = maybe.Bind(start, th.toKey) - endKey = maybe.Bind(end, th.toKey) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) // For each element in the history in the range between [startRoot]'s // last appearance (exclusive) and [endRoot]'s last appearance (inclusive), @@ -237,8 +240,8 @@ func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start maybe.Maybe[[] } var ( - startKey = maybe.Bind(start, th.toKey) - endKey = maybe.Bind(end, th.toKey) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) combinedChanges = newChangeSummary(defaultPreallocationSize) mostRecentChangeInsertNumber = th.nextInsertNumber - 1 mostRecentChangeIndex = th.history.Len() - 1 @@ -252,6 +255,13 @@ func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start maybe.Maybe[[] for i := mostRecentChangeIndex; i > lastRootChangeIndex; i-- { changes, _ := th.history.Index(i) + if i == mostRecentChangeIndex { + combinedChanges.rootChange.before = changes.rootChange.after + } + if i == lastRootChangeIndex+1 { + combinedChanges.rootChange.after = changes.rootChange.before + } + for key, changedNode := range changes.nodes { combinedChanges.nodes[key] = &change[*node]{ after: changedNode.before, diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 1261c92b22df..09c84321f50c 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -36,8 +36,8 @@ func Test_History_Simple(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key"), []byte("value0"))) @@ -45,7 +45,7 @@ func Test_History_Simple(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("value1"))) @@ -54,7 +54,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -62,7 +62,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, 
maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("k"))) @@ -78,7 +78,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Large(t *testing.T) { @@ -141,7 +141,7 @@ func Test_History_Large(t *testing.T) { require.NoError(err) require.NotNil(proof) - require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i])) + require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor])) } } } @@ -164,13 +164,13 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { require.NoError(batch.Put([]byte("key"), []byte("value"))) require.NoError(batch.Write()) - toBeDeletedRoot := db.getMerkleRoot() + root1 := db.getMerkleRoot() batch = db.NewBatch() require.NoError(batch.Put([]byte("key"), []byte("value0"))) require.NoError(batch.Write()) - startRoot := db.getMerkleRoot() + root2 := db.getMerkleRoot() batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("value0"))) @@ -184,31 +184,30 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { require.NoError(batch.Put([]byte("key2"), []byte("value3"))) require.NoError(batch.Write()) - endRoot := db.getMerkleRoot() + root3 := db.getMerkleRoot() // ensure these start as valid calls - _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + _, err = db.history.getValueChanges(root1, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.NoError(err) - _, err = db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + _, err = db.history.getValueChanges(root2, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.NoError(err) - _, err = db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) + _, err = db.history.getValueChanges(root2, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) require.ErrorIs(err, ErrInvalidMaxLength) - _, err = db.history.getValueChanges(endRoot, startRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + _, err = db.history.getValueChanges(root3, root2, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.ErrorIs(err, ErrInsufficientHistory) - // trigger the first root to be deleted by exiting the lookback window + // Cause root1 to be removed from the history batch = db.NewBatch() require.NoError(batch.Put([]byte("key2"), []byte("value4"))) require.NoError(batch.Write()) - // now this root should no longer be present - _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + _, err = 
db.history.getValueChanges(root1, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.ErrorIs(err, ErrInsufficientHistory) // same start/end roots should yield an empty changelist - changes, err := db.history.getValueChanges(endRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) + changes, err := db.history.getValueChanges(root3, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) require.Empty(changes.values) } @@ -240,6 +239,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, + db.tokenSize, )) // write a new value into the db, now there should be 2 roots in the history @@ -256,6 +256,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, + db.tokenSize, )) // trigger a new root to be added to the history, which should cause rollover since there can only be 2 @@ -312,10 +313,10 @@ func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { // changes should still be collectable even though the history has had to loop due to hitting max size changes, err := db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) - require.Contains(changes.values, ToKey([]byte("key1"), BranchFactor16)) - require.Equal([]byte("value1"), changes.values[ToKey([]byte("key1"), BranchFactor16)].after.Value()) - require.Contains(changes.values, ToKey([]byte("key2"), BranchFactor16)) - require.Equal([]byte("value3"), changes.values[ToKey([]byte("key2"), BranchFactor16)].after.Value()) + require.Contains(changes.values, ToKey([]byte("key1"))) + require.Equal([]byte("value1"), changes.values[ToKey([]byte("key1"))].after.Value()) + require.Contains(changes.values, ToKey([]byte("key2"))) + require.Equal([]byte("value3"), changes.values[ToKey([]byte("key2"))].after.Value()) } func Test_History_RepeatedRoot(t *testing.T) { @@ -336,8 +337,8 @@ func Test_History_RepeatedRoot(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("other"))) @@ -347,7 +348,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) // revert state to be the same as in orig proof batch = db.NewBatch() @@ -359,7 +360,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), 
maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_ExcessDeletes(t *testing.T) { @@ -378,8 +379,8 @@ func Test_History_ExcessDeletes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("key1"))) @@ -391,7 +392,7 @@ func Test_History_ExcessDeletes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_DontIncludeAllNodes(t *testing.T) { @@ -410,8 +411,8 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("z"), []byte("z"))) @@ -419,7 +420,7 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching2Nodes(t *testing.T) { @@ -438,8 +439,8 @@ func Test_History_Branching2Nodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -447,7 +448,7 @@ func Test_History_Branching2Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 
origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching3Nodes(t *testing.T) { @@ -466,8 +467,8 @@ func Test_History_Branching3Nodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key321"), []byte("value321"))) @@ -475,7 +476,7 @@ func Test_History_Branching3Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_MaxLength(t *testing.T) { @@ -572,9 +573,7 @@ func TestHistoryRecord(t *testing.T) { require := require.New(t) maxHistoryLen := 3 - th := newTrieHistory(maxHistoryLen, func(bytes []byte) Key { - return ToKey(bytes, BranchFactor16) - }) + th := newTrieHistory(maxHistoryLen) changes := []*changeSummary{} for i := 0; i < maxHistoryLen; i++ { // Fill the history @@ -647,22 +646,23 @@ func TestHistoryRecord(t *testing.T) { func TestHistoryGetChangesToRoot(t *testing.T) { maxHistoryLen := 3 - history := newTrieHistory(maxHistoryLen, func(bytes []byte) Key { - return ToKey(bytes, BranchFactor16) - }) + history := newTrieHistory(maxHistoryLen) changes := []*changeSummary{} for i := 0; i < maxHistoryLen; i++ { // Fill the history changes = append(changes, &changeSummary{ rootID: ids.GenerateTestID(), + rootChange: change[maybe.Maybe[*node]]{ + before: maybe.Some(&node{}), + }, nodes: map[Key]*change[*node]{ - history.toKey([]byte{byte(i)}): { - before: &node{id: ids.GenerateTestID()}, - after: &node{id: ids.GenerateTestID()}, + ToKey([]byte{byte(i)}): { + before: &node{}, + after: &node{}, }, }, values: map[Key]*change[maybe.Maybe[[]byte]]{ - history.toKey([]byte{byte(i)}): { + ToKey([]byte{byte(i)}): { before: maybe.Some([]byte{byte(i)}), after: maybe.Some([]byte{byte(i + 1)}), }, @@ -690,7 +690,8 @@ func TestHistoryGetChangesToRoot(t *testing.T) { name: "most recent change", rootID: changes[maxHistoryLen-1].rootID, validateFunc: func(require *require.Assertions, got *changeSummary) { - require.Equal(newChangeSummary(defaultPreallocationSize), got) + expected := newChangeSummary(defaultPreallocationSize) + require.Equal(expected, got) }, }, { @@ -701,7 +702,7 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 1) require.Len(got.values, 1) reversedChanges := changes[maxHistoryLen-1] - removedKey := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges.nodes[removedKey].before, got.nodes[removedKey].after) require.Equal(reversedChanges.values[removedKey].before, got.values[removedKey].after) require.Equal(reversedChanges.values[removedKey].after, 
got.values[removedKey].before) @@ -714,12 +715,12 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 2) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) @@ -733,12 +734,12 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 1) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) }, }, @@ -750,10 +751,10 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 1) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) diff --git a/x/merkledb/intermediate_node_db.go b/x/merkledb/intermediate_node_db.go index e146b943d6c2..b0318e99064d 100644 --- a/x/merkledb/intermediate_node_db.go +++ b/x/merkledb/intermediate_node_db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -6,6 +6,8 @@ package merkledb import ( "sync" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" ) @@ -13,7 +15,7 @@ const defaultBufferLength = 256 // Holds intermediate nodes. That is, those without values. // Changes to this database aren't written to [baseDB] until -// they're evicted from the [nodeCache] or Flush is called.. +// they're evicted from the [nodeCache] or Flush is called. 
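// Illustrative sketch (assumed usage, mirroring the tests further below; bufferPool,
// metrics and the size arguments are placeholders): nodes written through Put stay in
// the in-memory write buffer until they are evicted or Flush is called, at which point
// they are persisted to [baseDB] under [intermediateNodePrefix].
//
//	db := newIntermediateNodeDB(memdb.New(), bufferPool, metrics, cacheSize, bufferSize, evictionBatchSize, 4)
//	_ = db.Put(ToKey([]byte{0x01}), newNode(ToKey([]byte{0x01}))) // buffered in memory only
//	_ = db.Flush()                                                // written to baseDB with the prefix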
type intermediateNodeDB struct { // Holds unused []byte bufferPool *sync.Pool @@ -22,42 +24,51 @@ type intermediateNodeDB struct { // Keys written to [baseDB] are prefixed with [intermediateNodePrefix]. baseDB database.Database - // If a value is nil, the corresponding key isn't in the trie. + // The write buffer contains nodes that have been changed but have not been written to disk. // Note that a call to Put may cause a node to be evicted // from the cache, which will call [OnEviction]. // A non-nil error returned from Put is considered fatal. // Keys in [nodeCache] aren't prefixed with [intermediateNodePrefix]. - nodeCache onEvictCache[Key, *node] + writeBuffer onEvictCache[Key, *node] + + // If a value is nil, the corresponding key isn't in the trie. + nodeCache cache.Cacher[Key, *node] + // the number of bytes to evict during an eviction batch evictionBatchSize int metrics merkleMetrics + tokenSize int } func newIntermediateNodeDB( db database.Database, bufferPool *sync.Pool, metrics merkleMetrics, - size int, + cacheSize int, + writeBufferSize int, evictionBatchSize int, + tokenSize int, ) *intermediateNodeDB { result := &intermediateNodeDB{ metrics: metrics, baseDB: db, bufferPool: bufferPool, evictionBatchSize: evictionBatchSize, + tokenSize: tokenSize, + nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), } - result.nodeCache = newOnEvictCache( - size, + result.writeBuffer = newOnEvictCache( + writeBufferSize, cacheEntrySize, result.onEviction, ) + return result } // A non-nil error is considered fatal and closes [db.baseDB]. func (db *intermediateNodeDB) onEviction(key Key, n *node) error { writeBatch := db.baseDB.NewBatch() - totalSize := cacheEntrySize(key, n) if err := db.addToBatch(writeBatch, key, n); err != nil { _ = db.baseDB.Close() @@ -70,7 +81,7 @@ func (db *intermediateNodeDB) onEviction(key Key, n *node) error { // node, because each time this method is called we do a disk write. // Evicts a total number of bytes, rather than a number of nodes for totalSize < db.evictionBatchSize { - key, n, exists := db.nodeCache.removeOldest() + key, n, exists := db.writeBuffer.removeOldest() if !exists { // The cache is empty. break @@ -108,6 +119,15 @@ func (db *intermediateNodeDB) Get(key Key) (*node, error) { } db.metrics.IntermediateNodeCacheMiss() + if cachedValue, isCached := db.writeBuffer.Get(key); isCached { + db.metrics.IntermediateNodeCacheHit() + if cachedValue == nil { + return nil, database.ErrNotFound + } + return cachedValue, nil + } + db.metrics.IntermediateNodeCacheMiss() + dbKey := db.constructDBKey(key) db.metrics.DatabaseNodeRead() nodeBytes, err := db.baseDB.Get(dbKey) @@ -121,25 +141,41 @@ func (db *intermediateNodeDB) Get(key Key) (*node, error) { // constructDBKey returns a key that can be used in [db.baseDB]. // We need to be able to differentiate between two keys of equal -// byte length but different token length, so we add padding to differentiate. +// byte length but different bit length, so we add padding to differentiate. // Additionally, we add a prefix indicating it is part of the intermediateNodeDB. 
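// Worked example (illustrative values, assuming 4-bit tokens): a 4-bit key and an
// 8-bit key can share the byte 0x10, so a trailing 1-token is appended before storage
// to keep their database keys distinct:
//
//	short := ToKey([]byte{0x10}).Take(4)      // 4 bits:  0b0001
//	long := ToKey([]byte{0x10})               // 8 bits:  0b0001_0000
//	_ = short.Extend(ToToken(1, 4)).Bytes()   // [0b0001_0001]
//	_ = long.Extend(ToToken(1, 4)).Bytes()    // [0b0001_0000, 0b0001_0000]
//
// Both results are additionally prefixed with [intermediateNodePrefix].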
func (db *intermediateNodeDB) constructDBKey(key Key) []byte { - if key.branchFactor == BranchFactor256 { - // For BranchFactor256, no padding is needed since byte length == token length + if db.tokenSize == 8 { + // For tokens of size byte, no padding is needed since byte length == token length return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Bytes()) } - return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Append(1).Bytes()) + return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Extend(ToToken(1, db.tokenSize)).Bytes()) } func (db *intermediateNodeDB) Put(key Key, n *node) error { - return db.nodeCache.Put(key, n) + db.nodeCache.Put(key, n) + return db.writeBuffer.Put(key, n) } func (db *intermediateNodeDB) Flush() error { - return db.nodeCache.Flush() + db.nodeCache.Flush() + return db.writeBuffer.Flush() } func (db *intermediateNodeDB) Delete(key Key) error { - return db.nodeCache.Put(key, nil) + db.nodeCache.Put(key, nil) + return db.writeBuffer.Put(key, nil) +} + +func (db *intermediateNodeDB) Clear() error { + db.nodeCache.Flush() + + // Reset the buffer. Note we don't flush because that would cause us to + // persist intermediate nodes we're about to delete. + db.writeBuffer = newOnEvictCache( + db.writeBuffer.maxSize, + db.writeBuffer.size, + db.writeBuffer.onEviction, + ) + return database.AtomicClearPrefix(db.baseDB, db.baseDB, intermediateNodePrefix) } diff --git a/x/merkledb/intermediate_node_db_test.go b/x/merkledb/intermediate_node_db_test.go index 3d40aa7f8a05..26ad722ffa45 100644 --- a/x/merkledb/intermediate_node_db_test.go +++ b/x/merkledb/intermediate_node_db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -23,13 +23,15 @@ import ( func Test_IntermediateNodeDB(t *testing.T) { require := require.New(t) - n := newNode(nil, ToKey([]byte{0x00}, BranchFactor16)) + n := newNode(ToKey([]byte{0x00})) n.setValue(maybe.Some([]byte{byte(0x02)})) nodeSize := cacheEntrySize(n.key, n) // use exact multiple of node size so require.Equal(1, db.nodeCache.fifo.Len()) is correct later - cacheSize := nodeSize * 20 - evictionBatchSize := cacheSize + cacheSize := nodeSize * 100 + bufferSize := nodeSize * 20 + + evictionBatchSize := bufferSize baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, @@ -38,12 +40,14 @@ func Test_IntermediateNodeDB(t *testing.T) { }, &mockMetrics{}, cacheSize, + bufferSize, evictionBatchSize, + 4, ) // Put a key-node pair - node1Key := ToKey([]byte{0x01}, BranchFactor16) - node1 := newNode(nil, node1Key) + node1Key := ToKey([]byte{0x01}) + node1 := newNode(node1Key) node1.setValue(maybe.Some([]byte{byte(0x01)})) require.NoError(db.Put(node1Key, node1)) @@ -53,7 +57,7 @@ func Test_IntermediateNodeDB(t *testing.T) { require.Equal(node1, node1Read) // Overwrite the key-node pair - node1Updated := newNode(nil, node1Key) + node1Updated := newNode(node1Key) node1Updated.setValue(maybe.Some([]byte{byte(0x02)})) require.NoError(db.Put(node1Key, node1Updated)) @@ -73,11 +77,11 @@ func Test_IntermediateNodeDB(t *testing.T) { expectedSize := 0 added := 0 for { - key := ToKey([]byte{byte(added)}, BranchFactor16) - node := newNode(nil, emptyKey(BranchFactor16)) + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) node.setValue(maybe.Some([]byte{byte(added)})) newExpectedSize := expectedSize + cacheEntrySize(key, node) - if newExpectedSize > cacheSize { + if newExpectedSize > bufferSize { // Don't trigger eviction. break } @@ -88,25 +92,25 @@ func Test_IntermediateNodeDB(t *testing.T) { } // Assert cache has expected number of elements - require.Equal(added, db.nodeCache.fifo.Len()) + require.Equal(added, db.writeBuffer.fifo.Len()) // Put one more element in the cache, which should trigger an eviction // of all but 2 elements. 2 elements remain rather than 1 element because of // the added key prefix increasing the size tracked by the batch. - key := ToKey([]byte{byte(added)}, BranchFactor16) - node := newNode(nil, emptyKey(BranchFactor16)) + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) node.setValue(maybe.Some([]byte{byte(added)})) require.NoError(db.Put(key, node)) // Assert cache has expected number of elements - require.Equal(1, db.nodeCache.fifo.Len()) - gotKey, _, ok := db.nodeCache.fifo.Oldest() + require.Equal(1, db.writeBuffer.fifo.Len()) + gotKey, _, ok := db.writeBuffer.fifo.Oldest() require.True(ok) - require.Equal(ToKey([]byte{byte(added)}, BranchFactor16), gotKey) + require.Equal(ToKey([]byte{byte(added)}), gotKey) // Get a node from the base database // Use an early key that has been evicted from the cache - _, inCache := db.nodeCache.Get(node1Key) + _, inCache := db.writeBuffer.Get(node1Key) require.False(inCache) nodeRead, err := db.Get(node1Key) require.NoError(err) @@ -116,7 +120,7 @@ func Test_IntermediateNodeDB(t *testing.T) { require.NoError(db.Flush()) // Assert the cache is empty - require.Zero(db.nodeCache.fifo.Len()) + require.Zero(db.writeBuffer.fifo.Len()) // Assert the evicted cache elements were written to disk with prefix. 
it := baseDB.NewIteratorWithPrefix(intermediateNodePrefix) @@ -131,44 +135,50 @@ func Test_IntermediateNodeDB(t *testing.T) { } func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { + bufferSize := 200 cacheSize := 200 - evictionBatchSize := cacheSize + evictionBatchSize := bufferSize baseDB := memdb.New() - db := newIntermediateNodeDB( - baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, - &mockMetrics{}, - cacheSize, - evictionBatchSize, - ) + f.Fuzz(func( t *testing.T, key []byte, tokenLength uint, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - p := ToKey(key, branchFactor) - if p.tokenLength <= int(tokenLength) { + for _, tokenSize := range validTokenSizes { + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + tokenSize, + ) + + p := ToKey(key) + uBitLength := tokenLength * uint(tokenSize) + if uBitLength >= uint(p.length) { t.SkipNow() } - p = p.Take(int(tokenLength)) + p = p.Take(int(uBitLength)) constructedKey := db.constructDBKey(p) baseLength := len(p.value) + len(intermediateNodePrefix) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) switch { - case branchFactor == BranchFactor256: + case tokenSize == 8: // for keys with tokens of size byte, no padding is added require.Equal(p.Bytes(), constructedKey[len(intermediateNodePrefix):]) case p.hasPartialByte(): require.Len(constructedKey, baseLength) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) default: // when a whole number of bytes, there is an extra padding byte require.Len(constructedKey, baseLength+1) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) } } }) @@ -177,7 +187,8 @@ func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { require := require.New(t) cacheSize := 200 - evictionBatchSize := cacheSize + bufferSize := 200 + evictionBatchSize := bufferSize baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, @@ -186,11 +197,13 @@ func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { }, &mockMetrics{}, cacheSize, + bufferSize, evictionBatchSize, + 4, ) db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) - constructedKey := db.constructDBKey(ToKey([]byte{}, BranchFactor16)) + constructedKey := db.constructDBKey(ToKey([]byte{})) require.Len(constructedKey, 2) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) require.Equal(byte(16), constructedKey[len(constructedKey)-1]) @@ -201,9 +214,81 @@ func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { }, } db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) - p := ToKey([]byte{0xF0}, BranchFactor16).Take(1) + p := ToKey([]byte{0xF0}).Take(4) constructedKey = db.constructDBKey(p) require.Len(constructedKey, 2) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, 4)).Bytes(), constructedKey[len(intermediateNodePrefix):]) +} + +func TestIntermediateNodeDBClear(t *testing.T) { + require := require.New(t) + 
cacheSize := 200 + bufferSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) + + for _, b := range [][]byte{{1}, {2}, {3}} { + require.NoError(db.Put(ToKey(b), newNode(ToKey(b)))) + } + + require.NoError(db.Clear()) + + iter := baseDB.NewIteratorWithPrefix(intermediateNodePrefix) + defer iter.Release() + require.False(iter.Next()) + + require.Zero(db.writeBuffer.currentSize) +} + +// Test that deleting the empty key and flushing works correctly. +// Previously, there was a bug that occurred when deleting the empty key +// if the cache was empty. The size of the cache entry was reported as 0, +// which caused the cache's currentSize to be 0, so on resize() we didn't +// call onEviction. This caused the empty key to not be deleted from the baseDB. +func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) { + require := require.New(t) + cacheSize := 200 + bufferSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) + + emptyKey := ToKey([]byte{}) + require.NoError(db.Put(emptyKey, newNode(emptyKey))) + require.NoError(db.Flush()) + + emptyDBKey := db.constructDBKey(emptyKey) + has, err := baseDB.Has(emptyDBKey) + require.NoError(err) + require.True(has) + + require.NoError(db.Delete(ToKey([]byte{}))) + require.NoError(db.Flush()) + + emptyDBKey = db.constructDBKey(emptyKey) + has, err = baseDB.Has(emptyDBKey) + require.NoError(err) + require.False(has) } diff --git a/x/merkledb/key.go b/x/merkledb/key.go index 461372a2baa8..dd9938f6aaf0 100644 --- a/x/merkledb/key.go +++ b/x/merkledb/key.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -8,112 +8,137 @@ import ( "fmt" "strings" "unsafe" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/utils" ) var ( - errInvalidBranchFactor = errors.New("invalid branch factor") - - branchFactorToTokenConfig = map[BranchFactor]tokenConfig{ - BranchFactor2: { - branchFactor: BranchFactor2, - tokenBitSize: 1, - tokensPerByte: 8, - singleTokenMask: 0b0000_0001, - }, - BranchFactor4: { - branchFactor: BranchFactor4, - tokenBitSize: 2, - tokensPerByte: 4, - singleTokenMask: 0b0000_0011, - }, - BranchFactor16: { - branchFactor: BranchFactor16, - tokenBitSize: 4, - tokensPerByte: 2, - singleTokenMask: 0b0000_1111, - }, - BranchFactor256: { - branchFactor: BranchFactor256, - tokenBitSize: 8, - tokensPerByte: 1, - singleTokenMask: 0b1111_1111, - }, + ErrInvalidBranchFactor = errors.New("branch factor must match one of the predefined branch factors") + + BranchFactorToTokenSize = map[BranchFactor]int{ + BranchFactor2: 1, + BranchFactor4: 2, + BranchFactor16: 4, + BranchFactor256: 8, + } + + tokenSizeToBranchFactor = map[int]BranchFactor{ + 1: BranchFactor2, + 2: BranchFactor4, + 4: BranchFactor16, + 8: BranchFactor256, + } + + validTokenSizes = maps.Keys(tokenSizeToBranchFactor) + + validBranchFactors = []BranchFactor{ + BranchFactor2, + BranchFactor4, + BranchFactor16, + BranchFactor256, } ) type BranchFactor int const ( - BranchFactor2 BranchFactor = 2 - BranchFactor4 BranchFactor = 4 - BranchFactor16 BranchFactor = 16 - BranchFactor256 BranchFactor = 256 + BranchFactor2 = BranchFactor(2) + BranchFactor4 = BranchFactor(4) + BranchFactor16 = BranchFactor(16) + BranchFactor256 = BranchFactor(256) ) -func (f BranchFactor) Valid() error { - if _, ok := branchFactorToTokenConfig[f]; ok { - return nil +// Valid checks if BranchFactor [b] is one of the predefined valid options for BranchFactor +func (b BranchFactor) Valid() error { + for _, validBF := range validBranchFactors { + if validBF == b { + return nil + } } - return fmt.Errorf("%w: %d", errInvalidBranchFactor, f) + return fmt.Errorf("%w: %d", ErrInvalidBranchFactor, b) } -type tokenConfig struct { - branchFactor BranchFactor - tokensPerByte int - tokenBitSize byte - singleTokenMask byte +// ToToken creates a key version of the passed byte with bit length equal to tokenSize +func ToToken(val byte, tokenSize int) Key { + return Key{ + value: string([]byte{val << dualBitIndex(tokenSize)}), + length: tokenSize, + } } -type Key struct { - tokenLength int - value string - tokenConfig +// Token returns the token at the specified index, +// Assumes that bitIndex + tokenSize doesn't cross a byte boundary +func (k Key) Token(bitIndex int, tokenSize int) byte { + storageByte := k.value[bitIndex/8] + // Shift the byte right to get the last bit to the rightmost position. + storageByte >>= dualBitIndex((bitIndex + tokenSize) % 8) + // Apply a mask to remove any other bits in the byte. 
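// Worked example (illustrative): for a key whose only byte is 0b0001_0010 and a
// 4-bit token size, Token(0, 4) shifts right by dualBitIndex((0+4)%8) = 4 and masks
// with 0xFF>>4 = 0x0F, yielding 0b0001; Token(4, 4) needs no shift
// (dualBitIndex(8%8) = 0) and yields 0b0010.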
+ return storageByte & (0xFF >> dualBitIndex(tokenSize)) } -func emptyKey(bf BranchFactor) Key { - return Key{ - tokenConfig: branchFactorToTokenConfig[bf], +// iteratedHasPrefix checks if the provided prefix key is a prefix of the current key starting after the [bitsOffset]th bit +// this has better performance than constructing the actual key via Skip() then calling HasPrefix because it avoids an allocation +func (k Key) iteratedHasPrefix(prefix Key, bitsOffset int, tokenSize int) bool { + if k.length-bitsOffset < prefix.length { + return false } + for i := 0; i < prefix.length; i += tokenSize { + if k.Token(bitsOffset+i, tokenSize) != prefix.Token(i, tokenSize) { + return false + } + } + return true } -// ToKey returns [keyBytes] as a new key with the given [branchFactor]. -// Assumes [branchFactor] is valid. -func ToKey(keyBytes []byte, branchFactor BranchFactor) Key { - tc := branchFactorToTokenConfig[branchFactor] - return Key{ - value: byteSliceToString(keyBytes), - tokenConfig: tc, - tokenLength: len(keyBytes) * tc.tokensPerByte, - } +type Key struct { + // The number of bits in the key. + length int + // The string representation of the key + value string } -// TokensLength returns the number of tokens in [k]. -func (k Key) TokensLength() int { - return k.tokenLength +// ToKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Creates a copy of [keyBytes], so keyBytes are safe to edit after the call +func ToKey(keyBytes []byte) Key { + return toKey(slices.Clone(keyBytes)) +} + +// toKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Caller must not modify [keyBytes] after this call. +func toKey(keyBytes []byte) Key { + return Key{ + value: byteSliceToString(keyBytes), + length: len(keyBytes) * 8, + } } // hasPartialByte returns true iff the key fits into a non-whole number of bytes func (k Key) hasPartialByte() bool { - return k.tokenLength%k.tokensPerByte > 0 + return k.length%8 > 0 } // HasPrefix returns true iff [prefix] is a prefix of [k] or equal to it. func (k Key) HasPrefix(prefix Key) bool { // [prefix] must be shorter than [k] to be a prefix. - if k.tokenLength < prefix.tokenLength { + if k.length < prefix.length { return false } // The number of tokens in the last byte of [prefix], or zero // if [prefix] fits into a whole number of bytes. - remainderTokensCount := prefix.tokenLength % k.tokensPerByte - if remainderTokensCount == 0 { + remainderBitCount := prefix.length % 8 + if remainderBitCount == 0 { return strings.HasPrefix(k.value, prefix.value) } // check that the tokens in the partially filled final byte of [prefix] are // equal to the tokens in the final byte of [k]. - remainderBitsMask := byte(0xFF >> (remainderTokensCount * int(k.tokenBitSize))) + remainderBitsMask := byte(0xFF >> remainderBitCount) prefixRemainderTokens := prefix.value[len(prefix.value)-1] | remainderBitsMask remainderTokens := k.value[len(prefix.value)-1] | remainderBitsMask @@ -122,7 +147,7 @@ func (k Key) HasPrefix(prefix Key) bool { } // Note that this will never be an index OOB because len(prefix.value) > 0. - // If len(prefix.value) == 0 were true, [remainderTokens] would be 0 so we + // If len(prefix.value) == 0 were true, [remainderTokens] would be 0, so we // would have returned above. 
prefixWithoutPartialByte := prefix.value[:len(prefix.value)-1] return strings.HasPrefix(k.value, prefixWithoutPartialByte) @@ -134,130 +159,71 @@ func (k Key) HasStrictPrefix(prefix Key) bool { return k != prefix && k.HasPrefix(prefix) } -// Token returns the token at the specified index, -func (k Key) Token(index int) byte { - // Find the index in [k.value] of the byte containing the token at [index]. - storageByteIndex := index / k.tokensPerByte - storageByte := k.value[storageByteIndex] - // Shift the byte right to get the token to the rightmost position. - storageByte >>= k.bitsToShift(index) - // Apply a mask to remove any other tokens in the byte. - return storageByte & k.singleTokenMask -} - -// Append returns a new Path that equals the current -// Path with [token] appended to the end. -func (k Key) Append(token byte) Key { - buffer := make([]byte, k.bytesNeeded(k.tokenLength+1)) - k.appendIntoBuffer(buffer, token) - return Key{ - value: byteSliceToString(buffer), - tokenLength: k.tokenLength + 1, - tokenConfig: k.tokenConfig, - } +// Length returns the number of bits in the Key +func (k Key) Length() int { + return k.length } // Greater returns true if current Key is greater than other Key func (k Key) Greater(other Key) bool { - return k.value > other.value || (k.value == other.value && k.tokenLength > other.tokenLength) + return k.Compare(other) == 1 } -// Less returns true if current Key is less than other Key +// Less will return true if current Key is less than other Key func (k Key) Less(other Key) bool { - return k.value < other.value || (k.value == other.value && k.tokenLength < other.tokenLength) + return k.Compare(other) == -1 } -// bitsToShift returns the number of bits to right shift a token -// within its storage byte to get it to the rightmost -// position in the byte. Equivalently, this is the number of bits -// to left shift a raw token value to get it to the correct position -// within its storage byte. -// Example with branch factor 16: -// Suppose the token array is -// [0x01, 0x02, 0x03, 0x04] -// The byte representation of this array is -// [0b0001_0010, 0b0011_0100] -// To get the token at index 0 (0b0001) to the rightmost position -// in its storage byte (i.e. to make 0b0001_0010 into 0b0000_0001), -// we need to shift 0b0001_0010 to the right by 4 bits. -// Similarly: -// * Token at index 1 (0b0010) needs to be shifted by 0 bits -// * Token at index 2 (0b0011) needs to be shifted by 4 bits -// * Token at index 3 (0b0100) needs to be shifted by 0 bits -func (k Key) bitsToShift(index int) byte { - // [tokenIndex] is the index of the token in the byte. - // For example, if the branch factor is 16, then each byte contains 2 tokens. - // The first is at index 0, and the second is at index 1, by this definition. - tokenIndex := index % k.tokensPerByte - // The bit within the byte that the token starts at. - startBitIndex := k.tokenBitSize * byte(tokenIndex) - // The bit within the byte that the token ends at. - endBitIndex := startBitIndex + k.tokenBitSize - 1 - // We want to right shift until [endBitIndex] is at the last index, so return - // the distance from the end of the byte to the end of the token. - // Note that 7 is the index of the last bit in a byte. - return 7 - endBitIndex -} - -// bytesNeeded returns the number of bytes needed to store the passed number of -// tokens. -// -// Invariant: [tokens] is a non-negative, but otherwise untrusted, input and -// this method must never overflow. 
-func (k Key) bytesNeeded(tokens int) int { - size := tokens / k.tokensPerByte - if tokens%k.tokensPerByte != 0 { - size++ +func (k Key) Compare(other Key) int { + if valueCmp := utils.Compare(k.value, other.value); valueCmp != 0 { + return valueCmp } - return size + return utils.Compare(k.length, other.length) } -func (k Key) AppendExtend(token byte, extensionKey Key) Key { - appendBytes := k.bytesNeeded(k.tokenLength + 1) - totalLength := k.tokenLength + 1 + extensionKey.tokenLength - buffer := make([]byte, k.bytesNeeded(totalLength)) - k.appendIntoBuffer(buffer[:appendBytes], token) - - // the extension path will be shifted based on the number of tokens in the partial byte - tokenRemainder := (k.tokenLength + 1) % k.tokensPerByte - result := Key{ - value: byteSliceToString(buffer), - tokenLength: totalLength, - tokenConfig: k.tokenConfig, +// Extend returns a new Key that is the in-order aggregation of Key [k] with [keys] +func (k Key) Extend(keys ...Key) Key { + totalBitLength := k.length + for _, key := range keys { + totalBitLength += key.length + } + buffer := make([]byte, bytesNeeded(totalBitLength)) + copy(buffer, k.value) + currentTotal := k.length + for _, key := range keys { + extendIntoBuffer(buffer, key, currentTotal) + currentTotal += key.length } - extensionBuffer := buffer[appendBytes-1:] - if extensionKey.tokenLength == 0 { - return result + return Key{ + value: byteSliceToString(buffer), + length: totalBitLength, } +} - // If the existing value fits into a whole number of bytes, - // the extension path can be copied directly into the buffer. - if tokenRemainder == 0 { - copy(extensionBuffer[1:], extensionKey.value) - return result +func extendIntoBuffer(buffer []byte, val Key, bitsOffset int) { + if val.length == 0 { + return + } + bytesOffset := bytesNeeded(bitsOffset) + bitsRemainder := bitsOffset % 8 + if bitsRemainder == 0 { + copy(buffer[bytesOffset:], val.value) + return } - // The existing path doesn't fit into a whole number of bytes. - // Figure out how many bits to shift. - shift := extensionKey.bitsToShift(tokenRemainder - 1) // Fill the partial byte with the first [shift] bits of the extension path - extensionBuffer[0] |= extensionKey.value[0] >> (8 - shift) + buffer[bytesOffset-1] |= val.value[0] >> bitsRemainder // copy the rest of the extension path bytes into the buffer, // shifted byte shift bits - shiftCopy(extensionBuffer[1:], extensionKey.value, shift) - - return result + shiftCopy(buffer[bytesOffset:], val.value, dualBitIndex(bitsRemainder)) } -func (k Key) appendIntoBuffer(buffer []byte, token byte) { - copy(buffer, k.value) - - // Shift [token] to the left such that it's at the correct - // index within its storage byte, then OR it with its storage - // byte to write the token into the byte. - buffer[len(buffer)-1] |= token << k.bitsToShift(k.tokenLength) +// dualBitIndex gets the dual of the bit index +// ex: in a byte, the bit 5 from the right is the same as the bit 3 from the left +func dualBitIndex(shift int) int { + return (8 - shift) % 8 } // Treats [src] as a bit array and copies it into [dst] shifted by [shift] bits. @@ -266,10 +232,11 @@ func (k Key) appendIntoBuffer(buffer []byte, token byte) { // Assumes len(dst) >= len(src)-1. // If len(dst) == len(src)-1 the last byte of [src] is only partially copied // (i.e. the rightmost bits are not copied). 
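// Worked example (illustrative): with src = string([]byte{0b0000_0001, 0b0000_0010})
// and shift = 4, shiftCopy writes [0b0001_0000, 0b0010_0000] when len(dst) == 2;
// with len(dst) == 1 only 0b0001_0000 is written and the trailing bits of the last
// source byte are dropped.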
-func shiftCopy(dst []byte, src string, shift byte) {
+func shiftCopy(dst []byte, src string, shift int) {
 	i := 0
+	dualShift := dualBitIndex(shift)
 	for ; i < len(src)-1; i++ {
-		dst[i] = src[i]<<shift + src[i+1]>>(8-shift)
+		dst[i] = src[i]<<shift + src[i+1]>>dualShift
 	}

 	if i < len(dst) {
@@ -279,59 +246,56 @@ func shiftCopy(dst []byte, src string, shift byte) {
 }

 // Skip returns a new Key that contains the last
-// k.length-tokensToSkip tokens of [k].
-func (k Key) Skip(tokensToSkip int) Key {
-	if k.tokenLength == tokensToSkip {
-		return emptyKey(k.branchFactor)
+// k.length-bitsToSkip bits of [k].
+func (k Key) Skip(bitsToSkip int) Key {
+	if k.length <= bitsToSkip {
+		return Key{}
 	}
 	result := Key{
-		value:       k.value[tokensToSkip/k.tokensPerByte:],
-		tokenLength: k.tokenLength - tokensToSkip,
-		tokenConfig: k.tokenConfig,
+		value:  k.value[bitsToSkip/8:],
+		length: k.length - bitsToSkip,
 	}

 	// if the tokens to skip is a whole number of bytes,
 	// the remaining bytes exactly equals the new key.
-	if tokensToSkip%k.tokensPerByte == 0 {
+	if bitsToSkip%8 == 0 {
 		return result
 	}

-	// tokensToSkip does not remove a whole number of bytes.
+	// bitsToSkip does not remove a whole number of bytes.
 	// copy the remaining shifted bytes into a new buffer.
-	buffer := make([]byte, k.bytesNeeded(result.tokenLength))
-	bitsSkipped := tokensToSkip * int(k.tokenBitSize)
-	bitsRemovedFromFirstRemainingByte := byte(bitsSkipped % 8)
+	buffer := make([]byte, bytesNeeded(result.length))
+	bitsRemovedFromFirstRemainingByte := bitsToSkip % 8
 	shiftCopy(buffer, result.value, bitsRemovedFromFirstRemainingByte)

 	result.value = byteSliceToString(buffer)
 	return result
 }

-// Take returns a new Key that contains the first tokensToTake tokens of the current Key
-func (k Key) Take(tokensToTake int) Key {
-	if k.tokenLength <= tokensToTake {
+// Take returns a new Key that contains the first bitsToTake bits of the current Key
+func (k Key) Take(bitsToTake int) Key {
+	if k.length <= bitsToTake {
 		return k
 	}

 	result := Key{
-		tokenLength: tokensToTake,
-		tokenConfig: k.tokenConfig,
+		length: bitsToTake,
 	}

-	if !result.hasPartialByte() {
-		result.value = k.value[:tokensToTake/k.tokensPerByte]
+	remainderBits := result.length % 8
+	if remainderBits == 0 {
+		result.value = k.value[:bitsToTake/8]
 		return result
 	}

 	// We need to zero out some bits of the last byte so a simple slice will not work
 	// Create a new []byte to store the altered value
-	buffer := make([]byte, k.bytesNeeded(tokensToTake))
+	buffer := make([]byte, bytesNeeded(bitsToTake))
 	copy(buffer, k.value)

-	// We want to zero out everything to the right of the last token, which is at index [tokensToTake] - 1
-	// Mask will be (8-bitsToShift) number of 1's followed by (bitsToShift) number of 0's
-	mask := byte(0xFF << k.bitsToShift(tokensToTake-1))
-	buffer[len(buffer)-1] &= mask
+	// We want to zero out everything to the right of the last token, which is at index bitsToTake-1
+	// Mask will be (remainderBits) number of 1's followed by (8-remainderBits) number of 0's
+	buffer[len(buffer)-1] &= byte(0xFF << dualBitIndex(remainderBits))

 	result.value = byteSliceToString(buffer)
 	return result
@@ -345,20 +309,6 @@ func (k Key) Bytes() []byte {
 	return stringToByteSlice(k.value)
 }

-// iteratedHasPrefix checks if the provided prefix path is a prefix of the current path after having skipped [skipTokens] tokens first
-// this has better performance than constructing the actual path via Skip() then calling HasPrefix because it avoids the []byte allocation
-func (k Key) iteratedHasPrefix(skipTokens int, prefix Key) bool {
- if k.tokenLength-skipTokens < prefix.tokenLength { - return false - } - for i := 0; i < prefix.tokenLength; i++ { - if k.Token(skipTokens+i) != prefix.Token(i) { - return false - } - } - return true -} - // byteSliceToString converts the []byte to a string // Invariant: The input []byte must not be modified. func byteSliceToString(bs []byte) string { @@ -374,3 +324,12 @@ func stringToByteSlice(value string) []byte { // "safe" because we never edit the []byte return unsafe.Slice(unsafe.StringData(value), len(value)) } + +// Returns the number of bytes needed to store [bits] bits. +func bytesNeeded(bits int) int { + size := bits / 8 + if bits%8 != 0 { + size++ + } + return size +} diff --git a/x/merkledb/key_test.go b/x/merkledb/key_test.go index e56ee1a98050..aab666e13afc 100644 --- a/x/merkledb/key_test.go +++ b/x/merkledb/key_test.go @@ -1,52 +1,56 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb import ( "fmt" + "strconv" "testing" "github.com/stretchr/testify/require" ) -var branchFactors = []BranchFactor{ - BranchFactor2, - BranchFactor4, - BranchFactor16, - BranchFactor256, +func TestBranchFactor_Valid(t *testing.T) { + require := require.New(t) + for _, bf := range validBranchFactors { + require.NoError(bf.Valid()) + } + var empty BranchFactor + err := empty.Valid() + require.ErrorIs(err, ErrInvalidBranchFactor) } func TestHasPartialByte(t *testing.T) { - for _, branchFactor := range branchFactors { - t.Run(fmt.Sprint(branchFactor), func(t *testing.T) { + for _, ts := range validTokenSizes { + t.Run(strconv.Itoa(ts), func(t *testing.T) { require := require.New(t) - key := emptyKey(branchFactor) + key := Key{} require.False(key.hasPartialByte()) - if branchFactor == BranchFactor256 { + if ts == 8 { // Tokens are an entire byte so // there is never a partial byte. - key = key.Append(0) + key = key.Extend(ToToken(1, ts)) require.False(key.hasPartialByte()) - key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.False(key.hasPartialByte()) return } // Fill all but the last token of the first byte. - for i := 0; i < key.tokensPerByte-1; i++ { - key = key.Append(0) + for i := 0; i < 8-ts; i += ts { + key = key.Extend(ToToken(1, ts)) require.True(key.hasPartialByte()) } // Fill the last token of the first byte. - key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.False(key.hasPartialByte()) // Fill the first token of the second byte. 
- key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.True(key.hasPartialByte()) }) } @@ -55,66 +59,71 @@ func TestHasPartialByte(t *testing.T) { func Test_Key_Has_Prefix(t *testing.T) { type test struct { name string - keyA func(bf BranchFactor) Key - keyB func(bf BranchFactor) Key + keyA func(ts int) Key + keyB func(ts int) Key isStrictPrefix bool isPrefix bool } key := "Key" - keyLength := map[BranchFactor]int{} - for _, branchFactor := range branchFactors { - config := branchFactorToTokenConfig[branchFactor] - keyLength[branchFactor] = len(key) * config.tokensPerByte - } tests := []test{ { name: "equal keys", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, + keyA: func(ts int) Key { return ToKey([]byte(key)) }, + keyB: func(ts int) Key { return ToKey([]byte(key)) }, isPrefix: true, isStrictPrefix: false, }, { - name: "one key has one fewer token", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, + name: "one key has one fewer token", + keyA: func(ts int) Key { return ToKey([]byte(key)) }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, isPrefix: true, isStrictPrefix: true, }, { - name: "equal keys, both have one fewer token", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, + name: "equal keys, both have one fewer token", + keyA: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, isPrefix: true, isStrictPrefix: false, }, { name: "different keys", - keyA: func(bf BranchFactor) Key { return ToKey([]byte{0xF7}, bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte{0xF0}, bf) }, + keyA: func(ts int) Key { return ToKey([]byte{0xF7}) }, + keyB: func(ts int) Key { return ToKey([]byte{0xF0}) }, isPrefix: false, isStrictPrefix: false, }, { - name: "same bytes, different lengths", - keyA: func(bf BranchFactor) Key { return ToKey([]byte{0x10, 0x00}, bf).Take(1) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte{0x10, 0x00}, bf).Take(2) }, + name: "same bytes, different lengths", + keyA: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts * 2) + }, isPrefix: false, isStrictPrefix: false, }, } for _, tt := range tests { - for _, bf := range branchFactors { - t.Run(tt.name+" bf "+fmt.Sprint(bf), func(t *testing.T) { + for _, ts := range validTokenSizes { + t.Run(tt.name+" ts "+strconv.Itoa(ts), func(t *testing.T) { require := require.New(t) - keyA := tt.keyA(bf) - keyB := tt.keyB(bf) + keyA := tt.keyA(ts) + keyB := tt.keyB(ts) require.Equal(tt.isPrefix, keyA.HasPrefix(keyB)) - require.Equal(tt.isPrefix, keyA.iteratedHasPrefix(0, keyB)) + require.Equal(tt.isPrefix, keyA.iteratedHasPrefix(keyB, 0, ts)) require.Equal(tt.isStrictPrefix, keyA.HasStrictPrefix(keyB)) }) } @@ -124,30 +133,29 @@ func Test_Key_Has_Prefix(t *testing.T) { func Test_Key_Skip(t *testing.T) { require := require.New(t) - for _, bf := range branchFactors { - empty := emptyKey(bf) - require.Equal(ToKey([]byte{0}, bf).Skip(empty.tokensPerByte), empty) - if bf == BranchFactor256 { + empty := Key{} + require.Equal(ToKey([]byte{0}).Skip(8), empty) + for _, 
ts := range validTokenSizes { + if ts == 8 { continue } - shortKey := ToKey([]byte{0b0101_0101}, bf) - longKey := ToKey([]byte{0b0101_0101, 0b0101_0101}, bf) - for i := 0; i < shortKey.tokensPerByte; i++ { - shift := byte(i) * shortKey.tokenBitSize - skipKey := shortKey.Skip(i) + shortKey := ToKey([]byte{0b0101_0101}) + longKey := ToKey([]byte{0b0101_0101, 0b0101_0101}) + for shift := 0; shift < 8; shift += ts { + skipKey := shortKey.Skip(shift) require.Equal(byte(0b0101_0101<>(8-shift)), skipKey.value[0]) require.Equal(byte(0b0101_0101<>shift)< ts { + key1 = key1.Take(key1.length - ts) + } + key2 := ToKey(second) + if forceSecondOdd && key2.length > ts { + key2 = key2.Take(key2.length - ts) + } + token := byte(int(tokenByte) % int(tokenSizeToBranchFactor[ts])) + extendedP := key1.Extend(ToToken(token, ts), key2) + require.Equal(key1.length+key2.length+ts, extendedP.length) + firstIndex := 0 + for ; firstIndex < key1.length; firstIndex += ts { + require.Equal(key1.Token(firstIndex, ts), extendedP.Token(firstIndex, ts)) + } + require.Equal(token, extendedP.Token(firstIndex, ts)) + firstIndex += ts + for secondIndex := 0; secondIndex < key2.length; secondIndex += ts { + require.Equal(key2.Token(secondIndex, ts), extendedP.Token(firstIndex+secondIndex, ts)) + } + } + }) +} + +func FuzzKeyDoubleExtend_Any(f *testing.F) { + f.Fuzz(func( + t *testing.T, + baseKeyBytes []byte, + firstKeyBytes []byte, + secondKeyBytes []byte, + forceBaseOdd bool, forceFirstOdd bool, forceSecondOdd bool, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - key1 := ToKey(first, branchFactor) - if forceFirstOdd && key1.tokenLength > 0 { - key1 = key1.Take(key1.tokenLength - 1) + for _, ts := range validTokenSizes { + baseKey := ToKey(baseKeyBytes) + if forceBaseOdd && baseKey.length > ts { + baseKey = baseKey.Take(baseKey.length - ts) + } + firstKey := ToKey(firstKeyBytes) + if forceFirstOdd && firstKey.length > ts { + firstKey = firstKey.Take(firstKey.length - ts) } - key2 := ToKey(second, branchFactor) - if forceSecondOdd && key2.tokenLength > 0 { - key2 = key2.Take(key2.tokenLength - 1) + + secondKey := ToKey(secondKeyBytes) + if forceSecondOdd && secondKey.length > ts { + secondKey = secondKey.Take(secondKey.length - ts) } - token = byte(int(token) % int(branchFactor)) - extendedP := key1.AppendExtend(token, key2) - require.Equal(key1.tokenLength+key2.tokenLength+1, extendedP.tokenLength) - for i := 0; i < key1.tokenLength; i++ { - require.Equal(key1.Token(i), extendedP.Token(i)) + + extendedP := baseKey.Extend(firstKey, secondKey) + require.Equal(baseKey.length+firstKey.length+secondKey.length, extendedP.length) + totalIndex := 0 + for baseIndex := 0; baseIndex < baseKey.length; baseIndex += ts { + require.Equal(baseKey.Token(baseIndex, ts), extendedP.Token(baseIndex, ts)) } - require.Equal(token, extendedP.Token(key1.tokenLength)) - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key2.Token(i), extendedP.Token(i+1+key1.tokenLength)) + totalIndex += baseKey.length + for firstIndex := 0; firstIndex < firstKey.length; firstIndex += ts { + require.Equal(firstKey.Token(firstIndex, ts), extendedP.Token(totalIndex+firstIndex, ts)) + } + totalIndex += firstKey.length + for secondIndex := 0; secondIndex < secondKey.length; secondIndex += ts { + require.Equal(secondKey.Token(secondIndex, ts), extendedP.Token(totalIndex+secondIndex, ts)) } } }) @@ -509,15 +478,18 @@ func FuzzKeySkip(f *testing.F) { tokensToSkip uint, ) { require := require.New(t) - for _, branchFactor := range 
branchFactors { - key1 := ToKey(first, branchFactor) - if int(tokensToSkip) >= key1.tokenLength { + key1 := ToKey(first) + for _, ts := range validTokenSizes { + // need bits to be a multiple of token size + ubitsToSkip := tokensToSkip * uint(ts) + if ubitsToSkip >= uint(key1.length) { t.SkipNow() } - key2 := key1.Skip(int(tokensToSkip)) - require.Equal(key1.tokenLength-int(tokensToSkip), key2.tokenLength) - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key1.Token(int(tokensToSkip)+i), key2.Token(i)) + bitsToSkip := int(ubitsToSkip) + key2 := key1.Skip(bitsToSkip) + require.Equal(key1.length-bitsToSkip, key2.length) + for i := 0; i < key2.length; i += ts { + require.Equal(key1.Token(bitsToSkip+i, ts), key2.Token(i, ts)) } } }) @@ -527,19 +499,24 @@ func FuzzKeyTake(f *testing.F) { f.Fuzz(func( t *testing.T, first []byte, - tokensToTake uint, + uTokensToTake uint, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - key1 := ToKey(first, branchFactor) - if int(tokensToTake) >= key1.tokenLength { + for _, ts := range validTokenSizes { + key1 := ToKey(first) + uBitsToTake := uTokensToTake * uint(ts) + if uBitsToTake >= uint(key1.length) { t.SkipNow() } - key2 := key1.Take(int(tokensToTake)) - require.Equal(int(tokensToTake), key2.tokenLength) - - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key1.Token(i), key2.Token(i)) + bitsToTake := int(uBitsToTake) + key2 := key1.Take(bitsToTake) + require.Equal(bitsToTake, key2.length) + if key2.hasPartialByte() { + paddingMask := byte(0xFF >> (key2.length % 8)) + require.Zero(key2.value[len(key2.value)-1] & paddingMask) + } + for i := 0; i < bitsToTake; i += ts { + require.Equal(key1.Token(i, ts), key2.Token(i, ts)) } } }) @@ -550,7 +527,7 @@ func TestShiftCopy(t *testing.T) { dst []byte src []byte expected []byte - shift byte + shift int } tests := []test{ diff --git a/x/merkledb/metrics.go b/x/merkledb/metrics.go index d8a80a02db5a..058b4869904a 100644 --- a/x/merkledb/metrics.go +++ b/x/merkledb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb diff --git a/x/merkledb/metrics_test.go b/x/merkledb/metrics_test.go index 3bf5a9480a54..20c4accbd13c 100644 --- a/x/merkledb/metrics_test.go +++ b/x/merkledb/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -34,20 +34,20 @@ func Test_Metrics_Basic_Usage(t *testing.T) { require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(2), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) require.NoError(t, db.Delete([]byte("key"))) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(3), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) _, err = db.Get([]byte("key2")) require.ErrorIs(t, err, database.ErrNotFound) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(3), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) } func Test_Metrics_Initialize(t *testing.T) { diff --git a/x/merkledb/mock_db.go b/x/merkledb/mock_db.go index f7e35883c177..d43e276103f7 100644 --- a/x/merkledb/mock_db.go +++ b/x/merkledb/mock_db.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/x/merkledb (interfaces: MerkleDB) +// Source: x/merkledb/db.go +// +// Generated by this command: +// +// mockgen -source=x/merkledb/db.go -destination=x/merkledb/mock_db.go -package=merkledb -exclude_interfaces=ChangeProofer,RangeProofer,Clearer,Prefetcher +// // Package merkledb is a generated GoMock package. package merkledb @@ -40,6 +42,20 @@ func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { return m.recorder } +// Clear mocks base method. +func (m *MockMerkleDB) Clear() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clear") + ret0, _ := ret[0].(error) + return ret0 +} + +// Clear indicates an expected call of Clear. +func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) +} + // Close mocks base method. func (m *MockMerkleDB) Close() error { m.ctrl.T.Helper() @@ -55,207 +71,207 @@ func (mr *MockMerkleDBMockRecorder) Close() *gomock.Call { } // CommitChangeProof mocks base method. -func (m *MockMerkleDB) CommitChangeProof(arg0 context.Context, arg1 *ChangeProof) error { +func (m *MockMerkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitChangeProof", arg0, arg1) + ret := m.ctrl.Call(m, "CommitChangeProof", ctx, proof) ret0, _ := ret[0].(error) return ret0 } // CommitChangeProof indicates an expected call of CommitChangeProof. -func (mr *MockMerkleDBMockRecorder) CommitChangeProof(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) CommitChangeProof(ctx, proof any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), ctx, proof) } // CommitRangeProof mocks base method. 
-func (m *MockMerkleDB) CommitRangeProof(arg0 context.Context, arg1, arg2 maybe.Maybe[[]uint8], arg3 *RangeProof) error { +func (m *MockMerkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitRangeProof", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "CommitRangeProof", ctx, start, end, proof) ret0, _ := ret[0].(error) return ret0 } // CommitRangeProof indicates an expected call of CommitRangeProof. -func (mr *MockMerkleDBMockRecorder) CommitRangeProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) CommitRangeProof(ctx, start, end, proof any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), ctx, start, end, proof) } // Compact mocks base method. -func (m *MockMerkleDB) Compact(arg0, arg1 []byte) error { +func (m *MockMerkleDB) Compact(start, limit []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Compact", arg0, arg1) + ret := m.ctrl.Call(m, "Compact", start, limit) ret0, _ := ret[0].(error) return ret0 } // Compact indicates an expected call of Compact. -func (mr *MockMerkleDBMockRecorder) Compact(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) Compact(start, limit any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), start, limit) } // Delete mocks base method. -func (m *MockMerkleDB) Delete(arg0 []byte) error { +func (m *MockMerkleDB) Delete(key []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) + ret := m.ctrl.Call(m, "Delete", key) ret0, _ := ret[0].(error) return ret0 } // Delete indicates an expected call of Delete. -func (mr *MockMerkleDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) Delete(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), key) } // Get mocks base method. -func (m *MockMerkleDB) Get(arg0 []byte) ([]byte, error) { +func (m *MockMerkleDB) Get(key []byte) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) + ret := m.ctrl.Call(m, "Get", key) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockMerkleDBMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) Get(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), key) } // GetChangeProof mocks base method. 
-func (m *MockMerkleDB) GetChangeProof(arg0 context.Context, arg1, arg2 ids.ID, arg3, arg4 maybe.Maybe[[]uint8], arg5 int) (*ChangeProof, error) { +func (m *MockMerkleDB) GetChangeProof(ctx context.Context, startRootID, endRootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*ChangeProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChangeProof", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "GetChangeProof", ctx, startRootID, endRootID, start, end, maxLength) ret0, _ := ret[0].(*ChangeProof) ret1, _ := ret[1].(error) return ret0, ret1 } // GetChangeProof indicates an expected call of GetChangeProof. -func (mr *MockMerkleDBMockRecorder) GetChangeProof(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetChangeProof(ctx, startRootID, endRootID, start, end, maxLength any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), ctx, startRootID, endRootID, start, end, maxLength) } // GetMerkleRoot mocks base method. -func (m *MockMerkleDB) GetMerkleRoot(arg0 context.Context) (ids.ID, error) { +func (m *MockMerkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMerkleRoot", arg0) + ret := m.ctrl.Call(m, "GetMerkleRoot", ctx) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMerkleRoot indicates an expected call of GetMerkleRoot. -func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), ctx) } // GetProof mocks base method. -func (m *MockMerkleDB) GetProof(arg0 context.Context, arg1 []byte) (*Proof, error) { +func (m *MockMerkleDB) GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProof", arg0, arg1) + ret := m.ctrl.Call(m, "GetProof", ctx, keyBytes) ret0, _ := ret[0].(*Proof) ret1, _ := ret[1].(error) return ret0, ret1 } // GetProof indicates an expected call of GetProof. -func (mr *MockMerkleDBMockRecorder) GetProof(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetProof(ctx, keyBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), ctx, keyBytes) } // GetRangeProof mocks base method. 
-func (m *MockMerkleDB) GetRangeProof(arg0 context.Context, arg1, arg2 maybe.Maybe[[]uint8], arg3 int) (*RangeProof, error) { +func (m *MockMerkleDB) GetRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProof", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "GetRangeProof", ctx, start, end, maxLength) ret0, _ := ret[0].(*RangeProof) ret1, _ := ret[1].(error) return ret0, ret1 } // GetRangeProof indicates an expected call of GetRangeProof. -func (mr *MockMerkleDBMockRecorder) GetRangeProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetRangeProof(ctx, start, end, maxLength any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), ctx, start, end, maxLength) } // GetRangeProofAtRoot mocks base method. -func (m *MockMerkleDB) GetRangeProofAtRoot(arg0 context.Context, arg1 ids.ID, arg2, arg3 maybe.Maybe[[]uint8], arg4 int) (*RangeProof, error) { +func (m *MockMerkleDB) GetRangeProofAtRoot(ctx context.Context, rootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProofAtRoot", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "GetRangeProofAtRoot", ctx, rootID, start, end, maxLength) ret0, _ := ret[0].(*RangeProof) ret1, _ := ret[1].(error) return ret0, ret1 } // GetRangeProofAtRoot indicates an expected call of GetRangeProofAtRoot. -func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(ctx, rootID, start, end, maxLength any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), ctx, rootID, start, end, maxLength) } // GetValue mocks base method. -func (m *MockMerkleDB) GetValue(arg0 context.Context, arg1 []byte) ([]byte, error) { +func (m *MockMerkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValue", arg0, arg1) + ret := m.ctrl.Call(m, "GetValue", ctx, key) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // GetValue indicates an expected call of GetValue. -func (mr *MockMerkleDBMockRecorder) GetValue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetValue(ctx, key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), ctx, key) } // GetValues mocks base method. 
-func (m *MockMerkleDB) GetValues(arg0 context.Context, arg1 [][]byte) ([][]byte, []error) { +func (m *MockMerkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValues", arg0, arg1) + ret := m.ctrl.Call(m, "GetValues", ctx, keys) ret0, _ := ret[0].([][]byte) ret1, _ := ret[1].([]error) return ret0, ret1 } // GetValues indicates an expected call of GetValues. -func (mr *MockMerkleDBMockRecorder) GetValues(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) GetValues(ctx, keys any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), ctx, keys) } // Has mocks base method. -func (m *MockMerkleDB) Has(arg0 []byte) (bool, error) { +func (m *MockMerkleDB) Has(key []byte) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) + ret := m.ctrl.Call(m, "Has", key) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // Has indicates an expected call of Has. -func (mr *MockMerkleDBMockRecorder) Has(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) Has(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), key) } // HealthCheck mocks base method. -func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockMerkleDB)(nil).HealthCheck), arg0) } @@ -289,144 +305,187 @@ func (mr *MockMerkleDBMockRecorder) NewIterator() *gomock.Call { } // NewIteratorWithPrefix mocks base method. -func (m *MockMerkleDB) NewIteratorWithPrefix(arg0 []byte) database.Iterator { +func (m *MockMerkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithPrefix", arg0) + ret := m.ctrl.Call(m, "NewIteratorWithPrefix", prefix) ret0, _ := ret[0].(database.Iterator) return ret0 } // NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(prefix any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), prefix) } // NewIteratorWithStart mocks base method. 
-func (m *MockMerkleDB) NewIteratorWithStart(arg0 []byte) database.Iterator { +func (m *MockMerkleDB) NewIteratorWithStart(start []byte) database.Iterator { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStart", arg0) + ret := m.ctrl.Call(m, "NewIteratorWithStart", start) ret0, _ := ret[0].(database.Iterator) return ret0 } // NewIteratorWithStart indicates an expected call of NewIteratorWithStart. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(start any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), start) } // NewIteratorWithStartAndPrefix mocks base method. -func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(arg0, arg1 []byte) database.Iterator { +func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", arg0, arg1) + ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", start, prefix) ret0, _ := ret[0].(database.Iterator) return ret0 } // NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(start, prefix any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), start, prefix) } // NewView mocks base method. -func (m *MockMerkleDB) NewView(arg0 context.Context, arg1 ViewChanges) (TrieView, error) { +func (m *MockMerkleDB) NewView(ctx context.Context, changes ViewChanges) (View, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewView", arg0, arg1) - ret0, _ := ret[0].(TrieView) + ret := m.ctrl.Call(m, "NewView", ctx, changes) + ret0, _ := ret[0].(View) ret1, _ := ret[1].(error) return ret0, ret1 } // NewView indicates an expected call of NewView. -func (mr *MockMerkleDBMockRecorder) NewView(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) NewView(ctx, changes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), ctx, changes) } // PrefetchPath mocks base method. -func (m *MockMerkleDB) PrefetchPath(arg0 []byte) error { +func (m *MockMerkleDB) PrefetchPath(key []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPath", arg0) + ret := m.ctrl.Call(m, "PrefetchPath", key) ret0, _ := ret[0].(error) return ret0 } // PrefetchPath indicates an expected call of PrefetchPath. 
-func (mr *MockMerkleDBMockRecorder) PrefetchPath(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) PrefetchPath(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), key) } // PrefetchPaths mocks base method. -func (m *MockMerkleDB) PrefetchPaths(arg0 [][]byte) error { +func (m *MockMerkleDB) PrefetchPaths(keys [][]byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPaths", arg0) + ret := m.ctrl.Call(m, "PrefetchPaths", keys) ret0, _ := ret[0].(error) return ret0 } // PrefetchPaths indicates an expected call of PrefetchPaths. -func (mr *MockMerkleDBMockRecorder) PrefetchPaths(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) PrefetchPaths(keys any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), keys) } // Put mocks base method. -func (m *MockMerkleDB) Put(arg0, arg1 []byte) error { +func (m *MockMerkleDB) Put(key, value []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Put", arg0, arg1) + ret := m.ctrl.Call(m, "Put", key, value) ret0, _ := ret[0].(error) return ret0 } // Put indicates an expected call of Put. -func (mr *MockMerkleDBMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) Put(key, value any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), key, value) } // VerifyChangeProof mocks base method. -func (m *MockMerkleDB) VerifyChangeProof(arg0 context.Context, arg1 *ChangeProof, arg2, arg3 maybe.Maybe[[]uint8], arg4 ids.ID) error { +func (m *MockMerkleDB) VerifyChangeProof(ctx context.Context, proof *ChangeProof, start, end maybe.Maybe[[]byte], expectedEndRootID ids.ID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyChangeProof", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "VerifyChangeProof", ctx, proof, start, end, expectedEndRootID) ret0, _ := ret[0].(error) return ret0 } // VerifyChangeProof indicates an expected call of VerifyChangeProof. -func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(ctx, proof, start, end, expectedEndRootID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), ctx, proof, start, end, expectedEndRootID) } // getEditableNode mocks base method. 
-func (m *MockMerkleDB) getEditableNode(arg0 Key, arg1 bool) (*node, error) { +func (m *MockMerkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getEditableNode", arg0, arg1) + ret := m.ctrl.Call(m, "getEditableNode", key, hasValue) ret0, _ := ret[0].(*node) ret1, _ := ret[1].(error) return ret0, ret1 } // getEditableNode indicates an expected call of getEditableNode. -func (mr *MockMerkleDBMockRecorder) getEditableNode(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) getEditableNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), key, hasValue) +} + +// getNode mocks base method. +func (m *MockMerkleDB) getNode(key Key, hasValue bool) (*node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getNode", key, hasValue) + ret0, _ := ret[0].(*node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getNode indicates an expected call of getNode. +func (mr *MockMerkleDBMockRecorder) getNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getNode", reflect.TypeOf((*MockMerkleDB)(nil).getNode), key, hasValue) +} + +// getRoot mocks base method. +func (m *MockMerkleDB) getRoot() maybe.Maybe[*node] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getRoot") + ret0, _ := ret[0].(maybe.Maybe[*node]) + return ret0 +} + +// getRoot indicates an expected call of getRoot. +func (mr *MockMerkleDBMockRecorder) getRoot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getRoot", reflect.TypeOf((*MockMerkleDB)(nil).getRoot)) +} + +// getTokenSize mocks base method. +func (m *MockMerkleDB) getTokenSize() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getTokenSize") + ret0, _ := ret[0].(int) + return ret0 +} + +// getTokenSize indicates an expected call of getTokenSize. +func (mr *MockMerkleDBMockRecorder) getTokenSize() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTokenSize", reflect.TypeOf((*MockMerkleDB)(nil).getTokenSize)) } // getValue mocks base method. -func (m *MockMerkleDB) getValue(arg0 Key) ([]byte, error) { +func (m *MockMerkleDB) getValue(key Key) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getValue", arg0) + ret := m.ctrl.Call(m, "getValue", key) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // getValue indicates an expected call of getValue. -func (mr *MockMerkleDBMockRecorder) getValue(arg0 interface{}) *gomock.Call { +func (mr *MockMerkleDBMockRecorder) getValue(key any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), key) } diff --git a/x/merkledb/node.go b/x/merkledb/node.go index 259e048c1793..701e120e4b52 100644 --- a/x/merkledb/node.go +++ b/x/merkledb/node.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb import ( - "golang.org/x/exp/maps" "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/ids" @@ -14,17 +13,10 @@ import ( const HashLength = 32 -// the values that go into the node's id -type hashValues struct { - Children map[byte]child - Value maybe.Maybe[[]byte] - Key Key -} - // Representation of a node stored in the database. type dbNode struct { value maybe.Maybe[[]byte] - children map[byte]child + children map[byte]*child } type child struct { @@ -36,37 +28,29 @@ type child struct { // node holds additional information on top of the dbNode that makes calculations easier to do type node struct { dbNode - id ids.ID key Key - nodeBytes []byte valueDigest maybe.Maybe[[]byte] } // Returns a new node with the given [key] and no value. -// If [parent] isn't nil, the new node is added as a child of [parent]. -func newNode(parent *node, key Key) *node { - newNode := &node{ +func newNode(key Key) *node { + return &node{ dbNode: dbNode{ - children: make(map[byte]child, key.branchFactor), + children: make(map[byte]*child, 2), }, key: key, } - if parent != nil { - parent.addChild(newNode) - } - return newNode } // Parse [nodeBytes] to a node and set its key to [key]. func parseNode(key Key, nodeBytes []byte) (*node, error) { n := dbNode{} - if err := codec.decodeDBNode(nodeBytes, &n, key.branchFactor); err != nil { + if err := codec.decodeDBNode(nodeBytes, &n); err != nil { return nil, err } result := &node{ - dbNode: n, - key: key, - nodeBytes: nodeBytes, + dbNode: n, + key: key, } result.setValueDigest() @@ -80,38 +64,18 @@ func (n *node) hasValue() bool { // Returns the byte representation of this node. func (n *node) bytes() []byte { - if n.nodeBytes == nil { - n.nodeBytes = codec.encodeDBNode(&n.dbNode) - } - - return n.nodeBytes -} - -// clear the cached values that will need to be recalculated whenever the node changes -// for example, node ID and byte representation -func (n *node) onNodeChanged() { - n.id = ids.Empty - n.nodeBytes = nil + return codec.encodeDBNode(&n.dbNode) } // Returns and caches the ID of this node. -func (n *node) calculateID(metrics merkleMetrics) { - if n.id != ids.Empty { - return - } - +func (n *node) calculateID(metrics merkleMetrics) ids.ID { metrics.HashCalculated() - bytes := codec.encodeHashValues(&hashValues{ - Children: n.children, - Value: n.valueDigest, - Key: n.key, - }) - n.id = hashing.ComputeHash256Array(bytes) + bytes := codec.encodeHashValues(n) + return hashing.ComputeHash256Array(bytes) } // Set [n]'s value to [val]. func (n *node) setValue(val maybe.Maybe[[]byte]) { - n.onNodeChanged() n.value = val n.setValueDigest() } @@ -127,27 +91,29 @@ func (n *node) setValueDigest() { // Adds [child] as a child of [n]. // Assumes [child]'s key is valid as a child of [n]. // That is, [n.key] is a prefix of [child.key]. -func (n *node) addChild(childNode *node) { +func (n *node) addChild(childNode *node, tokenSize int) { + n.addChildWithID(childNode, tokenSize, ids.Empty) +} + +func (n *node) addChildWithID(childNode *node, tokenSize int, childID ids.ID) { n.setChildEntry( - childNode.key.Token(n.key.tokenLength), - child{ - compressedKey: childNode.key.Skip(n.key.tokenLength + 1), - id: childNode.id, + childNode.key.Token(n.key.length, tokenSize), + &child{ + compressedKey: childNode.key.Skip(n.key.length + tokenSize), + id: childID, hasValue: childNode.hasValue(), }, ) } // Adds a child to [n] without a reference to the child node. 
-func (n *node) setChildEntry(index byte, childEntry child) { - n.onNodeChanged() +func (n *node) setChildEntry(index byte, childEntry *child) { n.children[index] = childEntry } // Removes [child] from [n]'s children. -func (n *node) removeChild(child *node) { - n.onNodeChanged() - delete(n.children, child.key.Token(n.key.tokenLength)) +func (n *node) removeChild(child *node, tokenSize int) { + delete(n.children, child.key.Token(n.key.length, tokenSize)) } // clone Returns a copy of [n]. @@ -155,16 +121,22 @@ func (n *node) removeChild(child *node) { // if this ever changes, value will need to be copied as well // it is safe to clone all fields because they are only written/read while one or both of the db locks are held func (n *node) clone() *node { - return &node{ - id: n.id, + result := &node{ key: n.key, dbNode: dbNode{ value: n.value, - children: maps.Clone(n.children), + children: make(map[byte]*child, len(n.children)), }, valueDigest: n.valueDigest, - nodeBytes: n.nodeBytes, } + for key, existing := range n.children { + result.children[key] = &child{ + compressedKey: existing.compressedKey, + id: existing.id, + hasValue: existing.hasValue, + } + } + return result } // Returns the ProofNode representation of this node. diff --git a/x/merkledb/node_test.go b/x/merkledb/node_test.go index 9632b7c7dacb..3c09679570f3 100644 --- a/x/merkledb/node_test.go +++ b/x/merkledb/node_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -13,54 +13,57 @@ import ( ) func Test_Node_Marshal(t *testing.T) { - root := newNode(nil, emptyKey(BranchFactor16)) + root := newNode(Key{}) require.NotNil(t, root) - fullKey := ToKey([]byte("key"), BranchFactor16) - childNode := newNode(root, fullKey) + fullKey := ToKey([]byte("key")) + childNode := newNode(fullKey) + root.addChild(childNode, 4) childNode.setValue(maybe.Some([]byte("value"))) require.NotNil(t, childNode) childNode.calculateID(&mockMetrics{}) - root.addChild(childNode) + root.addChild(childNode, 4) data := root.bytes() - rootParsed, err := parseNode(ToKey([]byte(""), BranchFactor16), data) + rootParsed, err := parseNode(ToKey([]byte("")), data) require.NoError(t, err) require.Len(t, rootParsed.children, 1) - rootIndex := getSingleChildKey(root).Token(root.key.tokenLength) - parsedIndex := getSingleChildKey(rootParsed).Token(rootParsed.key.tokenLength) + rootIndex := getSingleChildKey(root, 4).Token(0, 4) + parsedIndex := getSingleChildKey(rootParsed, 4).Token(0, 4) rootChildEntry := root.children[rootIndex] parseChildEntry := rootParsed.children[parsedIndex] require.Equal(t, rootChildEntry.id, parseChildEntry.id) } func Test_Node_Marshal_Errors(t *testing.T) { - root := newNode(nil, emptyKey(BranchFactor16)) + root := newNode(Key{}) require.NotNil(t, root) - fullKey := ToKey([]byte{255}, BranchFactor16) - childNode1 := newNode(root, fullKey) + fullKey := ToKey([]byte{255}) + childNode1 := newNode(fullKey) + root.addChild(childNode1, 4) childNode1.setValue(maybe.Some([]byte("value1"))) require.NotNil(t, childNode1) childNode1.calculateID(&mockMetrics{}) - root.addChild(childNode1) + root.addChild(childNode1, 4) - fullKey = ToKey([]byte{237}, BranchFactor16) - childNode2 := newNode(root, fullKey) + fullKey = ToKey([]byte{237}) + childNode2 := newNode(fullKey) + root.addChild(childNode2, 4) childNode2.setValue(maybe.Some([]byte("value2"))) require.NotNil(t, childNode2) 
childNode2.calculateID(&mockMetrics{}) - root.addChild(childNode2) + root.addChild(childNode2, 4) data := root.bytes() for i := 1; i < len(data); i++ { broken := data[:i] - _, err := parseNode(ToKey([]byte(""), BranchFactor16), broken) + _, err := parseNode(ToKey([]byte("")), broken) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } } diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index 63ea34542c9b..8ddd97ffa5f9 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -20,10 +20,7 @@ import ( pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) -const ( - verificationEvictionBatchSize = 0 - verificationCacheSize = math.MaxInt -) +const verificationCacheSize = math.MaxUint16 var ( ErrInvalidProof = errors.New("proof obtained an invalid root ID") @@ -33,16 +30,15 @@ var ( ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") ErrExtraProofNodes = errors.New("extra proof nodes in path") ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") + ErrEmptyProof = errors.New("proof is empty") ErrNoMerkleProof = errors.New("empty key response must include merkle proof") ErrShouldJustBeRoot = errors.New("end proof should only contain root") ErrNoStartProof = errors.New("no start proof") ErrNoEndProof = errors.New("no end proof") - ErrNoProof = errors.New("proof has no nodes") ErrProofNodeNotForKey = errors.New("the provided node has a key that is not a prefix of the specified key") ErrProofValueDoesntMatch = errors.New("the provided value does not match the proof node for the provided key's value") ErrProofNodeHasUnincludedValue = errors.New("the provided proof has a value for a key within the range that is not present in the provided key/values") ErrInvalidMaybe = errors.New("maybe is nothing but has value") - ErrInvalidChildIndex = errors.New("child index must be less than branch factor") ErrNilProofNode = errors.New("proof node is nil") ErrNilValueOrHash = errors.New("proof node's valueOrHash field is nil") ErrNilKey = errors.New("key is nil") @@ -53,7 +49,6 @@ var ( ErrNilProof = errors.New("proof is nil") ErrNilValue = errors.New("value is nil") ErrUnexpectedEndProof = errors.New("end proof should be empty") - ErrInconsistentBranchFactor = errors.New("all keys in proof nodes should have the same branch factor") ) type ProofNode struct { @@ -65,11 +60,12 @@ type ProofNode struct { Children map[byte]ids.ID } +// ToProto converts the ProofNode into the protobuf version of a proof node // Assumes [node.Key.Key.length] <= math.MaxUint64. 
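A brief illustration of the key encoding performed below, using hypothetical values: a ProofNode whose Key holds 12 bits packed into the bytes {0xAB, 0xC0} serializes its key as

	&pb.Key{Length: 12, Value: []byte{0xAB, 0xC0}}

that is, Length counts bits while Value carries the key's packed bytes, so bytesNeeded(Length) must equal len(Value) when the node is unmarshalled.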
func (node *ProofNode) ToProto() *pb.ProofNode { pbNode := &pb.ProofNode{ Key: &pb.Key{ - Length: uint64(node.Key.tokenLength), + Length: uint64(node.Key.length), Value: node.Key.Bytes(), }, ValueOrHash: &pb.MaybeBytes{ @@ -87,7 +83,7 @@ func (node *ProofNode) ToProto() *pb.ProofNode { return pbNode } -func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) error { +func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode) error { switch { case pbNode == nil: return ErrNilProofNode @@ -97,17 +93,14 @@ func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) err return ErrInvalidMaybe case pbNode.Key == nil: return ErrNilKey - } - node.Key = ToKey(pbNode.Key.Value, bf).Take(int(pbNode.Key.Length)) - - if len(pbNode.Key.Value) != node.Key.bytesNeeded(node.Key.tokenLength) { + case len(pbNode.Key.Value) != bytesNeeded(int(pbNode.Key.Length)): return ErrInvalidKeyLength } - + node.Key = ToKey(pbNode.Key.Value).Take(int(pbNode.Key.Length)) node.Children = make(map[byte]ids.ID, len(pbNode.Children)) for childIndex, childIDBytes := range pbNode.Children { - if childIndex >= uint32(bf) { - return ErrInvalidChildIndex + if childIndex > math.MaxUint8 { + return errChildIndexTooLarge } childID, err := ids.ToID(childIDBytes) if err != nil { @@ -123,28 +116,29 @@ func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) err return nil } -// An inclusion/exclustion proof of a key. +// Proof represents an inclusion/exclusion proof of a key. type Proof struct { // Nodes in the proof path from root --> target key // (or node that would be where key is if it doesn't exist). - // Must always be non-empty (i.e. have the root node). + // Always contains at least the root. Path []ProofNode // This is a proof that [key] exists/doesn't exist. Key Key // Nothing if [Key] isn't in the trie. - // Otherwise the value corresponding to [Key]. + // Otherwise, the value corresponding to [Key]. Value maybe.Maybe[[]byte] } -// Returns nil if the trie given in [proof] has root [expectedRootID]. +// Verify returns nil if the trie given in [proof] has root [expectedRootID]. // That is, this is a valid proof that [proof.Key] exists/doesn't exist // in the trie with root [expectedRootID]. -func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { +func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize int) error { // Make sure the proof is well-formed. if len(proof.Path) == 0 { - return ErrNoProof + return ErrEmptyProof } + if err := verifyProofPath(proof.Path, maybe.Some(proof.Key)); err != nil { return err } @@ -172,17 +166,17 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { } // Don't bother locking [view] -- nobody else has a reference to it. - view, err := getStandaloneTrieView(ctx, nil, proof.Key.branchFactor) + view, err := getStandaloneView(ctx, nil, tokenSize) if err != nil { return err } // Insert all proof nodes. - // [provenPath] is the path that we are proving exists, or the path - // that is where the path we are proving doesn't exist should be. - provenPath := maybe.Some(proof.Path[len(proof.Path)-1].Key) + // [provenKey] is the key that we are proving exists, or the key + // that is the next key along the node path, proving that [proof.Key] doesn't exist in the trie. 
+ provenKey := maybe.Some(proof.Path[len(proof.Path)-1].Key) - if err = addPathInfo(view, proof.Path, provenPath, provenPath); err != nil { + if err = addPathInfo(view, proof.Path, provenKey, provenKey); err != nil { return err } @@ -215,7 +209,7 @@ func (proof *Proof) ToProto() *pb.Proof { return pbProof } -func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { +func (proof *Proof) UnmarshalProto(pbProof *pb.Proof) error { switch { case pbProof == nil: return ErrNilProof @@ -225,7 +219,7 @@ func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { return ErrInvalidMaybe } - proof.Key = ToKey(pbProof.Key, bf) + proof.Key = ToKey(pbProof.Key) if !pbProof.Value.IsNothing { proof.Value = maybe.Some(pbProof.Value.Value) @@ -233,7 +227,7 @@ func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { proof.Path = make([]ProofNode, len(pbProof.Proof)) for i, pbNode := range pbProof.Proof { - if err := proof.Path[i].UnmarshalProto(pbNode, bf); err != nil { + if err := proof.Path[i].UnmarshalProto(pbNode); err != nil { return err } } @@ -246,7 +240,7 @@ type KeyValue struct { Value []byte } -// A proof that a given set of key-value pairs are in a trie. +// RangeProof is a proof that a given set of key-value pairs are in a trie. type RangeProof struct { // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. @@ -255,16 +249,12 @@ type RangeProof struct { // they are also in [EndProof]. StartProof []ProofNode - // If no upper range bound was given, [KeyValues] is empty, - // and [StartProof] is non-empty, this is empty. - // - // If no upper range bound was given, [KeyValues] is empty, - // and [StartProof] is empty, this is the root. + // If no upper range bound was given and [KeyValues] is empty, this is empty. // - // If an upper range bound was given and [KeyValues] is empty, - // this is a proof for the upper range bound. + // If no upper range bound was given and [KeyValues] is non-empty, this is + // a proof for the largest key in [KeyValues]. // - // Otherwise, this is a proof for the largest key in [KeyValues]. + // Otherwise this is a proof for the upper range bound. EndProof []ProofNode // This proof proves that the key-value pairs in [KeyValues] are in the trie. 
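A minimal verification sketch against the new signature, assuming a trie built with BranchFactor16 (token size 4); db, ctx, and the error handling are placeholders rather than part of the patch:

	proof, err := db.GetRangeProof(ctx, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100)
	if err != nil {
		return err
	}
	root, err := db.GetMerkleRoot(ctx)
	if err != nil {
		return err
	}
	// The token size passed to Verify must match the trie that produced the proof.
	return proof.Verify(ctx, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), root, 4)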
@@ -287,70 +277,57 @@ func (proof *RangeProof) Verify( start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], expectedRootID ids.ID, + tokenSize int, ) error { switch { case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: return ErrStartAfterEnd case len(proof.KeyValues) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: - return ErrNoMerkleProof - case end.IsNothing() && len(proof.KeyValues) == 0 && len(proof.StartProof) > 0 && len(proof.EndProof) != 0: + return ErrEmptyProof + case end.IsNothing() && len(proof.KeyValues) == 0 && len(proof.EndProof) != 0: return ErrUnexpectedEndProof - case end.IsNothing() && len(proof.KeyValues) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) != 1: - return ErrShouldJustBeRoot case len(proof.EndProof) == 0 && (end.HasValue() || len(proof.KeyValues) > 0): return ErrNoEndProof } - // determine branch factor based on proof paths - var branchFactor BranchFactor - if len(proof.StartProof) > 0 { - branchFactor = proof.StartProof[0].Key.branchFactor - } else { - // safe because invariants prevent both start proof and end proof from being empty at the same time - branchFactor = proof.EndProof[0].Key.branchFactor - } - // Make sure the key-value pairs are sorted and in [start, end]. if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { return err } // [proof] allegedly provides and proves all key-value - // pairs in [smallestProvenPath, largestProvenPath]. - // If [smallestProvenPath] is Nothing, [proof] should - // provide and prove all keys < [largestProvenPath]. - // If [largestProvenPath] is Nothing, [proof] should - // provide and prove all keys > [smallestProvenPath]. + // pairs in [smallestProvenKey, largestProvenKey]. + // If [smallestProvenKey] is Nothing, [proof] should + // provide and prove all keys < [largestProvenKey]. + // If [largestProvenKey] is Nothing, [proof] should + // provide and prove all keys > [smallestProvenKey]. // If both are Nothing, [proof] should prove the entire trie. - smallestProvenPath := maybe.Bind(start, func(b []byte) Key { - return ToKey(b, branchFactor) - }) + smallestProvenKey := maybe.Bind(start, ToKey) + + largestProvenKey := maybe.Bind(end, ToKey) - largestProvenPath := maybe.Bind(end, func(b []byte) Key { - return ToKey(b, branchFactor) - }) if len(proof.KeyValues) > 0 { // If [proof] has key-value pairs, we should insert children - // greater than [largestProvenPath] to ancestors of the node containing - // [largestProvenPath] so that we get the expected root ID. - largestProvenPath = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key, branchFactor)) + // greater than [largestProvenKey] to ancestors of the node containing + // [largestProvenKey] so that we get the expected root ID. + largestProvenKey = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key)) } // The key-value pairs (allegedly) proven by [proof]. keyValues := make(map[Key][]byte, len(proof.KeyValues)) for _, keyValue := range proof.KeyValues { - keyValues[ToKey(keyValue.Key, branchFactor)] = keyValue.Value + keyValues[ToKey(keyValue.Key)] = keyValue.Value } // Ensure that the start proof is valid and contains values that // match the key/values that were sent. 
- if err := verifyProofPath(proof.StartProof, smallestProvenPath); err != nil { + if err := verifyProofPath(proof.StartProof, smallestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -358,13 +335,13 @@ func (proof *RangeProof) Verify( // Ensure that the end proof is valid and contains values that // match the key/values that were sent. - if err := verifyProofPath(proof.EndProof, largestProvenPath); err != nil { + if err := verifyProofPath(proof.EndProof, largestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -380,30 +357,30 @@ func (proof *RangeProof) Verify( } // Don't need to lock [view] because nobody else has a reference to it. - view, err := getStandaloneTrieView(ctx, ops, branchFactor) + view, err := getStandaloneView(ctx, ops, tokenSize) if err != nil { return err } // For all the nodes along the edges of the proof, insert children - // < [smallestProvenPath] and > [largestProvenPath] + // < [smallestProvenKey] and > [largestProvenKey] // into the trie so that we get the expected root ID (if this proof is valid). - // By inserting all children < [smallestProvenPath], we prove that there are no keys - // > [smallestProvenPath] but less than the first key given. + // By inserting all children < [smallestProvenKey], we prove that there are no keys + // > [smallestProvenKey] but less than the first key given. // That is, the peer who gave us this proof is not omitting nodes. if err := addPathInfo( view, proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } if err := addPathInfo( view, proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } @@ -444,21 +421,21 @@ func (proof *RangeProof) ToProto() *pb.RangeProof { } } -func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof, bf BranchFactor) error { +func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof) error { if pbProof == nil { return ErrNilRangeProof } proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) for i, protoNode := range pbProof.StartProof { - if err := proof.StartProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { return err } } proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) for i, protoNode := range pbProof.EndProof { - if err := proof.EndProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { return err } } @@ -479,13 +456,13 @@ func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof, bf BranchFactor) func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], keysValues map[Key][]byte) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Skip keys that cannot have a value (enforced by [verifyProofPath]). 
- if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok && node.ValueOrHash.HasValue() { // We didn't get a key-value pair for this key, but the proof node has a value. return ErrProofNodeHasUnincludedValue @@ -505,7 +482,7 @@ type KeyChange struct { Value maybe.Maybe[[]byte] } -// A change proof proves that a set of key-value changes occurred +// ChangeProof proves that a set of key-value changes occurred // between two trie roots, where each key-value pair's key is // between some lower and upper bound (inclusive). type ChangeProof struct { @@ -596,21 +573,21 @@ func (proof *ChangeProof) ToProto() *pb.ChangeProof { } } -func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof, bf BranchFactor) error { +func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof) error { if pbProof == nil { return ErrNilChangeProof } proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) for i, protoNode := range pbProof.StartProof { - if err := proof.StartProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { return err } } proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) for i, protoNode := range pbProof.EndProof { - if err := proof.EndProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { return err } } @@ -639,8 +616,8 @@ func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof, bf BranchFacto } // Verifies that all values present in the [proof]: -// - Are nothing when deleted, not in the db, or the node has path partial byte length -// - if the node's path is within the key range, that has a value that matches the value passed in the change list or in the db +// - Are nothing when deleted, not in the db, or the node has key partial byte length +// - if the node's key is within the key range, that has a value that matches the value passed in the change list or in the db func verifyAllChangeProofKeyValuesPresent( ctx context.Context, db MerkleDB, @@ -651,19 +628,19 @@ func verifyAllChangeProofKeyValuesPresent( ) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Check the value of any node with a key that is within the range. // Skip keys that cannot have a value (enforced by [verifyProofPath]). - if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok { // This value isn't in the list of key-value pairs we got. - dbValue, err := db.GetValue(ctx, nodePath.Bytes()) + dbValue, err := db.GetValue(ctx, nodeKey.Bytes()) if err != nil { - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return err } // This key isn't in the database so proof node should have Nothing. 
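One behavioral fix worth calling out in the hunk above: the database lookup in verifyAllChangeProofKeyValuesPresent now uses errors.Is(err, database.ErrNotFound) rather than a direct comparison, so the sentinel is still recognized when it arrives wrapped. A self-contained sketch of the difference; the lookup helper and sentinel below are illustrative stand-ins, not code from this patch.

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for database.ErrNotFound.
var errNotFound = errors.New("not found")

// lookup wraps the sentinel before returning it, as layered database code often does.
func lookup(key string) ([]byte, error) {
	return nil, fmt.Errorf("get %q: %w", key, errNotFound)
}

func main() {
	_, err := lookup("k")
	fmt.Println(err == errNotFound)          // false: the sentinel is wrapped
	fmt.Println(errors.Is(err, errNotFound)) // true: errors.Is walks the wrap chain
}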
@@ -686,7 +663,7 @@ func (proof *ChangeProof) Empty() bool { len(proof.StartProof) == 0 && len(proof.EndProof) == 0 } -// Exactly one of [ChangeProof] or [RangeProof] is non-nil. +// ChangeOrRangeProof has exactly one of [ChangeProof] or [RangeProof] is non-nil. type ChangeOrRangeProof struct { ChangeProof *ChangeProof RangeProof *RangeProof @@ -754,10 +731,8 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { // loop over all but the last node since it will not have the prefix in exclusion proofs for i := 0; i < len(proof)-1; i++ { - nodeKey := proof[i].Key - if key.HasValue() && nodeKey.branchFactor != key.Value().branchFactor { - return ErrInconsistentBranchFactor - } + currentProofNode := proof[i] + nodeKey := currentProofNode.Key // Because the interface only support []byte keys, // a key with a partial byte should store a value @@ -770,11 +745,8 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { return ErrProofNodeNotForKey } - // each node should have a key that has a matching BranchFactor and is a prefix of the next node's key + // each node should have a key that has a matching TokenConfig and is a prefix of the next node's key nextKey := proof[i+1].Key - if nextKey.branchFactor != nodeKey.branchFactor { - return ErrInconsistentBranchFactor - } if !nextKey.HasStrictPrefix(nodeKey) { return ErrNonIncreasingProofNodes } @@ -819,9 +791,9 @@ func valueOrHashMatches(value maybe.Maybe[[]byte], valueOrHash maybe.Maybe[[]byt // < [insertChildrenLessThan] or > [insertChildrenGreaterThan]. // If [insertChildrenLessThan] is Nothing, no children are < [insertChildrenLessThan]. // If [insertChildrenGreaterThan] is Nothing, no children are > [insertChildrenGreaterThan]. -// Assumes [t.lock] is held. +// Assumes [v.lock] is held. func addPathInfo( - t *trieView, + v *view, proofPath []ProofNode, insertChildrenLessThan maybe.Maybe[Key], insertChildrenGreaterThan maybe.Maybe[Key], @@ -841,7 +813,7 @@ func addPathInfo( // load the node associated with the key or create a new one // pass nothing because we are going to overwrite the value digest below - n, err := t.insert(key, maybe.Nothing[[]byte]()) + n, err := v.insert(key, maybe.Nothing[[]byte]()) if err != nil { return err } @@ -857,21 +829,21 @@ func addPathInfo( // Add [proofNode]'s children which are outside the range // [insertChildrenLessThan, insertChildrenGreaterThan]. - compressedPath := emptyKey(key.branchFactor) + compressedKey := Key{} for index, childID := range proofNode.Children { if existingChild, ok := n.children[index]; ok { - compressedPath = existingChild.compressedKey + compressedKey = existingChild.compressedKey } - childPath := key.AppendExtend(index, compressedPath) - if (shouldInsertLeftChildren && childPath.Less(insertChildrenLessThan.Value())) || - (shouldInsertRightChildren && childPath.Greater(insertChildrenGreaterThan.Value())) { + childKey := key.Extend(ToToken(index, v.tokenSize), compressedKey) + if (shouldInsertLeftChildren && childKey.Less(insertChildrenLessThan.Value())) || + (shouldInsertRightChildren && childKey.Greater(insertChildrenGreaterThan.Value())) { // We didn't set the other values on the child entry, but it doesn't matter. // We only need the IDs to be correct so that the calculated hash is correct. 
n.setChildEntry( index, - child{ + &child{ id: childID, - compressedKey: compressedPath, + compressedKey: compressedKey, }) } } @@ -880,17 +852,18 @@ func addPathInfo( return nil } -// getStandaloneTrieView returns a new view that has nothing in it besides the changes due to [ops] -func getStandaloneTrieView(ctx context.Context, ops []database.BatchOp, factor BranchFactor) (*trieView, error) { +// getStandaloneView returns a new view that has nothing in it besides the changes due to [ops] +func getStandaloneView(ctx context.Context, ops []database.BatchOp, size int) (*view, error) { db, err := newDatabase( ctx, memdb.New(), Config{ - EvictionBatchSize: verificationEvictionBatchSize, - Tracer: trace.Noop, - ValueNodeCacheSize: verificationCacheSize, - IntermediateNodeCacheSize: verificationCacheSize, - BranchFactor: factor, + BranchFactor: tokenSizeToBranchFactor[size], + Tracer: trace.Noop, + ValueNodeCacheSize: verificationCacheSize, + IntermediateNodeCacheSize: verificationCacheSize, + IntermediateWriteBufferSize: verificationCacheSize, + IntermediateWriteBatchSize: verificationCacheSize, }, &mockMetrics{}, ) @@ -898,5 +871,5 @@ func getStandaloneTrieView(ctx context.Context, ops []database.BatchOp, factor B return nil, err } - return newTrieView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) + return newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) } diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index bf9d9da18996..fa047e87d4a4 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -23,8 +23,8 @@ import ( func Test_Proof_Empty(t *testing.T) { proof := &Proof{} - err := proof.Verify(context.Background(), ids.Empty) - require.ErrorIs(t, err, ErrNoProof) + err := proof.Verify(context.Background(), ids.Empty, 4) + require.ErrorIs(t, err, ErrEmptyProof) } func Test_Proof_Simple(t *testing.T) { @@ -43,7 +43,7 @@ func Test_Proof_Simple(t *testing.T) { proof, err := db.GetProof(ctx, []byte{}) require.NoError(err) - require.NoError(proof.Verify(ctx, expectedRoot)) + require.NoError(proof.Verify(ctx, expectedRoot, 4)) } func Test_Proof_Verify_Bad_Data(t *testing.T) { @@ -60,9 +60,16 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { expectedErr: nil, }, { - name: "odd length key with value", + name: "empty", malform: func(proof *Proof) { - proof.Path[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.Path = nil + }, + expectedErr: ErrEmptyProof, + }, + { + name: "odd length key path with value", + malform: func(proof *Proof) { + proof.Path[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -112,7 +119,7 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot()) + err = proof.Verify(context.Background(), db.getMerkleRoot(), 4) require.ErrorIs(err, tt.expectedErr) }) } @@ -150,7 +157,8 @@ func Test_RangeProof_Extra_Value(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), - db.root.id, + db.rootID, + db.tokenSize, )) proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) @@ -159,7 +167,8 @@ func Test_RangeProof_Extra_Value(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), - db.root.id, + db.rootID, + db.tokenSize, ) 
require.ErrorIs(err, ErrInvalidProof) } @@ -177,6 +186,15 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { malform: func(proof *RangeProof) {}, expectedErr: nil, }, + { + name: "empty", + malform: func(proof *RangeProof) { + proof.KeyValues = nil + proof.StartProof = nil + proof.EndProof = nil + }, + expectedErr: ErrEmptyProof, + }, { name: "StartProof: last proof node has missing value", malform: func(proof *RangeProof) { @@ -185,9 +203,9 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { expectedErr: ErrProofValueDoesntMatch, }, { - name: "EndProof: odd length key with value", + name: "EndProof: odd length key path with value", malform: func(proof *RangeProof) { - proof.EndProof[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -221,7 +239,7 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot()) + err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize) require.ErrorIs(err, tt.expectedErr) }) } @@ -253,6 +271,7 @@ func Test_Proof(t *testing.T) { context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value")}, {Key: []byte("key0"), Value: []byte("value0")}, {Key: []byte("key1"), Value: []byte("value1")}, {Key: []byte("key2"), Value: []byte("value2")}, @@ -271,20 +290,20 @@ func Test_Proof(t *testing.T) { require.Len(proof.Path, 3) - require.Equal(ToKey([]byte("key1"), BranchFactor16), proof.Path[2].Key) + require.Equal(ToKey([]byte("key")), proof.Path[0].Key) + require.Equal(maybe.Some([]byte("value")), proof.Path[0].ValueOrHash) + require.Equal(ToKey([]byte("key0")).Take(28), proof.Path[1].Key) + require.True(proof.Path[1].ValueOrHash.IsNothing()) // intermediate node + require.Equal(ToKey([]byte("key1")), proof.Path[2].Key) require.Equal(maybe.Some([]byte("value1")), proof.Path[2].ValueOrHash) - require.Equal(ToKey([]byte{}, BranchFactor16), proof.Path[0].Key) - require.True(proof.Path[0].ValueOrHash.IsNothing()) - expectedRootID, err := trie.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), expectedRootID)) + require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize)) - proof.Path[0].ValueOrHash = maybe.Some([]byte("value2")) - - err = proof.Verify(context.Background(), expectedRootID) - require.ErrorIs(err, ErrInvalidProof) + proof.Path[0].Key = ToKey([]byte("key1")) + err = proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize) + require.ErrorIs(err, ErrProofNodeNotForKey) } func Test_RangeProof_Syntactic_Verify(t *testing.T) { @@ -305,11 +324,11 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { expectedErr: ErrStartAfterEnd, }, { - name: "empty", // Also tests start can be > end if end is nil + name: "empty", start: maybe.Some([]byte{1}), end: maybe.Nothing[[]byte](), proof: &RangeProof{}, - expectedErr: ErrNoMerkleProof, + expectedErr: ErrEmptyProof, }, { name: "unexpected end proof", @@ -321,15 +340,6 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, expectedErr: ErrUnexpectedEndProof, }, - { - name: "should just be root", - start: maybe.Nothing[[]byte](), - end: maybe.Nothing[[]byte](), - proof: &RangeProof{ - EndProof: []ProofNode{{}, {}}, - }, - expectedErr: ErrShouldJustBeRoot, - }, { name: "no end proof; 
has end bound", start: maybe.Some([]byte{1}), @@ -357,7 +367,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1}, Value: []byte{1}}, {Key: []byte{0}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrNonIncreasingValues, }, @@ -369,7 +379,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { KeyValues: []KeyValue{ {Key: []byte{0}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, @@ -381,7 +391,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { KeyValues: []KeyValue{ {Key: []byte{2}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, @@ -395,13 +405,13 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, StartProof: []ProofNode{ { - Key: ToKey([]byte{2}, BranchFactor16), + Key: ToKey([]byte{2}), }, { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, @@ -415,16 +425,16 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, StartProof: []ProofNode{ { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, { - Key: ToKey([]byte{1, 2, 3}, BranchFactor16), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - Key: ToKey([]byte{1, 2, 3, 4}, BranchFactor16), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, @@ -438,39 +448,15 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, EndProof: []ProofNode{ { - Key: ToKey([]byte{2}, BranchFactor16), + Key: ToKey([]byte{2}), }, { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, }, }, expectedErr: ErrProofNodeNotForKey, }, - { - name: "inconsistent branching factor", - start: maybe.Some([]byte{1, 2}), - end: maybe.Some([]byte{1, 2}), - proof: &RangeProof{ - StartProof: []ProofNode{ - { - Key: ToKey([]byte{1}, BranchFactor16), - }, - { - Key: ToKey([]byte{1, 2}, BranchFactor16), - }, - }, - EndProof: []ProofNode{ - { - Key: ToKey([]byte{1}, BranchFactor4), - }, - { - Key: ToKey([]byte{1, 2}, BranchFactor4), - }, - }, - }, - expectedErr: ErrInconsistentBranchFactor, - }, { name: "end proof has node for wrong key", start: maybe.Nothing[[]byte](), @@ -481,13 +467,13 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, EndProof: []ProofNode{ { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, { - Key: ToKey([]byte{1, 2, 3}, BranchFactor16), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - Key: ToKey([]byte{1, 2, 3, 4}, BranchFactor16), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, }, @@ -497,7 +483,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty) + err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -523,9 +509,10 @@ func Test_RangeProof(t *testing.T) { require.Equal([]byte{2}, proof.KeyValues[1].Value) require.Equal([]byte{3}, proof.KeyValues[2].Value) - 
require.Nil(proof.EndProof[0].Key.Bytes()) - require.Equal([]byte{0}, proof.EndProof[1].Key.Bytes()) - require.Equal([]byte{3}, proof.EndProof[2].Key.Bytes()) + require.Len(proof.EndProof, 2) + require.Equal([]byte{0}, proof.EndProof[0].Key.Bytes()) + require.Len(proof.EndProof[0].Children, 5) // 0,1,2,3,4 + require.Equal([]byte{3}, proof.EndProof[1].Key.Bytes()) // only a single node here since others are duplicates in endproof require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) @@ -534,7 +521,8 @@ func Test_RangeProof(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), - db.root.id, + db.rootID, + db.tokenSize, )) } @@ -544,6 +532,8 @@ func Test_RangeProof_BadBounds(t *testing.T) { db, err := getBasicDB() require.NoError(err) + require.NoError(db.Put(nil, nil)) + // non-nil start/end proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{4}), maybe.Some([]byte{3}), 50) require.ErrorIs(err, ErrStartAfterEnd) @@ -578,15 +568,15 @@ func Test_RangeProof_NilStart(t *testing.T) { require.Equal([]byte("value1"), proof.KeyValues[0].Value) require.Equal([]byte("value2"), proof.KeyValues[1].Value) - require.Equal(ToKey([]byte("key2"), BranchFactor16), proof.EndProof[2].Key, BranchFactor16) - require.Equal(ToKey([]byte("key2"), BranchFactor16).Take(7), proof.EndProof[1].Key) - require.Equal(ToKey([]byte(""), BranchFactor16), proof.EndProof[0].Key, BranchFactor16) + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key, db.tokenSize) + require.Equal(ToKey([]byte("key2")).Take(28), proof.EndProof[0].Key) require.NoError(proof.Verify( context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), - db.root.id, + db.rootID, + db.tokenSize, )) } @@ -595,10 +585,16 @@ func Test_RangeProof_NilEnd(t *testing.T) { db, err := getBasicDB() require.NoError(err) + writeBasicBatch(t, db) require.NoError(err) - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{1}), maybe.Nothing[[]byte](), 2) + proof, err := db.GetRangeProof( // Should have keys [1], [2] + context.Background(), + maybe.Some([]byte{1}), + maybe.Nothing[[]byte](), + 2, + ) require.NoError(err) require.NotNil(proof) @@ -612,15 +608,15 @@ func Test_RangeProof_NilEnd(t *testing.T) { require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) - require.Nil(proof.EndProof[0].Key.Bytes()) - require.Equal([]byte{0}, proof.EndProof[1].Key.Bytes()) - require.Equal([]byte{2}, proof.EndProof[2].Key.Bytes()) + require.Equal(db.root.Value().key, proof.EndProof[0].Key) + require.Equal([]byte{2}, proof.EndProof[1].Key.Bytes()) require.NoError(proof.Verify( context.Background(), maybe.Some([]byte{1}), maybe.Nothing[[]byte](), - db.root.id, + db.rootID, + db.tokenSize, )) } @@ -652,30 +648,71 @@ func Test_RangeProof_EmptyValues(t *testing.T) { require.Empty(proof.KeyValues[2].Value) require.Len(proof.StartProof, 1) - require.Equal(ToKey([]byte("key1"), BranchFactor16), proof.StartProof[0].Key, BranchFactor16) + require.Equal(ToKey([]byte("key1")), proof.StartProof[0].Key) - require.Len(proof.EndProof, 3) - require.Equal(ToKey([]byte("key2"), BranchFactor16), proof.EndProof[2].Key, BranchFactor16) - require.Equal(ToKey([]byte{}, BranchFactor16), proof.EndProof[0].Key, BranchFactor16) + require.Len(proof.EndProof, 2) + require.Equal(ToKey([]byte("key1")).Take(28), proof.EndProof[0].Key, db.tokenSize) // root + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key, db.tokenSize) require.NoError(proof.Verify( context.Background(), maybe.Some([]byte("key1")), 
maybe.Some([]byte("key2")), - db.root.id, + db.rootID, + db.tokenSize, )) } func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { require := require.New(t) + seed := time.Now().UnixNano() + t.Logf("Seed: %d", seed) + rand := rand.New(rand.NewSource(seed)) // #nosec G404 db, err := getBasicDB() require.NoError(err) - startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(err) - _, err = db.GetChangeProof(context.Background(), startRoot, ids.Empty, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 50) + roots := []ids.ID{} + for i := 0; i < defaultHistoryLength+1; i++ { + key := make([]byte, 16) + _, _ = rand.Read(key) + require.NoError(db.Put(key, nil)) + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + roots = append(roots, root) + } + + _, err = db.GetChangeProof( + context.Background(), + roots[len(roots)-1], + ids.GenerateTestID(), + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.ErrorIs(err, ErrNoEndRoot) + require.ErrorIs(err, ErrInsufficientHistory) + + _, err = db.GetChangeProof( + context.Background(), + roots[0], + roots[len(roots)-1], + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.NotErrorIs(err, ErrNoEndRoot) require.ErrorIs(err, ErrInsufficientHistory) + + _, err = db.GetChangeProof( + context.Background(), + roots[1], + roots[len(roots)-1], + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.NoError(err) } func Test_ChangeProof_BadBounds(t *testing.T) { @@ -797,7 +834,7 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { { name: "odd length key path with value", malform: func(proof *ChangeProof) { - proof.EndProof[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -836,13 +873,26 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { dbClone, err := getBasicDB() require.NoError(err) - proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), 50) + proof, err := db.GetChangeProof( + context.Background(), + startRoot, + endRoot, + maybe.Some([]byte{2}), + maybe.Some([]byte{3, 0}), + 50, + ) require.NoError(err) require.NotNil(proof) tt.malform(proof) - err = dbClone.VerifyChangeProof(context.Background(), proof, maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot()) + err = dbClone.VerifyChangeProof( + context.Background(), + proof, + maybe.Some([]byte{2}), + maybe.Some([]byte{3, 0}), + db.getMerkleRoot(), + ) require.ErrorIs(err, tt.expectedErr) }) } @@ -870,7 +920,7 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { proof: &ChangeProof{}, start: maybe.Nothing[[]byte](), end: maybe.Nothing[[]byte](), - expectedErr: ErrNoMerkleProof, + expectedErr: ErrEmptyProof, }, { name: "no end proof", @@ -942,8 +992,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { name: "start proof node has wrong prefix", proof: &ChangeProof{ StartProof: []ProofNode{ - {Key: ToKey([]byte{2}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Some([]byte{1, 2, 3}), @@ -954,8 +1004,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { name: "start proof non-increasing", proof: &ChangeProof{ StartProof: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: 
maybe.Some([]byte{1, 2, 3}), @@ -969,8 +1019,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1, 2}, Value: maybe.Some([]byte{0})}, }, EndProof: []ProofNode{ - {Key: ToKey([]byte{2}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Nothing[[]byte](), @@ -984,8 +1034,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1, 2, 3}}, }, EndProof: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Nothing[[]byte](), @@ -1100,119 +1150,118 @@ func TestVerifyProofPath(t *testing.T) { }, { name: "1 element", - path: []ProofNode{{Key: ToKey([]byte{1}, BranchFactor16)}}, + path: []ProofNode{{Key: ToKey([]byte{1})}}, proofKey: maybe.Nothing[Key](), expectedErr: nil, }, { name: "non-increasing keys", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "invalid key", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 4}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node inclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node exclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 3}, BranchFactor16)}, - {Key: ToKey([]byte{1, 3, 4}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 3})}, + {Key: ToKey([]byte{1, 3, 4})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "happy path exclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 4}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: nil, }, { name: "happy path inclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), 
expectedErr: nil, }, { name: "repeat nodes", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 2", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 3", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrProofNodeNotForKey, }, { name: "oddLength key with value", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, { Key: Key{ - value: string([]byte{1, 2, 240}), - tokenLength: 5, - tokenConfig: branchFactorToTokenConfig[BranchFactor16], + value: string([]byte{1, 2, 240}), + length: 20, }, ValueOrHash: maybe.Some([]byte{1}), }, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrPartialByteLengthWithValue, }, } @@ -1240,7 +1289,7 @@ func TestProofNodeUnmarshalProtoInvalidMaybe(t *testing.T) { } var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) + err := unmarshaledNode.UnmarshalProto(protoNode) require.ErrorIs(t, err, ErrInvalidMaybe) } @@ -1257,7 +1306,7 @@ func TestProofNodeUnmarshalProtoInvalidChildBytes(t *testing.T) { } var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) + err := unmarshaledNode.UnmarshalProto(protoNode) require.ErrorIs(t, err, hashing.ErrInvalidHashLen) } @@ -1270,11 +1319,11 @@ func TestProofNodeUnmarshalProtoInvalidChildIndex(t *testing.T) { protoNode := node.ToProto() childID := ids.GenerateTestID() - protoNode.Children[uint32(BranchFactor16)] = childID[:] + protoNode.Children[256] = childID[:] var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) - require.ErrorIs(t, err, ErrInvalidChildIndex) + err := unmarshaledNode.UnmarshalProto(protoNode) + require.ErrorIs(t, err, errChildIndexTooLarge) } func TestProofNodeUnmarshalProtoMissingFields(t *testing.T) { @@ -1321,7 +1370,7 @@ func TestProofNodeUnmarshalProtoMissingFields(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var node ProofNode - err := node.UnmarshalProto(tt.nodeFunc(), BranchFactor16) + err := node.UnmarshalProto(tt.nodeFunc()) require.ErrorIs(t, err, tt.expectedErr) }) 
} @@ -1340,7 +1389,7 @@ func FuzzProofNodeProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. protoNode := node.ToProto() var unmarshaledNode ProofNode - require.NoError(unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16)) + require.NoError(unmarshaledNode.UnmarshalProto(protoNode)) require.Equal(node, unmarshaledNode) // Marshaling again should yield same result. @@ -1397,7 +1446,7 @@ func FuzzRangeProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof RangeProof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. @@ -1459,7 +1508,7 @@ func FuzzChangeProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof ChangeProof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. @@ -1470,7 +1519,7 @@ func FuzzChangeProofProtoMarshalUnmarshal(f *testing.F) { func TestChangeProofUnmarshalProtoNil(t *testing.T) { var proof ChangeProof - err := proof.UnmarshalProto(nil, BranchFactor16) + err := proof.UnmarshalProto(nil) require.ErrorIs(t, err, ErrNilChangeProof) } @@ -1524,7 +1573,7 @@ func TestChangeProofUnmarshalProtoNilValue(t *testing.T) { protoProof.KeyChanges[0].Value = nil var unmarshaledProof ChangeProof - err := unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16) + err := unmarshaledProof.UnmarshalProto(protoProof) require.ErrorIs(t, err, ErrNilMaybeBytes) } @@ -1542,7 +1591,7 @@ func TestChangeProofUnmarshalProtoInvalidMaybe(t *testing.T) { } var proof ChangeProof - err := proof.UnmarshalProto(protoProof, BranchFactor16) + err := proof.UnmarshalProto(protoProof) require.ErrorIs(t, err, ErrInvalidMaybe) } @@ -1575,7 +1624,7 @@ func FuzzProofProtoMarshalUnmarshal(f *testing.F) { } proof := Proof{ - Key: ToKey(key, BranchFactor16), + Key: ToKey(key), Value: value, Path: proofPath, } @@ -1584,7 +1633,7 @@ func FuzzProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof Proof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. 
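As the updated tests above show, unmarshaling a proof no longer needs any trie configuration; the token size only enters at verification time. A hypothetical decode-then-verify helper under those assumptions (the function name, and the proto/pb/sync import path used for pb, are assumptions rather than part of this change):

package example

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	pb "github.com/ava-labs/avalanchego/proto/pb/sync"
	"github.com/ava-labs/avalanchego/x/merkledb"
)

// decodeAndVerify unmarshals a proof without knowing the branch factor, then
// verifies it with the caller-supplied token size (4 for BranchFactor16).
func decodeAndVerify(ctx context.Context, pbProof *pb.Proof, root ids.ID, tokenSize int) (*merkledb.Proof, error) {
	var proof merkledb.Proof
	if err := proof.UnmarshalProto(pbProof); err != nil {
		return nil, err
	}
	if err := proof.Verify(ctx, root, tokenSize); err != nil {
		return nil, err
	}
	return &proof, nil
}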
@@ -1626,7 +1675,7 @@ func TestProofProtoUnmarshal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var proof Proof - err := proof.UnmarshalProto(tt.proof, BranchFactor16) + err := proof.UnmarshalProto(tt.proof) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -1648,6 +1697,9 @@ func FuzzRangeProofInvariants(f *testing.F) { if maxProofLen == 0 { t.SkipNow() } + if numKeyValues == 0 { + t.SkipNow() + } // Make sure proof bounds are valid if len(endBytes) != 0 && bytes.Compare(startBytes, endBytes) > 0 { @@ -1678,15 +1730,19 @@ func FuzzRangeProofInvariants(f *testing.F) { end = maybe.Some(endBytes) } + rootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + rangeProof, err := db.GetRangeProof( context.Background(), start, end, int(maxProofLen), ) - require.NoError(err) - - rootID, err := db.GetMerkleRoot(context.Background()) + if rootID == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + return + } require.NoError(err) require.NoError(rangeProof.Verify( @@ -1694,6 +1750,7 @@ func FuzzRangeProofInvariants(f *testing.F) { start, end, rootID, + db.tokenSize, )) // Make sure the start proof doesn't contain any nodes @@ -1732,14 +1789,14 @@ func FuzzRangeProofInvariants(f *testing.F) { proof := Proof{ Path: rangeProof.EndProof, - Key: ToKey(endBytes, BranchFactor16), + Key: ToKey(endBytes), Value: value, } rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) default: require.NotEmpty(rangeProof.EndProof) @@ -1747,14 +1804,14 @@ func FuzzRangeProofInvariants(f *testing.F) { // EndProof should be a proof for largest key-value. proof := Proof{ Path: rangeProof.EndProof, - Key: ToKey(greatestKV.Key, BranchFactor16), + Key: ToKey(greatestKV.Key), Value: maybe.Some(greatestKV.Value), } rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) } }) } @@ -1781,16 +1838,21 @@ func FuzzProofVerification(f *testing.F) { deletePortion, ) + if db.getMerkleRoot() == ids.Empty { + return + } + proof, err := db.GetProof( context.Background(), key, ) + require.NoError(err) rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) // Insert a new key-value pair newKey := make([]byte, 32) diff --git a/x/merkledb/tracer.go b/x/merkledb/tracer.go index 707028f2c9cd..d4e7a6fce4d7 100644 --- a/x/merkledb/tracer.go +++ b/x/merkledb/tracer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb diff --git a/x/merkledb/trie.go b/x/merkledb/trie.go index d4b01d2de29a..bc2b4db81541 100644 --- a/x/merkledb/trie.go +++ b/x/merkledb/trie.go @@ -1,30 +1,66 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb import ( + "bytes" "context" + "fmt" + + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/maybe" ) +type ViewChanges struct { + BatchOps []database.BatchOp + MapOps map[string]maybe.Maybe[[]byte] + // ConsumeBytes when set to true will skip copying of bytes and assume + // ownership of the provided bytes. + ConsumeBytes bool +} + type MerkleRootGetter interface { - // GetMerkleRoot returns the merkle root of the Trie + // GetMerkleRoot returns the merkle root of the trie. + // Returns ids.Empty if the trie is empty. GetMerkleRoot(ctx context.Context) (ids.ID, error) } type ProofGetter interface { // GetProof generates a proof of the value associated with a particular key, // or a proof of its absence from the trie + // Returns ErrEmptyProof if the trie is empty. GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) } -type ReadOnlyTrie interface { +type trieInternals interface { + // get the value associated with the key in path form + // database.ErrNotFound if the key is not present + getValue(key Key) ([]byte, error) + + // get an editable copy of the node with the given key path + // hasValue indicates which db to look in (value or intermediate) + getEditableNode(key Key, hasValue bool) (*node, error) + + // get the node associated with the key without locking + getNode(key Key, hasValue bool) (*node, error) + + // If this trie is non-empty, returns the root node. + // Must be copied before modification. + // Otherwise returns Nothing. + getRoot() maybe.Maybe[*node] + + getTokenSize() int +} + +type Trie interface { + trieInternals MerkleRootGetter ProofGetter + database.Iteratee // GetValue gets the value associated with the specified key // database.ErrNotFound if the key is not present @@ -34,46 +70,202 @@ type ReadOnlyTrie interface { // database.ErrNotFound if the key is not present GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) - // get the value associated with the key in path form - // database.ErrNotFound if the key is not present - getValue(key Key) ([]byte, error) - - // get an editable copy of the node with the given key path - // hasValue indicates which db to look in (value or intermediate) - getEditableNode(key Key, hasValue bool) (*node, error) - // GetRangeProof returns a proof of up to [maxLength] key-value pairs with // keys in range [start, end]. // If [start] is Nothing, there's no lower bound on the range. // If [end] is Nothing, there's no upper bound on the range. + // Returns ErrEmptyProof if the trie is empty. GetRangeProof(ctx context.Context, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) - database.Iteratee -} - -type ViewChanges struct { - BatchOps []database.BatchOp - MapOps map[string]maybe.Maybe[[]byte] - // ConsumeBytes when set to true will skip copying of bytes and assume - // ownership of the provided bytes. - ConsumeBytes bool -} - -type Trie interface { - ReadOnlyTrie - // NewView returns a new view on top of this Trie where the passed changes // have been applied. NewView( ctx context.Context, changes ViewChanges, - ) (TrieView, error) + ) (View, error) } -type TrieView interface { +type View interface { Trie // CommitToDB writes the changes in this view to the database. // Takes the DB commit lock. CommitToDB(ctx context.Context) error } + +// Returns the nodes along the path to [key]. 
+// The first node is the root, and the last node is either the node with the +// given [key], if it's in the trie, or the node with the largest prefix of +// the [key] if it isn't in the trie. +// Always returns at least the root node. +// Assumes [t] doesn't change while this function is running. +func visitPathToKey(t Trie, key Key, visitNode func(*node) error) error { + maybeRoot := t.getRoot() + if maybeRoot.IsNothing() { + return nil + } + root := maybeRoot.Value() + if !key.HasPrefix(root.key) { + return nil + } + var ( + // all node paths start at the root + currentNode = root + tokenSize = t.getTokenSize() + err error + ) + if err := visitNode(currentNode); err != nil { + return err + } + // while the entire path hasn't been matched + for currentNode.key.length < key.length { + // confirm that a child exists and grab its ID before attempting to load it + nextChildEntry, hasChild := currentNode.children[key.Token(currentNode.key.length, tokenSize)] + + if !hasChild || !key.iteratedHasPrefix(nextChildEntry.compressedKey, currentNode.key.length+tokenSize, tokenSize) { + // there was no child along the path or the child that was there doesn't match the remaining path + return nil + } + // grab the next node along the path + currentNode, err = t.getNode(key.Take(currentNode.key.length+tokenSize+nextChildEntry.compressedKey.length), nextChildEntry.hasValue) + if err != nil { + return err + } + if err := visitNode(currentNode); err != nil { + return err + } + } + return nil +} + +// Returns a proof that [bytesPath] is in or not in trie [t]. +// Assumes [t] doesn't change while this function is running. +func getProof(t Trie, key []byte) (*Proof, error) { + root := t.getRoot() + if root.IsNothing() { + return nil, ErrEmptyProof + } + + proof := &Proof{ + Key: ToKey(key), + } + + var closestNode *node + if err := visitPathToKey(t, proof.Key, func(n *node) error { + closestNode = n + // From root --> node from left --> right. + proof.Path = append(proof.Path, n.asProofNode()) + return nil + }); err != nil { + return nil, err + } + + if len(proof.Path) == 0 { + // No key in [t] is a prefix of [key]. + // The root alone proves that [key] isn't in [t]. + proof.Path = append(proof.Path, root.Value().asProofNode()) + return proof, nil + } + + if closestNode.key == proof.Key { + // There is a node with the given [key]. + proof.Value = maybe.Bind(closestNode.value, slices.Clone[[]byte]) + return proof, nil + } + + // There is no node with the given [key]. + // If there is a child at the index where the node would be + // if it existed, include that child in the proof. + nextIndex := proof.Key.Token(closestNode.key.length, t.getTokenSize()) + child, ok := closestNode.children[nextIndex] + if !ok { + return proof, nil + } + + childNode, err := t.getNode( + closestNode.key.Extend(ToToken(nextIndex, t.getTokenSize()), child.compressedKey), + child.hasValue, + ) + if err != nil { + return nil, err + } + proof.Path = append(proof.Path, childNode.asProofNode()) + return proof, nil +} + +// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. +// The returned proof's [KeyValues] has at most [maxLength] values. +// [maxLength] must be > 0. +// Assumes [t] doesn't change while this function is running. 
+func getRangeProof( + t Trie, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, +) (*RangeProof, error) { + switch { + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1: + return nil, ErrStartAfterEnd + case maxLength <= 0: + return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + case t.getRoot().IsNothing(): + return nil, ErrEmptyProof + } + + result := RangeProof{ + KeyValues: make([]KeyValue, 0, initKeyValuesSize), + } + it := t.NewIteratorWithStart(start.Value()) + for it.Next() && len(result.KeyValues) < maxLength && (end.IsNothing() || bytes.Compare(it.Key(), end.Value()) <= 0) { + // clone the value to prevent editing of the values stored within the trie + result.KeyValues = append(result.KeyValues, KeyValue{ + Key: it.Key(), + Value: slices.Clone(it.Value()), + }) + } + it.Release() + if err := it.Error(); err != nil { + return nil, err + } + + // This proof may not contain all key-value pairs in [start, end] due to size limitations. + // The end proof we provide should be for the last key-value pair in the proof, not for + // the last key-value pair requested, which may not be in this proof. + var ( + endProof *Proof + err error + ) + if len(result.KeyValues) > 0 { + greatestKey := result.KeyValues[len(result.KeyValues)-1].Key + endProof, err = getProof(t, greatestKey) + if err != nil { + return nil, err + } + } else if end.HasValue() { + endProof, err = getProof(t, end.Value()) + if err != nil { + return nil, err + } + } + if endProof != nil { + result.EndProof = endProof.Path + } + + if start.HasValue() { + startProof, err := getProof(t, start.Value()) + if err != nil { + return nil, err + } + result.StartProof = startProof.Path + + // strip out any common nodes to reduce proof size + i := 0 + for ; i < len(result.StartProof) && + i < len(result.EndProof) && + result.StartProof[i].Key == result.EndProof[i].Key; i++ { + } + result.StartProof = result.StartProof[i:] + } + + return &result, nil +} diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 7908c1266af7..f6dc0351f549 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
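The getRangeProof helper just above collects at most maxLength pairs starting from start, builds the end proof for the largest returned key (or for the requested end bound when no pairs fall in range), and strips the nodes the start proof shares with the end proof. Putting it together with RangeProof.Verify, a consumer round trip might look like the sketch below; fetchAndCheckRange is hypothetical, and it assumes db satisfies the package's MerkleDB interface and that the caller knows the configured token size (4 for BranchFactor16).

package example

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/maybe"
	"github.com/ava-labs/avalanchego/x/merkledb"
)

// fetchAndCheckRange asks for at most maxLength key-value pairs in [start, end]
// and verifies the returned proof against a known root.
func fetchAndCheckRange(
	ctx context.Context,
	db merkledb.MerkleDB,
	root ids.ID,
	start, end maybe.Maybe[[]byte],
	maxLength int,
	tokenSize int,
) (*merkledb.RangeProof, error) {
	proof, err := db.GetRangeProof(ctx, start, end, maxLength)
	if err != nil {
		return nil, err // ErrEmptyProof when the trie is empty
	}
	if err := proof.Verify(ctx, start, end, root, tokenSize); err != nil {
		return nil, err
	}
	return proof, nil
}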
package merkledb @@ -18,36 +18,24 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { - return getNodeValueWithBranchFactor(t, key, BranchFactor16) -} - -func getNodeValueWithBranchFactor(t ReadOnlyTrie, key string, bf BranchFactor) ([]byte, error) { - var view *trieView - if asTrieView, ok := t.(*trieView); ok { - if err := asTrieView.calculateNodeIDs(context.Background()); err != nil { - return nil, err - } - view = asTrieView - } - if asDatabases, ok := t.(*merkleDB); ok { - dbView, err := asDatabases.NewView(context.Background(), ViewChanges{}) - if err != nil { +func getNodeValue(t Trie, key string) ([]byte, error) { + path := ToKey([]byte(key)) + if asView, ok := t.(*view); ok { + if err := asView.calculateNodeIDs(context.Background()); err != nil { return nil, err } - view = dbView.(*trieView) } - path := ToKey([]byte(key), bf) var result *node - err := view.visitPathToKey(path, func(n *node) error { + + err := visitPathToKey(t, path, func(n *node) error { result = n return nil }) if err != nil { return nil, err } - if result.key != path || result == nil { + if result == nil || result.key != path { return nil, database.ErrNotFound } @@ -60,7 +48,7 @@ func Test_GetValue_Safety(t *testing.T) { db, err := getBasicDB() require.NoError(err) - trieView, err := db.NewView( + view, err := db.NewView( context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ @@ -70,13 +58,13 @@ func Test_GetValue_Safety(t *testing.T) { ) require.NoError(err) - trieVal, err := trieView.GetValue(context.Background(), []byte{0}) + trieVal, err := view.GetValue(context.Background(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) trieVal[0] = 1 // should still be []byte{0} after edit - trieVal, err = trieView.GetValue(context.Background(), []byte{0}) + trieVal, err = view.GetValue(context.Background(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) } @@ -87,7 +75,7 @@ func Test_GetValues_Safety(t *testing.T) { db, err := getBasicDB() require.NoError(err) - trieView, err := db.NewView( + view, err := db.NewView( context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ @@ -97,7 +85,7 @@ func Test_GetValues_Safety(t *testing.T) { ) require.NoError(err) - trieVals, errs := trieView.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs := view.GetValues(context.Background(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) @@ -105,13 +93,13 @@ func Test_GetValues_Safety(t *testing.T) { require.Equal([]byte{1}, trieVals[0]) // should still be []byte{0} after edit - trieVals, errs = trieView.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs = view.GetValues(context.Background(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) } -func TestTrieViewVisitPathToKey(t *testing.T) { +func TestVisitPathToKey(t *testing.T) { require := require.New(t) db, err := getBasicDB() @@ -119,18 +107,16 @@ func TestTrieViewVisitPathToKey(t *testing.T) { trieIntf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, trieIntf) - trie := trieIntf.(*trieView) + require.IsType(&view{}, trieIntf) + trie := trieIntf.(*view) var nodePath []*node - require.NoError(trie.visitPathToKey(ToKey(nil, BranchFactor16), func(n *node) error { + require.NoError(visitPathToKey(trie, ToKey(nil), func(n *node) error { nodePath = append(nodePath, n) return nil 
})) - // Just the root - require.Len(nodePath, 1) - require.Equal(trie.root, nodePath[0]) + require.Empty(nodePath) // Insert a key key1 := []byte{0} @@ -143,20 +129,19 @@ func TestTrieViewVisitPathToKey(t *testing.T) { }, ) require.NoError(err) - require.IsType(&trieView{}, trieIntf) - trie = trieIntf.(*trieView) + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) require.NoError(trie.calculateNodeIDs(context.Background())) - nodePath = make([]*node, 0, 2) - require.NoError(trie.visitPathToKey(ToKey(key1, BranchFactor16), func(n *node) error { + nodePath = make([]*node, 0, 1) + require.NoError(visitPathToKey(trie, ToKey(key1), func(n *node) error { nodePath = append(nodePath, n) return nil })) - // Root and 1 value - require.Len(nodePath, 2) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) + // 1 value + require.Len(nodePath, 1) + require.Equal(ToKey(key1), nodePath[0].key) // Insert another key which is a child of the first key2 := []byte{0, 1} @@ -169,20 +154,24 @@ func TestTrieViewVisitPathToKey(t *testing.T) { }, ) require.NoError(err) - require.IsType(&trieView{}, trieIntf) - trie = trieIntf.(*trieView) + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) require.NoError(trie.calculateNodeIDs(context.Background())) - nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key2, BranchFactor16), func(n *node) error { + nodePath = make([]*node, 0, 2) + require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { nodePath = append(nodePath, n) return nil })) - require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) - + require.Len(nodePath, 2) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[0].key) + require.Equal(ToKey(key2), nodePath[1].key) + + // Trie is: + // [0] + // | + // [0,1] // Insert a key which shares no prefix with the others key3 := []byte{255} trieIntf, err = trie.NewView( @@ -194,51 +183,60 @@ func TestTrieViewVisitPathToKey(t *testing.T) { }, ) require.NoError(err) - require.IsType(&trieView{}, trieIntf) - trie = trieIntf.(*trieView) + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) require.NoError(trie.calculateNodeIDs(context.Background())) + // Trie is: + // [] + // / \ + // [0] [255] + // | + // [0,1] nodePath = make([]*node, 0, 2) - require.NoError(trie.visitPathToKey(ToKey(key3, BranchFactor16), func(n *node) error { + require.NoError(visitPathToKey(trie, ToKey(key3), func(n *node) error { nodePath = append(nodePath, n) return nil })) + require.Len(nodePath, 2) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key3, BranchFactor16), nodePath[1].key) + require.Equal(trie.root.Value(), nodePath[0]) + require.Zero(trie.root.Value().key.length) + require.Equal(ToKey(key3), nodePath[1].key) // Other key path not affected nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key2, BranchFactor16), func(n *node) error { + require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { nodePath = append(nodePath, n) return nil })) require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), 
nodePath[2].key) // Gets closest node when key doesn't exist key4 := []byte{0, 1, 2} nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key4, BranchFactor16), func(n *node) error { + require.NoError(visitPathToKey(trie, ToKey(key4), func(n *node) error { nodePath = append(nodePath, n) return nil })) + require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Gets just root when key doesn't exist and no key shares a prefix key5 := []byte{128} nodePath = make([]*node, 0, 1) - require.NoError(trie.visitPathToKey(ToKey(key5, BranchFactor16), func(n *node) error { + require.NoError(visitPathToKey(trie, ToKey(key5), func(n *node) error { nodePath = append(nodePath, n) return nil })) require.Len(nodePath, 1) - require.Equal(trie.root, nodePath[0]) + require.Equal(trie.root.Value(), nodePath[0]) } func Test_Trie_ViewOnCommitedView(t *testing.T) { @@ -288,7 +286,7 @@ func Test_Trie_WriteToDB(t *testing.T) { trieIntf1, err := dbTrie.NewView(context.Background(), ViewChanges{}) require.NoError(err) - trie1 := trieIntf1.(*trieView) + trie1 := trieIntf1.(*view) // value hasn't been inserted so shouldn't exist value, err := trie1.GetValue(context.Background(), []byte("key")) @@ -304,7 +302,7 @@ func Test_Trie_WriteToDB(t *testing.T) { }, ) require.NoError(err) - trie2 := trieIntf2.(*trieView) + trie2 := trieIntf2.(*view) value, err = getNodeValue(trie2, "key") require.NoError(err) @@ -320,7 +318,7 @@ func Test_Trie_WriteToDB(t *testing.T) { rawBytes, err := dbTrie.baseDB.Get(prefixedKey) require.NoError(err) - node, err := parseNode(ToKey(key, BranchFactor16), rawBytes) + node, err := parseNode(ToKey(key), rawBytes) require.NoError(err) require.Equal([]byte("value"), node.value.Value()) } @@ -439,7 +437,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { }, ) require.NoError(err) - trie := trieIntf.(*trieView) + trie := trieIntf.(*view) value, err := getNodeValue(trie, "key") require.NoError(err) @@ -454,7 +452,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { }, ) require.NoError(err) - trie = trieIntf.(*trieView) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key") require.NoError(err) @@ -473,7 +471,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { }, ) require.NoError(err) - trie = trieIntf.(*trieView) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key") require.NoError(err) @@ -488,7 +486,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.Equal([]byte("value12"), value) } -func Test_Trie_CompressedPaths(t *testing.T) { +func Test_Trie_CompressedKeys(t *testing.T) { require := require.New(t) dbTrie, err := getBasicDB() @@ -503,7 +501,7 @@ func Test_Trie_CompressedPaths(t *testing.T) { }, ) require.NoError(err) - trie := trieIntf.(*trieView) + trie := trieIntf.(*view) value, err := getNodeValue(trie, "key12") require.NoError(err) @@ -518,7 +516,7 @@ func Test_Trie_CompressedPaths(t *testing.T) { }, ) require.NoError(err) - trie = trieIntf.(*trieView) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key12") require.NoError(err) @@ -537,7 +535,7 @@ func Test_Trie_CompressedPaths(t *testing.T) { }, ) require.NoError(err) - trie = trieIntf.(*trieView) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key12") require.NoError(err) @@ -587,9 +585,9 @@ func 
Test_Trie_HashCountOnBranch(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) - key1, key2, keyPrefix := []byte("key12"), []byte("key1F"), []byte("key1") + key1, key2, keyPrefix := []byte("12"), []byte("1F"), []byte("1") - trieIntf, err := dbTrie.NewView( + view1, err := dbTrie.NewView( context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ @@ -597,11 +595,13 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { }, }) require.NoError(err) - trie := trieIntf.(*trieView) + + // trie is: + // [1] // create new node with common prefix whose children // are key1, key2 - view2, err := trie.NewView( + view2, err := view1.NewView( context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ @@ -610,20 +610,27 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { }) require.NoError(err) + // trie is: + // [1] + // / \ + // [12] [1F] + // clear the hash count to ignore setup dbTrie.metrics.(*mockMetrics).hashCount = 0 - // force the new root to calculate + // calculate the root _, err = view2.GetMerkleRoot(context.Background()) require.NoError(err) - // Make sure the branch node with the common prefix was created. + // Make sure the root is an intermediate node with the expected common prefix. // Note it's only created on call to GetMerkleRoot, not in NewView. - _, err = view2.getEditableNode(ToKey(keyPrefix, BranchFactor16), false) + prefixNode, err := view2.getEditableNode(ToKey(keyPrefix), false) require.NoError(err) + root := view2.getRoot().Value() + require.Equal(root, prefixNode) + require.Len(root.children, 2) - // only hashes the new branch node, the new child node, and root - // shouldn't hash the existing node + // Had to hash each of the new nodes ("12" and "1F") and the new root require.Equal(int64(3), dbTrie.metrics.(*mockMetrics).hashCount) } @@ -665,7 +672,16 @@ func Test_Trie_HashCountOnDelete(t *testing.T) { require.NoError(err) require.NoError(view.CommitToDB(context.Background())) - // the root is the only updated node so only one new hash + // trie is: + // [key0] (first 28 bits) + // / \ + // [key1] [key2] + root := view.getRoot().Value() + expectedRootKey := ToKey([]byte("key0")).Take(28) + require.Equal(expectedRootKey, root.key) + require.Len(root.children, 2) + + // Had to hash the new root but not [key1] or [key2] nodes require.Equal(oldCount+1, dbTrie.metrics.(*mockMetrics).hashCount) } @@ -759,10 +775,12 @@ func Test_Trie_ChainDeletion(t *testing.T) { ) require.NoError(err) - require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) - root, err := newTrie.getEditableNode(emptyKey(BranchFactor16), false) + require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + maybeRoot := newTrie.getRoot() require.NoError(err) - require.Len(root.children, 1) + require.True(maybeRoot.HasValue()) + require.Equal([]byte("value0"), maybeRoot.Value().value.Value()) + require.Len(maybeRoot.Value().children, 1) newTrie, err = newTrie.NewView( context.Background(), @@ -776,11 +794,11 @@ func Test_Trie_ChainDeletion(t *testing.T) { }, ) require.NoError(err) - require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) - root, err = newTrie.getEditableNode(emptyKey(BranchFactor16), false) - require.NoError(err) - // since all values have been deleted, the nodes should have been cleaned up - require.Empty(root.children) + require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + + // trie should be empty + root := newTrie.getRoot() + require.False(root.HasValue()) } func 
Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { @@ -809,15 +827,15 @@ func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { sibling2, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.False(sibling1.(*trieView).isInvalid()) - require.False(sibling2.(*trieView).isInvalid()) + require.False(sibling1.(*view).isInvalid()) + require.False(sibling2.(*view).isInvalid()) require.NoError(view1.CommitToDB(context.Background())) require.NoError(view2.CommitToDB(context.Background())) - require.True(sibling1.(*trieView).isInvalid()) - require.True(sibling2.(*trieView).isInvalid()) - require.False(view2.(*trieView).isInvalid()) + require.True(sibling1.(*view).isInvalid()) + require.True(sibling2.(*view).isInvalid()) + require.False(view2.(*view).isInvalid()) } func Test_Trie_NodeCollapse(t *testing.T) { @@ -827,54 +845,63 @@ func Test_Trie_NodeCollapse(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) + kvs := []database.BatchOp{ + {Key: []byte("k"), Value: []byte("value0")}, + {Key: []byte("ke"), Value: []byte("value1")}, + {Key: []byte("key"), Value: []byte("value2")}, + {Key: []byte("key1"), Value: []byte("value3")}, + {Key: []byte("key2"), Value: []byte("value4")}, + } + trie, err := dbTrie.NewView( context.Background(), ViewChanges{ - BatchOps: []database.BatchOp{ - {Key: []byte("k"), Value: []byte("value0")}, - {Key: []byte("ke"), Value: []byte("value1")}, - {Key: []byte("key"), Value: []byte("value2")}, - {Key: []byte("key1"), Value: []byte("value3")}, - {Key: []byte("key2"), Value: []byte("value4")}, - }, + BatchOps: kvs, }, ) require.NoError(err) - require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) - root, err := trie.getEditableNode(emptyKey(BranchFactor16), false) - require.NoError(err) - require.Len(root.children, 1) + require.NoError(trie.(*view).calculateNodeIDs(context.Background())) - root, err = trie.getEditableNode(emptyKey(BranchFactor16), false) - require.NoError(err) - require.Len(root.children, 1) + for _, kv := range kvs { + node, err := trie.getEditableNode(ToKey(kv.Key), true) + require.NoError(err) - firstNode, err := trie.getEditableNode(getSingleChildKey(root), true) - require.NoError(err) - require.Len(firstNode.children, 1) + require.Equal(kv.Value, node.value.Value()) + } + + // delete some values + deletedKVs, remainingKVs := kvs[:3], kvs[3:] + deleteOps := make([]database.BatchOp, len(deletedKVs)) + for i, kv := range deletedKVs { + deleteOps[i] = database.BatchOp{ + Key: kv.Key, + Delete: true, + } + } - // delete the middle values trie, err = trie.NewView( context.Background(), ViewChanges{ - BatchOps: []database.BatchOp{ - {Key: []byte("k"), Delete: true}, - {Key: []byte("ke"), Delete: true}, - {Key: []byte("key"), Delete: true}, - }, + BatchOps: deleteOps, }, ) require.NoError(err) - require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) - root, err = trie.getEditableNode(emptyKey(BranchFactor16), false) - require.NoError(err) - require.Len(root.children, 1) + require.NoError(trie.(*view).calculateNodeIDs(context.Background())) - firstNode, err = trie.getEditableNode(getSingleChildKey(root), true) - require.NoError(err) - require.Len(firstNode.children, 2) + for _, kv := range deletedKVs { + _, err := trie.getEditableNode(ToKey(kv.Key), true) + require.ErrorIs(err, database.ErrNotFound) + } + + // make sure the other values are still there + for _, kv := range remainingKVs { + node, err := trie.getEditableNode(ToKey(kv.Key), true) + require.NoError(err) + + 
require.Equal(kv.Value, node.value.Value()) + } } func Test_Trie_MultipleStates(t *testing.T) { @@ -994,8 +1021,8 @@ func TestNewViewOnCommittedView(t *testing.T) { // Create a view view1Intf, err := db.NewView(context.Background(), ViewChanges{BatchOps: []database.BatchOp{{Key: []byte{1}, Value: []byte{1}}}}) require.NoError(err) - require.IsType(&trieView{}, view1Intf) - view1 := view1Intf.(*trieView) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // view1 // | @@ -1019,8 +1046,8 @@ func TestNewViewOnCommittedView(t *testing.T) { // Create a new view on the committed view view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view2Intf) - view2 := view2Intf.(*trieView) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // view2 // | @@ -1041,8 +1068,8 @@ func TestNewViewOnCommittedView(t *testing.T) { // Make another view view3Intf, err := view2.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view3Intf) - view3 := view3Intf.(*trieView) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -1087,7 +1114,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view3.parentTrie) } -func Test_TrieView_NewView(t *testing.T) { +func Test_View_NewView(t *testing.T) { require := require.New(t) db, err := getBasicDB() @@ -1096,14 +1123,14 @@ func Test_TrieView_NewView(t *testing.T) { // Create a view view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view1Intf) - view1 := view1Intf.(*trieView) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // Create a view atop view1 view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view2Intf) - view2 := view2Intf.(*trieView) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // view2 // | @@ -1122,8 +1149,8 @@ func Test_TrieView_NewView(t *testing.T) { // Make another view atop view1 view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view3Intf) - view3 := view3Intf.(*trieView) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -1139,12 +1166,12 @@ func Test_TrieView_NewView(t *testing.T) { require.NotContains(view1.childViews, view3) // Assert that NewPreallocatedView on an invalid view fails - invalidView := &trieView{invalidated: true} + invalidView := &view{invalidated: true} _, err = invalidView.NewView(context.Background(), ViewChanges{}) require.ErrorIs(err, ErrInvalid) } -func TestTrieViewInvalidate(t *testing.T) { +func TestViewInvalidate(t *testing.T) { require := require.New(t) db, err := getBasicDB() @@ -1153,19 +1180,19 @@ func TestTrieViewInvalidate(t *testing.T) { // Create a view view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view1Intf) - view1 := view1Intf.(*trieView) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // Create 2 views atop view1 view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view2Intf) - view2 := view2Intf.(*trieView) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - require.IsType(&trieView{}, view3Intf) - view3 := 
view3Intf.(*trieView) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view2 view3 // | / @@ -1215,9 +1242,9 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { // Returns the path of the only child of this node. // Assumes this node has exactly one child. -func getSingleChildKey(n *node) Key { +func getSingleChildKey(n *node, tokenSize int) Key { for index, entry := range n.children { - return n.key.AppendExtend(index, entry.compressedKey) + return n.key.Extend(ToToken(index, tokenSize), entry.compressedKey) } return Key{} } @@ -1227,7 +1254,7 @@ func TestTrieCommitToDB(t *testing.T) { type test struct { name string - trieFunc func() TrieView + trieFunc func() View expectedErr error } @@ -1238,20 +1265,20 @@ func TestTrieCommitToDB(t *testing.T) { tests := []test{ { name: "invalid", - trieFunc: func() TrieView { - view, err := db.NewView(context.Background(), ViewChanges{}) + trieFunc: func() View { + nView, err := db.NewView(context.Background(), ViewChanges{}) r.NoError(err) // Invalidate the view - view.(*trieView).invalidate() + nView.(*view).invalidate() - return view + return nView }, expectedErr: ErrInvalid, }, { name: "committed", - trieFunc: func() TrieView { + trieFunc: func() View { view, err := db.NewView(context.Background(), ViewChanges{}) r.NoError(err) @@ -1264,14 +1291,14 @@ func TestTrieCommitToDB(t *testing.T) { }, { name: "parent not database", - trieFunc: func() TrieView { - view, err := db.NewView(context.Background(), ViewChanges{}) + trieFunc: func() View { + nView, err := db.NewView(context.Background(), ViewChanges{}) r.NoError(err) // Change the parent - view.(*trieView).parentTrie = &trieView{} + nView.(*view).parentTrie = &view{} - return view + return nView }, expectedErr: ErrParentNotDatabase, }, diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go deleted file mode 100644 index 3422379a20cc..000000000000 --- a/x/merkledb/trieview.go +++ /dev/null @@ -1,991 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package merkledb - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "golang.org/x/exp/slices" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/maybe" -) - -const ( - initKeyValuesSize = 256 - defaultPreallocationSize = 100 -) - -var ( - _ TrieView = (*trieView)(nil) - - ErrCommitted = errors.New("view has been committed") - ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") - ErrPartialByteLengthWithValue = errors.New( - "the underlying db only supports whole number of byte keys, so cannot record changes with partial byte lengths", - ) - ErrGetPathToFailure = errors.New("GetPathTo failed to return the closest node") - ErrStartAfterEnd = errors.New("start key > end key") - ErrNoValidRoot = errors.New("a valid root was not provided to the trieView constructor") - ErrParentNotDatabase = errors.New("parent trie is not database") - ErrNodesAlreadyCalculated = errors.New("cannot modify the trie after the node changes have been calculated") -) - -type trieView struct { - // If true, this view has been committed. - // [commitLock] must be held while accessing this field. 
- committed bool - commitLock sync.RWMutex - - // tracking bool to enforce that no changes are made to the trie after the nodes have been calculated - nodesAlreadyCalculated utils.Atomic[bool] - - // calculateNodesOnce is a once to ensure that node calculation only occurs a single time - calculateNodesOnce sync.Once - - // Controls the trie's validity related fields. - // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. - // Only use to lock current trieView or descendants of the current trieView - // DO NOT grab the [validityTrackingLock] of any ancestor trie while this is held. - validityTrackingLock sync.RWMutex - - // If true, this view has been invalidated and can't be used. - // - // Invariant: This view is marked as invalid before any of its ancestors change. - // Since we ensure that all subviews are marked invalid before making an invalidating change - // then if we are still valid at the end of the function, then no corrupting changes could have - // occurred during execution. - // Namely, if we have a method with: - // - // *Code Accessing Ancestor State* - // - // if t.isInvalid() { - // return ErrInvalid - // } - // return [result] - // - // If the invalidated check passes, then we're guaranteed that no ancestor changes occurred - // during the code that accessed ancestor state and the result of that work is still valid - // - // [validityTrackingLock] must be held when reading/writing this field. - invalidated bool - - // the uncommitted parent trie of this view - // [validityTrackingLock] must be held when reading/writing this field. - parentTrie TrieView - - // The valid children of this trie. - // [validityTrackingLock] must be held when reading/writing this field. - childViews []*trieView - - // Changes made to this view. - // May include nodes that haven't been updated - // but will when their ID is recalculated. - changes *changeSummary - - db *merkleDB - - // The root of the trie represented by this view. - root *node -} - -// NewView returns a new view on top of this Trie where the passed changes -// have been applied. -// Adds the new view to [t.childViews]. -// Assumes [t.commitLock] isn't held. -func (t *trieView) NewView( - ctx context.Context, - changes ViewChanges, -) (TrieView, error) { - if t.isInvalid() { - return nil, ErrInvalid - } - t.commitLock.RLock() - defer t.commitLock.RUnlock() - - if t.committed { - return t.getParentTrie().NewView(ctx, changes) - } - - if err := t.calculateNodeIDs(ctx); err != nil { - return nil, err - } - - newView, err := newTrieView(t.db, t, changes) - if err != nil { - return nil, err - } - - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - if t.invalidated { - return nil, ErrInvalid - } - t.childViews = append(t.childViews, newView) - - return newView, nil -} - -// Creates a new view with the given [parentTrie]. -// Assumes [parentTrie] isn't locked. 
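// A minimal sketch (not from the repository) of the read-then-revalidate pattern
// described by the invalidation invariant in the trieView struct above: read from
// an ancestor, then re-check isInvalid() so a concurrent change that invalidated
// this view cannot leak a stale result. getFromAncestor is a hypothetical helper;
// the other names are the ones used in this file.
func (t *trieView) getFromAncestor(key Key) ([]byte, error) {
	// Read state that lives in an ancestor view.
	val, err := t.getParentTrie().getValue(key)
	if err != nil {
		return nil, err
	}
	// If an ancestor changed while we were reading, this view was already
	// marked invalid, so the value must be discarded.
	if t.isInvalid() {
		return nil, ErrInvalid
	}
	return val, nil
}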
-func newTrieView( - db *merkleDB, - parentTrie TrieView, - changes ViewChanges, -) (*trieView, error) { - root, err := parentTrie.getEditableNode(db.rootKey, false /* hasValue */) - if err != nil { - if err == database.ErrNotFound { - return nil, ErrNoValidRoot - } - return nil, err - } - - newView := &trieView{ - root: root, - db: db, - parentTrie: parentTrie, - changes: newChangeSummary(len(changes.BatchOps) + len(changes.MapOps)), - } - - for _, op := range changes.BatchOps { - key := op.Key - if !changes.ConsumeBytes { - key = slices.Clone(op.Key) - } - - newVal := maybe.Nothing[[]byte]() - if !op.Delete { - newVal = maybe.Some(op.Value) - if !changes.ConsumeBytes { - newVal = maybe.Some(slices.Clone(op.Value)) - } - } - if err := newView.recordValueChange(db.toKey(key), newVal); err != nil { - return nil, err - } - } - for key, val := range changes.MapOps { - if !changes.ConsumeBytes { - val = maybe.Bind(val, slices.Clone[[]byte]) - } - if err := newView.recordValueChange(db.toKey(stringToByteSlice(key)), val); err != nil { - return nil, err - } - } - return newView, nil -} - -// Creates a view of the db at a historical root using the provided changes -func newHistoricalTrieView( - db *merkleDB, - changes *changeSummary, -) (*trieView, error) { - if changes == nil { - return nil, ErrNoValidRoot - } - - passedRootChange, ok := changes.nodes[db.rootKey] - if !ok { - return nil, ErrNoValidRoot - } - - newView := &trieView{ - root: passedRootChange.after, - db: db, - parentTrie: db, - changes: changes, - } - // since this is a set of historical changes, all nodes have already been calculated - // since no new changes have occurred, no new calculations need to be done - newView.calculateNodesOnce.Do(func() {}) - newView.nodesAlreadyCalculated.Set(true) - return newView, nil -} - -// Recalculates the node IDs for all changed nodes in the trie. -// Cancelling [ctx] doesn't cancel calculation. It's used only for tracing. -func (t *trieView) calculateNodeIDs(ctx context.Context) error { - var err error - t.calculateNodesOnce.Do(func() { - if t.isInvalid() { - err = ErrInvalid - return - } - defer t.nodesAlreadyCalculated.Set(true) - - // We wait to create the span until after checking that we need to actually - // calculateNodeIDs to make traces more useful (otherwise there may be a span - // per key modified even though IDs are not re-calculated). - _, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.calculateNodeIDs") - defer span.End() - - // add all the changed key/values to the nodes of the trie - for key, change := range t.changes.values { - if change.after.IsNothing() { - // Note we're setting [err] defined outside this function. - if err = t.remove(key); err != nil { - return - } - // Note we're setting [err] defined outside this function. - } else if _, err = t.insert(key, change.after); err != nil { - return - } - } - - _ = t.db.calculateNodeIDsSema.Acquire(context.Background(), 1) - t.calculateNodeIDsHelper(t.root) - t.db.calculateNodeIDsSema.Release(1) - t.changes.rootID = t.root.id - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - err = ErrInvalid - return - } - }) - return err -} - -// Calculates the ID of all descendants of [n] which need to be recalculated, -// and then calculates the ID of [n] itself. -func (t *trieView) calculateNodeIDsHelper(n *node) { - var ( - // We use [wg] to wait until all descendants of [n] have been updated. 
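// A hedged sketch of how the ConsumeBytes flag handled in newTrieView above is
// meant to be used: when it is false, the view clones keys and values, so callers
// may reuse their buffers after NewView returns. newViewReusingBuffer is a
// hypothetical caller; ViewChanges, BatchOp, and TrieView are the types shown in
// this diff, and the context/database imports are assumed.
func newViewReusingBuffer(ctx context.Context, parent TrieView, buf []byte) (TrieView, error) {
	changes := ViewChanges{
		BatchOps: []database.BatchOp{{Key: buf, Value: []byte("v")}},
		// ConsumeBytes is false, so the view clones [buf] and the value.
		ConsumeBytes: false,
	}
	v, err := parent.NewView(ctx, changes)
	if err != nil {
		return nil, err
	}
	buf[0] = 0 // safe: the view holds its own copy of the key
	return v, nil
}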
- wg sync.WaitGroup - updatedChildren = make(chan *node, len(n.children)) - ) - - for childIndex, child := range n.children { - childPath := n.key.AppendExtend(childIndex, child.compressedKey) - childNodeChange, ok := t.changes.nodes[childPath] - if !ok { - // This child wasn't changed. - continue - } - - wg.Add(1) - calculateChildID := func() { - defer wg.Done() - - t.calculateNodeIDsHelper(childNodeChange.after) - - // Note that this will never block - updatedChildren <- childNodeChange.after - } - - // Try updating the child and its descendants in a goroutine. - if ok := t.db.calculateNodeIDsSema.TryAcquire(1); ok { - go func() { - calculateChildID() - t.db.calculateNodeIDsSema.Release(1) - }() - } else { - // We're at the goroutine limit; do the work in this goroutine. - calculateChildID() - } - } - - // Wait until all descendants of [n] have been updated. - wg.Wait() - close(updatedChildren) - - keyLength := n.key.tokenLength - for updatedChild := range updatedChildren { - index := updatedChild.key.Token(keyLength) - n.setChildEntry(index, child{ - compressedKey: n.children[index].compressedKey, - id: updatedChild.id, - hasValue: updatedChild.hasValue(), - }) - } - - // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. - n.calculateID(t.db.metrics) -} - -// GetProof returns a proof that [bytesPath] is in or not in trie [t]. -func (t *trieView) GetProof(ctx context.Context, key []byte) (*Proof, error) { - _, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.GetProof") - defer span.End() - - if err := t.calculateNodeIDs(ctx); err != nil { - return nil, err - } - - return t.getProof(ctx, key) -} - -// Returns a proof that [bytesPath] is in or not in trie [t]. -func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { - _, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.getProof") - defer span.End() - - proof := &Proof{ - Key: t.db.toKey(key), - } - - var closestNode *node - if err := t.visitPathToKey(proof.Key, func(n *node) error { - closestNode = n - proof.Path = append(proof.Path, n.asProofNode()) - return nil - }); err != nil { - return nil, err - } - - if closestNode.key == proof.Key { - // There is a node with the given [key]. - proof.Value = maybe.Bind(closestNode.value, slices.Clone[[]byte]) - return proof, nil - } - - // There is no node with the given [key]. - // If there is a child at the index where the node would be - // if it existed, include that child in the proof. - nextIndex := proof.Key.Token(closestNode.key.tokenLength) - child, ok := closestNode.children[nextIndex] - if !ok { - return proof, nil - } - - childNode, err := t.getNodeWithID( - child.id, - closestNode.key.AppendExtend(nextIndex, child.compressedKey), - child.hasValue, - ) - if err != nil { - return nil, err - } - proof.Path = append(proof.Path, childNode.asProofNode()) - if t.isInvalid() { - return nil, ErrInvalid - } - return proof, nil -} - -// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. -// The returned proof's [KeyValues] has at most [maxLength] values. -// [maxLength] must be > 0. 
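// The bounded-concurrency pattern used by calculateNodeIDsHelper above, shown in
// isolation as a sketch: try to claim a semaphore slot and recurse in a new
// goroutine, otherwise do the work inline so progress never blocks on the limit.
// hashChildren and the hash callback are hypothetical; semaphore is
// golang.org/x/sync/semaphore and sync is the standard library.
func hashChildren(sema *semaphore.Weighted, children []*node, hash func(*node)) {
	var wg sync.WaitGroup
	for _, c := range children {
		c := c
		if sema.TryAcquire(1) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				defer sema.Release(1)
				hash(c)
			}()
		} else {
			// At the goroutine limit; hash in the current goroutine.
			hash(c)
		}
	}
	wg.Wait()
}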
-func (t *trieView) GetRangeProof( - ctx context.Context, - start maybe.Maybe[[]byte], - end maybe.Maybe[[]byte], - maxLength int, -) (*RangeProof, error) { - ctx, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.GetRangeProof") - defer span.End() - - if start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1 { - return nil, ErrStartAfterEnd - } - - if maxLength <= 0 { - return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) - } - - if err := t.calculateNodeIDs(ctx); err != nil { - return nil, err - } - - var result RangeProof - - result.KeyValues = make([]KeyValue, 0, initKeyValuesSize) - it := t.NewIteratorWithStart(start.Value()) - for it.Next() && len(result.KeyValues) < maxLength && (end.IsNothing() || bytes.Compare(it.Key(), end.Value()) <= 0) { - // clone the value to prevent editing of the values stored within the trie - result.KeyValues = append(result.KeyValues, KeyValue{ - Key: it.Key(), - Value: slices.Clone(it.Value()), - }) - } - it.Release() - if err := it.Error(); err != nil { - return nil, err - } - - // This proof may not contain all key-value pairs in [start, end] due to size limitations. - // The end proof we provide should be for the last key-value pair in the proof, not for - // the last key-value pair requested, which may not be in this proof. - var ( - endProof *Proof - err error - ) - if len(result.KeyValues) > 0 { - greatestKey := result.KeyValues[len(result.KeyValues)-1].Key - endProof, err = t.getProof(ctx, greatestKey) - if err != nil { - return nil, err - } - } else if end.HasValue() { - endProof, err = t.getProof(ctx, end.Value()) - if err != nil { - return nil, err - } - } - if endProof != nil { - result.EndProof = endProof.Path - } - - if start.HasValue() { - startProof, err := t.getProof(ctx, start.Value()) - if err != nil { - return nil, err - } - result.StartProof = startProof.Path - - // strip out any common nodes to reduce proof size - i := 0 - for ; i < len(result.StartProof) && - i < len(result.EndProof) && - result.StartProof[i].Key == result.EndProof[i].Key; i++ { - } - result.StartProof = result.StartProof[i:] - } - - if len(result.StartProof) == 0 && len(result.EndProof) == 0 && len(result.KeyValues) == 0 { - // If the range is empty, return the root proof. - rootProof, err := t.getProof(ctx, rootKey) - if err != nil { - return nil, err - } - result.EndProof = rootProof.Path - } - - if t.isInvalid() { - return nil, ErrInvalid - } - return &result, nil -} - -// CommitToDB commits changes from this trie to the underlying DB. -func (t *trieView) CommitToDB(ctx context.Context) error { - ctx, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.CommitToDB") - defer span.End() - - t.db.commitLock.Lock() - defer t.db.commitLock.Unlock() - - return t.commitToDB(ctx) -} - -// Commits the changes from [trieToCommit] to this view, -// this view to its parent, and so on until committing to the db. -// Assumes [t.db.commitLock] is held. -func (t *trieView) commitToDB(ctx context.Context) error { - t.commitLock.Lock() - defer t.commitLock.Unlock() - - ctx, span := t.db.infoTracer.Start(ctx, "MerkleDB.trieview.commitToDB", oteltrace.WithAttributes( - attribute.Int("changeCount", len(t.changes.values)), - )) - defer span.End() - - // Call this here instead of in [t.db.commitChanges] - // because doing so there would be a deadlock. 
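// A sketch of the end-proof key choice made by GetRangeProof above: prove the
// greatest key actually included in the (possibly truncated) response, falling
// back to the requested end bound only when nothing was included. endProofKey is
// a hypothetical helper; KeyValue and maybe come from this package and its imports.
func endProofKey(kvs []KeyValue, end maybe.Maybe[[]byte]) maybe.Maybe[[]byte] {
	if len(kvs) > 0 {
		return maybe.Some(kvs[len(kvs)-1].Key)
	}
	return end
}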
- if err := t.calculateNodeIDs(ctx); err != nil { - return err - } - - if err := t.db.commitChanges(ctx, t); err != nil { - return err - } - - t.committed = true - - return nil -} - -// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) isInvalid() bool { - t.validityTrackingLock.RLock() - defer t.validityTrackingLock.RUnlock() - - return t.invalidated -} - -// Invalidates this view and all descendants. -// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) invalidate() { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - t.invalidated = true - - for _, childView := range t.childViews { - childView.invalidate() - } - - // after invalidating the children, they no longer need to be tracked - t.childViews = make([]*trieView, 0, defaultPreallocationSize) -} - -func (t *trieView) updateParent(newParent TrieView) { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - t.parentTrie = newParent -} - -// GetMerkleRoot returns the ID of the root of this trie. -func (t *trieView) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - if err := t.calculateNodeIDs(ctx); err != nil { - return ids.Empty, err - } - return t.root.id, nil -} - -func (t *trieView) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { - _, span := t.db.debugTracer.Start(ctx, "MerkleDB.trieview.GetValues", oteltrace.WithAttributes( - attribute.Int("keyCount", len(keys)), - )) - defer span.End() - - results := make([][]byte, len(keys)) - valueErrors := make([]error, len(keys)) - - for i, key := range keys { - results[i], valueErrors[i] = t.getValueCopy(t.db.toKey(key)) - } - return results, valueErrors -} - -// GetValue returns the value for the given [key]. -// Returns database.ErrNotFound if it doesn't exist. -func (t *trieView) GetValue(ctx context.Context, key []byte) ([]byte, error) { - _, span := t.db.debugTracer.Start(ctx, "MerkleDB.trieview.GetValue") - defer span.End() - - return t.getValueCopy(t.db.toKey(key)) -} - -// getValueCopy returns a copy of the value for the given [key]. -// Returns database.ErrNotFound if it doesn't exist. -func (t *trieView) getValueCopy(key Key) ([]byte, error) { - val, err := t.getValue(key) - if err != nil { - return nil, err - } - return slices.Clone(val), nil -} - -func (t *trieView) getValue(key Key) ([]byte, error) { - if t.isInvalid() { - return nil, ErrInvalid - } - - if change, ok := t.changes.values[key]; ok { - t.db.metrics.ViewValueCacheHit() - if change.after.IsNothing() { - return nil, database.ErrNotFound - } - return change.after.Value(), nil - } - t.db.metrics.ViewValueCacheMiss() - - // if we don't have local copy of the key, then grab a copy from the parent trie - value, err := t.getParentTrie().getValue(key) - if err != nil { - return nil, err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return nil, ErrInvalid - } - - return value, nil -} - -// Must not be called after [calculateNodeIDs] has returned. 
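// A small sketch of why GetValue above returns a defensive copy, echoing
// Test_GetValue_Safety earlier in this diff: getValueCopy returns
// slices.Clone(val), so callers can mutate the result freely. mutateResult is a
// hypothetical helper and assumes the key maps to a non-empty value.
func mutateResult(ctx context.Context, t *trieView, key []byte) error {
	val, err := t.GetValue(ctx, key)
	if err != nil {
		return err
	}
	if len(val) > 0 {
		// Cannot corrupt the value stored in the view or its ancestors,
		// because only a copy was handed out.
		val[0]++
	}
	return nil
}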
-func (t *trieView) remove(key Key) error { - if t.nodesAlreadyCalculated.Get() { - return ErrNodesAlreadyCalculated - } - - // confirm a node exists with a value - keyNode, err := t.getNodeWithID(ids.Empty, key, true) - if err != nil { - if errors.Is(err, database.ErrNotFound) { - // key didn't exist - return nil - } - return err - } - - // node doesn't contain a value - if !keyNode.hasValue() { - return nil - } - - // if the node exists and contains a value - // mark all ancestor for change - // grab parent and grandparent nodes for path compression - var grandParent, parent, nodeToDelete *node - if err := t.visitPathToKey(key, func(n *node) error { - grandParent = parent - parent = nodeToDelete - nodeToDelete = n - return t.recordNodeChange(n) - }); err != nil { - return err - } - - nodeToDelete.setValue(maybe.Nothing[[]byte]()) - if len(nodeToDelete.children) != 0 { - // merge this node and its child into a single node if possible - return t.compressNodePath(parent, nodeToDelete) - } - - // if the removed node has no children, the node can be removed from the trie - if err := t.recordNodeDeleted(nodeToDelete); err != nil { - return err - } - if parent != nil { - parent.removeChild(nodeToDelete) - - // merge the parent node and its child into a single node if possible - return t.compressNodePath(grandParent, parent) - } - return nil -} - -// Merges together nodes in the inclusive descendants of [node] that -// have no value and a single child into one node with a compressed -// path until a node that doesn't meet those criteria is reached. -// [parent] is [node]'s parent. -// Assumes at least one of the following is true: -// * [node] has a value. -// * [node] has children. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) compressNodePath(parent, node *node) error { - if t.nodesAlreadyCalculated.Get() { - return ErrNodesAlreadyCalculated - } - - // don't collapse into this node if it's the root, doesn't have 1 child, or has a value - if parent == nil || len(node.children) != 1 || node.hasValue() { - return nil - } - - if err := t.recordNodeDeleted(node); err != nil { - return err - } - - var ( - childEntry child - childKey Key - ) - // There is only one child, but we don't know the index. - // "Cycle" over the key/values to find the only child. - // Note this iteration once because len(node.children) == 1. - for index, entry := range node.children { - childKey = node.key.AppendExtend(index, entry.compressedKey) - childEntry = entry - } - - // [node] is the first node with multiple children. - // combine it with the [node] passed in. - parent.setChildEntry(childKey.Token(parent.key.tokenLength), - child{ - compressedKey: childKey.Skip(parent.key.tokenLength + 1), - id: childEntry.id, - hasValue: childEntry.hasValue, - }) - return t.recordNodeChange(parent) -} - -// Returns the nodes along the path to [key]. -// The first node is the root, and the last node is either the node with the -// given [key], if it's in the trie, or the node with the largest prefix of -// the [key] if it isn't in the trie. -// Always returns at least the root node. 
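// A typical use of visitPathToKey, whose contract is documented above and whose
// body follows: the last node visited is either the node with [key] or its
// deepest existing ancestor. closestNode is a hypothetical helper.
func closestNode(t *trieView, key Key) (*node, error) {
	var last *node
	if err := t.visitPathToKey(key, func(n *node) error {
		last = n
		return nil
	}); err != nil {
		return nil, err
	}
	// [last] is at minimum the root, per the doc comment above.
	return last, nil
}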
-func (t *trieView) visitPathToKey(key Key, visitNode func(*node) error) error { - var ( - // all node paths start at the root - currentNode = t.root - err error - ) - if err := visitNode(currentNode); err != nil { - return err - } - // while the entire path hasn't been matched - for currentNode.key.tokenLength < key.tokenLength { - // confirm that a child exists and grab its ID before attempting to load it - nextChildEntry, hasChild := currentNode.children[key.Token(currentNode.key.tokenLength)] - - if !hasChild || !key.iteratedHasPrefix(currentNode.key.tokenLength+1, nextChildEntry.compressedKey) { - // there was no child along the path or the child that was there doesn't match the remaining path - return nil - } - - // grab the next node along the path - currentNode, err = t.getNodeWithID(nextChildEntry.id, key.Take(currentNode.key.tokenLength+1+nextChildEntry.compressedKey.tokenLength), nextChildEntry.hasValue) - if err != nil { - return err - } - if err := visitNode(currentNode); err != nil { - return err - } - } - return nil -} - -func getLengthOfCommonPrefix(first, second Key, secondOffset int) int { - commonIndex := 0 - for first.tokenLength > commonIndex && second.tokenLength > (commonIndex+secondOffset) && first.Token(commonIndex) == second.Token(commonIndex+secondOffset) { - commonIndex++ - } - return commonIndex -} - -// Get a copy of the node matching the passed key from the trie. -// Used by views to get nodes from their ancestors. -func (t *trieView) getEditableNode(key Key, hadValue bool) (*node, error) { - if t.isInvalid() { - return nil, ErrInvalid - } - - // grab the node in question - n, err := t.getNodeWithID(ids.Empty, key, hadValue) - if err != nil { - return nil, err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return nil, ErrInvalid - } - - // return a clone of the node, so it can be edited without affecting this trie - return n.clone(), nil -} - -// insert a key/value pair into the correct node of the trie. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) insert( - key Key, - value maybe.Maybe[[]byte], -) (*node, error) { - if t.nodesAlreadyCalculated.Get() { - return nil, ErrNodesAlreadyCalculated - } - - var closestNode *node - if err := t.visitPathToKey(key, func(n *node) error { - closestNode = n - return t.recordNodeChange(n) - }); err != nil { - return nil, err - } - - // a node with that exact path already exists so update its value - if closestNode.key == key { - closestNode.setValue(value) - // closestNode was already marked as changed in the ancestry loop above - return closestNode, nil - } - - closestNodeKeyLength := closestNode.key.tokenLength - - // A node with the exact key doesn't exist so determine the portion of the - // key that hasn't been matched yet - // Note that [key] has prefix [closestNodeFullPath] but exactMatch was false, - // so [key] must be longer than [closestNodeFullPath] and the following index and slice won't OOB. - existingChildEntry, hasChild := closestNode.children[key.Token(closestNodeKeyLength)] - if !hasChild { - // there are no existing nodes along the path [fullPath], so create a new node to insert [value] - newNode := newNode( - closestNode, - key, - ) - newNode.setValue(value) - return newNode, t.recordNewNode(newNode) - } - - // if we have reached this point, then the [fullpath] we are trying to insert and - // the existing path node have some common prefix. 
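// The branch-node idea behind the insert logic that continues below, reduced to
// the byte level as a sketch. commonPrefixLen is a hypothetical stand-in for
// getLengthOfCommonPrefix above, which works on tokens with an offset rather
// than raw bytes.
func commonPrefixLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// For example, commonPrefixLen([]byte("key12"), []byte("key1F")) == 4, so
// inserting "key1F" next to an existing "key12" creates a branch node at
// "key1" whose two children hold the diverging suffixes.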
- // a new branching node will be created that will represent this common prefix and - // have the existing path node and the value being inserted as children. - - // generate the new branch node - // find how many tokens are common between the existing child's compressed path and - // the current key(offset by the closest node's key), - // then move all the common tokens into the branch node - commonPrefixLength := getLengthOfCommonPrefix(existingChildEntry.compressedKey, key, closestNodeKeyLength+1) - - // If the length of the existing child's compressed path is less than or equal to the branch node's key that implies that the existing child's key matched the key to be inserted. - // Since it matched the key to be inserted, it should have been the last node returned by GetPathTo - if existingChildEntry.compressedKey.tokenLength <= commonPrefixLength { - return nil, ErrGetPathToFailure - } - - branchNode := newNode( - closestNode, - key.Take(closestNodeKeyLength+1+commonPrefixLength), - ) - nodeWithValue := branchNode - - if key.tokenLength == branchNode.key.tokenLength { - // the branch node has exactly the key to be inserted as its key, so set the value on the branch node - branchNode.setValue(value) - } else { - // the key to be inserted is a child of the branch node - // create a new node and add the value to it - newNode := newNode( - branchNode, - key, - ) - newNode.setValue(value) - if err := t.recordNewNode(newNode); err != nil { - return nil, err - } - nodeWithValue = newNode - } - - // add the existing child onto the branch node - branchNode.setChildEntry( - existingChildEntry.compressedKey.Token(commonPrefixLength), - child{ - compressedKey: existingChildEntry.compressedKey.Skip(commonPrefixLength + 1), - id: existingChildEntry.id, - hasValue: existingChildEntry.hasValue, - }) - - return nodeWithValue, t.recordNewNode(branchNode) -} - -// Records that a node has been created. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) recordNewNode(after *node) error { - return t.recordKeyChange(after.key, after, after.hasValue(), true /* newNode */) -} - -// Records that an existing node has been changed. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) recordNodeChange(after *node) error { - return t.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) -} - -// Records that the node associated with the given key has been deleted. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) recordNodeDeleted(after *node) error { - // don't delete the root. - if after.key.tokenLength == 0 { - return t.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) - } - return t.recordKeyChange(after.key, nil, after.hasValue(), false /* newNode */) -} - -// Records that the node associated with the given key has been changed. -// If it is an existing node, record what its value was before it was changed. -// Must not be called after [calculateNodeIDs] has returned. 
-func (t *trieView) recordKeyChange(key Key, after *node, hadValue bool, newNode bool) error { - if t.nodesAlreadyCalculated.Get() { - return ErrNodesAlreadyCalculated - } - - if existing, ok := t.changes.nodes[key]; ok { - existing.after = after - return nil - } - - if newNode { - t.changes.nodes[key] = &change[*node]{ - after: after, - } - return nil - } - - before, err := t.getParentTrie().getEditableNode(key, hadValue) - if err != nil && err != database.ErrNotFound { - return err - } - t.changes.nodes[key] = &change[*node]{ - before: before, - after: after, - } - return nil -} - -// Records that a key's value has been added or updated. -// Doesn't actually change the trie data structure. -// That's deferred until we call [calculateNodeIDs]. -// Must not be called after [calculateNodeIDs] has returned. -func (t *trieView) recordValueChange(key Key, value maybe.Maybe[[]byte]) error { - if t.nodesAlreadyCalculated.Get() { - return ErrNodesAlreadyCalculated - } - - // update the existing change if it exists - if existing, ok := t.changes.values[key]; ok { - existing.after = value - return nil - } - - // grab the before value - var beforeMaybe maybe.Maybe[[]byte] - before, err := t.getParentTrie().getValue(key) - switch err { - case nil: - beforeMaybe = maybe.Some(before) - case database.ErrNotFound: - beforeMaybe = maybe.Nothing[[]byte]() - default: - return err - } - - t.changes.values[key] = &change[maybe.Maybe[[]byte]]{ - before: beforeMaybe, - after: value, - } - return nil -} - -// Retrieves a node with the given [key]. -// If the node is fetched from [t.parentTrie] and [id] isn't empty, -// sets the node's ID to [id]. -// If the node is loaded from the baseDB, [hasValue] determines which database the node is stored in. -// Returns database.ErrNotFound if the node doesn't exist. -func (t *trieView) getNodeWithID(id ids.ID, key Key, hasValue bool) (*node, error) { - // check for the key within the changed nodes - if nodeChange, isChanged := t.changes.nodes[key]; isChanged { - t.db.metrics.ViewNodeCacheHit() - if nodeChange.after == nil { - return nil, database.ErrNotFound - } - return nodeChange.after, nil - } - - // get the node from the parent trie and store a local copy - parentTrieNode, err := t.getParentTrie().getEditableNode(key, hasValue) - if err != nil { - return nil, err - } - - // only need to initialize the id if it's from the parent trie. - // nodes in the current view change list have already been initialized. - if id != ids.Empty { - parentTrieNode.id = id - } - return parentTrieNode, nil -} - -// Get the parent trie of the view -func (t *trieView) getParentTrie() TrieView { - t.validityTrackingLock.RLock() - defer t.validityTrackingLock.RUnlock() - return t.parentTrie -} diff --git a/x/merkledb/value_node_db.go b/x/merkledb/value_node_db.go index 8f168560d7fa..16cabe3d718f 100644 --- a/x/merkledb/value_node_db.go +++ b/x/merkledb/value_node_db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
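// The before/after change record maintained by recordValueChange above, reduced
// to its essentials as a sketch. valueChange and record are hypothetical names;
// the real code uses change[maybe.Maybe[[]byte]] keyed by Key.
type valueChange struct {
	before, after maybe.Maybe[[]byte]
}

func record(changes map[Key]*valueChange, key Key, before, after maybe.Maybe[[]byte]) {
	if existing, ok := changes[key]; ok {
		// The key was already touched in this view; only the latest value
		// matters, and the original [before] is preserved.
		existing.after = after
		return
	}
	changes[key] = &valueChange{before: before, after: after}
}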
package merkledb @@ -27,8 +27,7 @@ type valueNodeDB struct { nodeCache cache.Cacher[Key, *node] metrics merkleMetrics - closed utils.Atomic[bool] - branchFactor BranchFactor + closed utils.Atomic[bool] } func newValueNodeDB( @@ -36,14 +35,12 @@ func newValueNodeDB( bufferPool *sync.Pool, metrics merkleMetrics, cacheSize int, - branchFactor BranchFactor, ) *valueNodeDB { return &valueNodeDB{ - metrics: metrics, - baseDB: db, - bufferPool: bufferPool, - nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), - branchFactor: branchFactor, + metrics: metrics, + baseDB: db, + bufferPool: bufferPool, + nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), } } @@ -92,6 +89,11 @@ func (db *valueNodeDB) Get(key Key) (*node, error) { return parseNode(key, nodeBytes) } +func (db *valueNodeDB) Clear() error { + db.nodeCache.Flush() + return database.AtomicClearPrefix(db.baseDB, db.baseDB, valueNodePrefix) +} + // Batch of database operations type valueNodeBatch struct { db *valueNodeDB @@ -170,7 +172,7 @@ func (i *iterator) Next() bool { i.db.metrics.DatabaseNodeRead() key := i.nodeIter.Key() key = key[valueNodePrefixLen:] - n, err := parseNode(ToKey(key, i.db.branchFactor), i.nodeIter.Value()) + n, err := parseNode(ToKey(key), i.nodeIter.Value()) if err != nil { i.err = err return false diff --git a/x/merkledb/value_node_db_test.go b/x/merkledb/value_node_db_test.go index 910c6e1e9d6b..224a4fe94ac1 100644 --- a/x/merkledb/value_node_db_test.go +++ b/x/merkledb/value_node_db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -20,19 +20,18 @@ func TestValueNodeDB(t *testing.T) { baseDB := memdb.New() - size := 10 + cacheSize := 10_000 db := newValueNodeDB( baseDB, &sync.Pool{ New: func() interface{} { return make([]byte, 0) }, }, &mockMetrics{}, - size, - BranchFactor16, + cacheSize, ) // Getting a key that doesn't exist should return an error. - key := ToKey([]byte{0x01}, BranchFactor16) + key := ToKey([]byte{0x01}) _, err := db.Get(key) require.ErrorIs(err, database.ErrNotFound) @@ -124,12 +123,11 @@ func TestValueNodeDBIterator(t *testing.T) { }, &mockMetrics{}, cacheSize, - BranchFactor16, ) // Put key-node pairs. for i := 0; i < cacheSize; i++ { - key := ToKey([]byte{byte(i)}, BranchFactor16) + key := ToKey([]byte{byte(i)}) node := &node{ dbNode: dbNode{ value: maybe.Some([]byte{byte(i)}), @@ -167,7 +165,7 @@ func TestValueNodeDBIterator(t *testing.T) { it.Release() // Put key-node pairs with a common prefix. 
- key := ToKey([]byte{0xFF, 0x00}, BranchFactor16) + key := ToKey([]byte{0xFF, 0x00}) n := &node{ dbNode: dbNode{ value: maybe.Some([]byte{0xFF, 0x00}), @@ -178,7 +176,7 @@ func TestValueNodeDBIterator(t *testing.T) { batch.Put(key, n) require.NoError(batch.Write()) - key = ToKey([]byte{0xFF, 0x01}, BranchFactor16) + key = ToKey([]byte{0xFF, 0x01}) n = &node{ dbNode: dbNode{ value: maybe.Some([]byte{0xFF, 0x01}), @@ -220,3 +218,34 @@ func TestValueNodeDBIterator(t *testing.T) { err := it.Error() require.ErrorIs(err, database.ErrClosed) } + +func TestValueNodeDBClear(t *testing.T) { + require := require.New(t) + cacheSize := 200 + baseDB := memdb.New() + db := newValueNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + ) + + batch := db.NewBatch() + for _, b := range [][]byte{{1}, {2}, {3}} { + batch.Put(ToKey(b), newNode(ToKey(b))) + } + require.NoError(batch.Write()) + + // Assert the db is not empty + iter := baseDB.NewIteratorWithPrefix(valueNodePrefix) + require.True(iter.Next()) + iter.Release() + + require.NoError(db.Clear()) + + iter = baseDB.NewIteratorWithPrefix(valueNodePrefix) + defer iter.Release() + require.False(iter.Next()) +} diff --git a/x/merkledb/view.go b/x/merkledb/view.go new file mode 100644 index 000000000000..8f9e688efc26 --- /dev/null +++ b/x/merkledb/view.go @@ -0,0 +1,865 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "context" + "errors" + "sync" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +const ( + initKeyValuesSize = 256 + defaultPreallocationSize = 100 +) + +var ( + _ View = (*view)(nil) + + ErrCommitted = errors.New("view has been committed") + ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") + ErrPartialByteLengthWithValue = errors.New( + "the underlying db only supports whole number of byte keys, so cannot record changes with partial byte lengths", + ) + ErrVisitPathToKey = errors.New("failed to visit expected node during insertion") + ErrStartAfterEnd = errors.New("start key > end key") + ErrNoChanges = errors.New("no changes provided") + ErrParentNotDatabase = errors.New("parent trie is not database") + ErrNodesAlreadyCalculated = errors.New("cannot modify the trie after the node changes have been calculated") +) + +type view struct { + // If true, this view has been committed. + // [commitLock] must be held while accessing this field. + committed bool + commitLock sync.RWMutex + + // tracking bool to enforce that no changes are made to the trie after the nodes have been calculated + nodesAlreadyCalculated utils.Atomic[bool] + + // calculateNodesOnce is a once to ensure that node calculation only occurs a single time + calculateNodesOnce sync.Once + + // Controls the view's validity related fields. + // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. + // Only use to lock current view or descendants of the current view + // DO NOT grab the [validityTrackingLock] of any ancestor trie while this is held. + validityTrackingLock sync.RWMutex + + // If true, this view has been invalidated and can't be used. 
+ // + // Invariant: This view is marked as invalid before any of its ancestors change. + // Since we ensure that all subviews are marked invalid before making an invalidating change + // then if we are still valid at the end of the function, then no corrupting changes could have + // occurred during execution. + // Namely, if we have a method with: + // + // *Code Accessing Ancestor State* + // + // if v.isInvalid() { + // return ErrInvalid + // } + // return [result] + // + // If the invalidated check passes, then we're guaranteed that no ancestor changes occurred + // during the code that accessed ancestor state and the result of that work is still valid + // + // [validityTrackingLock] must be held when reading/writing this field. + invalidated bool + + // the uncommitted parent trie of this view + // [validityTrackingLock] must be held when reading/writing this field. + parentTrie View + + // The valid children of this view. + // [validityTrackingLock] must be held when reading/writing this field. + childViews []*view + + // Changes made to this view. + // May include nodes that haven't been updated + // but will when their ID is recalculated. + changes *changeSummary + + db *merkleDB + + // The root of the trie represented by this view. + root maybe.Maybe[*node] + + tokenSize int +} + +// NewView returns a new view on top of this view where the passed changes +// have been applied. +// Adds the new view to [v.childViews]. +// Assumes [v.commitLock] isn't held. +func (v *view) NewView( + ctx context.Context, + changes ViewChanges, +) (View, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + v.commitLock.RLock() + defer v.commitLock.RUnlock() + + if v.committed { + return v.getParentTrie().NewView(ctx, changes) + } + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + + newView, err := newView(v.db, v, changes) + if err != nil { + return nil, err + } + + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + if v.invalidated { + return nil, ErrInvalid + } + v.childViews = append(v.childViews, newView) + + return newView, nil +} + +// Creates a new view with the given [parentTrie]. +func newView( + db *merkleDB, + parentTrie View, + changes ViewChanges, +) (*view, error) { + newView := &view{ + root: maybe.Bind(parentTrie.getRoot(), (*node).clone), + db: db, + parentTrie: parentTrie, + changes: newChangeSummary(len(changes.BatchOps) + len(changes.MapOps)), + tokenSize: db.tokenSize, + } + + for _, op := range changes.BatchOps { + key := op.Key + if !changes.ConsumeBytes { + key = slices.Clone(op.Key) + } + + newVal := maybe.Nothing[[]byte]() + if !op.Delete { + newVal = maybe.Some(op.Value) + if !changes.ConsumeBytes { + newVal = maybe.Some(slices.Clone(op.Value)) + } + } + if err := newView.recordValueChange(toKey(key), newVal); err != nil { + return nil, err + } + } + for key, val := range changes.MapOps { + if !changes.ConsumeBytes { + val = maybe.Bind(val, slices.Clone[[]byte]) + } + if err := newView.recordValueChange(toKey(stringToByteSlice(key)), val); err != nil { + return nil, err + } + } + return newView, nil +} + +// Creates a view of the db at a historical root using the provided [changes]. +// Returns ErrNoChanges if [changes] is empty. 
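// newViewWithChanges below marks node calculation as already complete by
// "burning" its sync.Once with a no-op. A hedged standalone version of that
// trick; markCalculated is a hypothetical helper, and utils.Atomic is the
// avalanchego utility type used by the view struct above.
func markCalculated(once *sync.Once, alreadyCalculated *utils.Atomic[bool]) {
	once.Do(func() {}) // later Do calls become no-ops
	alreadyCalculated.Set(true)
}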
+func newViewWithChanges( + db *merkleDB, + changes *changeSummary, +) (*view, error) { + if changes == nil { + return nil, ErrNoChanges + } + + newView := &view{ + root: changes.rootChange.after, + db: db, + parentTrie: db, + changes: changes, + tokenSize: db.tokenSize, + } + // since this is a set of historical changes, all nodes have already been calculated + // since no new changes have occurred, no new calculations need to be done + newView.calculateNodesOnce.Do(func() {}) + newView.nodesAlreadyCalculated.Set(true) + return newView, nil +} + +func (v *view) getTokenSize() int { + return v.tokenSize +} + +func (v *view) getRoot() maybe.Maybe[*node] { + return v.root +} + +// Recalculates the node IDs for all changed nodes in the trie. +// Cancelling [ctx] doesn't cancel calculation. It's used only for tracing. +func (v *view) calculateNodeIDs(ctx context.Context) error { + var err error + v.calculateNodesOnce.Do(func() { + if v.isInvalid() { + err = ErrInvalid + return + } + defer v.nodesAlreadyCalculated.Set(true) + + oldRoot := maybe.Bind(v.root, (*node).clone) + + // We wait to create the span until after checking that we need to actually + // calculateNodeIDs to make traces more useful (otherwise there may be a span + // per key modified even though IDs are not re-calculated). + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.calculateNodeIDs") + defer span.End() + + // add all the changed key/values to the nodes of the trie + for key, change := range v.changes.values { + if change.after.IsNothing() { + // Note we're setting [err] defined outside this function. + if err = v.remove(key); err != nil { + return + } + // Note we're setting [err] defined outside this function. + } else if _, err = v.insert(key, change.after); err != nil { + return + } + } + + if !v.root.IsNothing() { + _ = v.db.calculateNodeIDsSema.Acquire(context.Background(), 1) + v.changes.rootID = v.calculateNodeIDsHelper(v.root.Value()) + v.db.calculateNodeIDsSema.Release(1) + } else { + v.changes.rootID = ids.Empty + } + + v.changes.rootChange = change[maybe.Maybe[*node]]{ + before: oldRoot, + after: v.root, + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + err = ErrInvalid + return + } + }) + return err +} + +// Calculates the ID of all descendants of [n] which need to be recalculated, +// and then calculates the ID of [n] itself. +func (v *view) calculateNodeIDsHelper(n *node) ids.ID { + // We use [wg] to wait until all descendants of [n] have been updated. + var wg sync.WaitGroup + + for childIndex := range n.children { + childEntry := n.children[childIndex] + childKey := n.key.Extend(ToToken(childIndex, v.tokenSize), childEntry.compressedKey) + childNodeChange, ok := v.changes.nodes[childKey] + if !ok { + // This child wasn't changed. + continue + } + childEntry.hasValue = childNodeChange.after.hasValue() + + // Try updating the child and its descendants in a goroutine. + if ok := v.db.calculateNodeIDsSema.TryAcquire(1); ok { + wg.Add(1) + go func() { + childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) + v.db.calculateNodeIDsSema.Release(1) + wg.Done() + }() + } else { + // We're at the goroutine limit; do the work in this goroutine. + childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) + } + } + + // Wait until all descendants of [n] have been updated. + wg.Wait() + + // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. 
+ return n.calculateID(v.db.metrics) +} + +// GetProof returns a proof that [bytesPath] is in or not in trie [t]. +func (v *view) GetProof(ctx context.Context, key []byte) (*Proof, error) { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetProof") + defer span.End() + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + + result, err := getProof(v, key) + if err != nil { + return nil, err + } + if v.isInvalid() { + return nil, ErrInvalid + } + return result, nil +} + +// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. +// The returned proof's [KeyValues] has at most [maxLength] values. +// [maxLength] must be > 0. +func (v *view) GetRangeProof( + ctx context.Context, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, +) (*RangeProof, error) { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetRangeProof") + defer span.End() + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + result, err := getRangeProof(v, start, end, maxLength) + if err != nil { + return nil, err + } + if v.isInvalid() { + return nil, ErrInvalid + } + return result, nil +} + +// CommitToDB commits changes from this view to the underlying DB. +func (v *view) CommitToDB(ctx context.Context) error { + ctx, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.CommitToDB") + defer span.End() + + v.db.commitLock.Lock() + defer v.db.commitLock.Unlock() + + return v.commitToDB(ctx) +} + +// Commits the changes from [trieToCommit] to this view, +// this view to its parent, and so on until committing to the db. +// Assumes [v.db.commitLock] is held. +func (v *view) commitToDB(ctx context.Context) error { + v.commitLock.Lock() + defer v.commitLock.Unlock() + + ctx, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.commitToDB", oteltrace.WithAttributes( + attribute.Int("changeCount", len(v.changes.values)), + )) + defer span.End() + + // Call this here instead of in [v.db.commitChanges] + // because doing so there would be a deadlock. + if err := v.calculateNodeIDs(ctx); err != nil { + return err + } + + if err := v.db.commitChanges(ctx, v); err != nil { + return err + } + + v.committed = true + + return nil +} + +// Assumes [v.validityTrackingLock] isn't held. +func (v *view) isInvalid() bool { + v.validityTrackingLock.RLock() + defer v.validityTrackingLock.RUnlock() + + return v.invalidated +} + +// Invalidates this view and all descendants. +// Assumes [v.validityTrackingLock] isn't held. +func (v *view) invalidate() { + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + v.invalidated = true + + for _, childView := range v.childViews { + childView.invalidate() + } + + // after invalidating the children, they no longer need to be tracked + v.childViews = make([]*view, 0, defaultPreallocationSize) +} + +func (v *view) updateParent(newParent View) { + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + v.parentTrie = newParent +} + +// GetMerkleRoot returns the ID of the root of this view. 
+func (v *view) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + if err := v.calculateNodeIDs(ctx); err != nil { + return ids.Empty, err + } + return v.changes.rootID, nil +} + +func (v *view) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { + _, span := v.db.debugTracer.Start(ctx, "MerkleDB.view.GetValues", oteltrace.WithAttributes( + attribute.Int("keyCount", len(keys)), + )) + defer span.End() + + results := make([][]byte, len(keys)) + valueErrors := make([]error, len(keys)) + + for i, key := range keys { + results[i], valueErrors[i] = v.getValueCopy(ToKey(key)) + } + return results, valueErrors +} + +// GetValue returns the value for the given [key]. +// Returns database.ErrNotFound if it doesn't exist. +func (v *view) GetValue(ctx context.Context, key []byte) ([]byte, error) { + _, span := v.db.debugTracer.Start(ctx, "MerkleDB.view.GetValue") + defer span.End() + + return v.getValueCopy(ToKey(key)) +} + +// getValueCopy returns a copy of the value for the given [key]. +// Returns database.ErrNotFound if it doesn't exist. +func (v *view) getValueCopy(key Key) ([]byte, error) { + val, err := v.getValue(key) + if err != nil { + return nil, err + } + return slices.Clone(val), nil +} + +func (v *view) getValue(key Key) ([]byte, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + + if change, ok := v.changes.values[key]; ok { + v.db.metrics.ViewValueCacheHit() + if change.after.IsNothing() { + return nil, database.ErrNotFound + } + return change.after.Value(), nil + } + v.db.metrics.ViewValueCacheMiss() + + // if we don't have local copy of the key, then grab a copy from the parent trie + value, err := v.getParentTrie().getValue(key) + if err != nil { + return nil, err + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + return nil, ErrInvalid + } + + return value, nil +} + +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) remove(key Key) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + // confirm a node exists with a value + keyNode, err := v.getNode(key, true) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + // [key] isn't in the trie. + return nil + } + return err + } + + if !keyNode.hasValue() { + // [key] doesn't have a value. + return nil + } + + // if the node exists and contains a value + // mark all ancestor for change + // grab parent and grandparent nodes for path compression + var grandParent, parent, nodeToDelete *node + if err := visitPathToKey(v, key, func(n *node) error { + grandParent = parent + parent = nodeToDelete + nodeToDelete = n + return v.recordNodeChange(n) + }); err != nil { + return err + } + + nodeToDelete.setValue(maybe.Nothing[[]byte]()) + + // if the removed node has no children, the node can be removed from the trie + if len(nodeToDelete.children) == 0 { + if err := v.recordNodeDeleted(nodeToDelete); err != nil { + return err + } + + if nodeToDelete.key == v.root.Value().key { + // We deleted the root. The trie is empty now. + v.root = maybe.Nothing[*node]() + return nil + } + + // Note [parent] != nil since [nodeToDelete.key] != [v.root.key]. + // i.e. There's the root and at least one more node. 
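As a rough sketch of how the view API above fits together from a caller's perspective (the package, helper name, and setup are illustrative assumptions, not part of this patch): edits are staged through ViewChanges, reads fall back to the parent trie, and nothing is hashed or persisted until a root, proof, or commit is requested.

// view_usage_example.go (hypothetical, outside this patch)
package example

import (
	"context"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/x/merkledb"
)

// exampleViewUsage stages a batch of edits in a view, reads through it, and
// commits it to the underlying database.
func exampleViewUsage(ctx context.Context, db merkledb.MerkleDB) error {
	// Stage edits without touching the underlying database.
	view, err := db.NewView(ctx, merkledb.ViewChanges{
		BatchOps: []database.BatchOp{
			{Key: []byte("k1"), Value: []byte("v1")},
			{Key: []byte("k2"), Delete: true},
		},
	})
	if err != nil {
		return err
	}

	// Reads consult the view's own changes first, then fall back to the parent trie.
	if _, err := view.GetValue(ctx, []byte("k1")); err != nil {
		return err
	}

	// Node IDs are only calculated when a root (or a proof) is requested.
	if _, err := view.GetMerkleRoot(ctx); err != nil {
		return err
	}

	// Committing applies the view's changes to the database. Consistent with the
	// invalidation invariant above, other views built on the same parent are
	// invalidated and return ErrInvalid afterwards.
	return view.CommitToDB(ctx)
}

The deferred-hashing design is visible in recordValueChange below: it only records the intended change, and calculateNodeIDs later applies the structural updates in a single pass.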
+ parent.removeChild(nodeToDelete, v.tokenSize) + + // merge the parent node and its child into a single node if possible + return v.compressNodePath(grandParent, parent) + } + + // merge this node and its descendants into a single node if possible + return v.compressNodePath(parent, nodeToDelete) +} + +// Merges together nodes in the inclusive descendants of [n] that +// have no value and a single child into one node with a compressed +// path until a node that doesn't meet those criteria is reached. +// [parent] is [n]'s parent. If [parent] is nil, [n] is the root +// node and [v.root] is updated to [n]. +// Assumes at least one of the following is true: +// * [n] has a value. +// * [n] has children. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) compressNodePath(parent, n *node) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + if len(n.children) != 1 || n.hasValue() { + return nil + } + + if err := v.recordNodeDeleted(n); err != nil { + return err + } + + var ( + childEntry *child + childKey Key + ) + // There is only one child, but we don't know the index. + // "Cycle" over the key/values to find the only child. + // Note this iteration once because len(node.children) == 1. + for index, entry := range n.children { + childKey = n.key.Extend(ToToken(index, v.tokenSize), entry.compressedKey) + childEntry = entry + } + + if parent == nil { + root, err := v.getNode(childKey, childEntry.hasValue) + if err != nil { + return err + } + v.root = maybe.Some(root) + return nil + } + + parent.setChildEntry(childKey.Token(parent.key.length, v.tokenSize), + &child{ + compressedKey: childKey.Skip(parent.key.length + v.tokenSize), + id: childEntry.id, + hasValue: childEntry.hasValue, + }) + return v.recordNodeChange(parent) +} + +// Get a copy of the node matching the passed key from the view. +// Used by views to get nodes from their ancestors. +func (v *view) getEditableNode(key Key, hadValue bool) (*node, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + + // grab the node in question + n, err := v.getNode(key, hadValue) + if err != nil { + return nil, err + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + return nil, ErrInvalid + } + + // return a clone of the node, so it can be edited without affecting this view + return n.clone(), nil +} + +// insert a key/value pair into the correct node of the trie. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) insert( + key Key, + value maybe.Maybe[[]byte], +) (*node, error) { + if v.nodesAlreadyCalculated.Get() { + return nil, ErrNodesAlreadyCalculated + } + + if v.root.IsNothing() { + // the trie is empty, so create a new root node. + root := newNode(key) + root.setValue(value) + v.root = maybe.Some(root) + return root, v.recordNewNode(root) + } + + // Find the node that most closely matches [key]. + var closestNode *node + if err := visitPathToKey(v, key, func(n *node) error { + closestNode = n + // Need to recalculate ID for all nodes on path to [key]. + return v.recordNodeChange(n) + }); err != nil { + return nil, err + } + + if closestNode == nil { + // [v.root.key] isn't a prefix of [key]. 
+ var ( + oldRoot = v.root.Value() + commonPrefixLength = getLengthOfCommonPrefix(oldRoot.key, key, 0 /*offset*/, v.tokenSize) + commonPrefix = oldRoot.key.Take(commonPrefixLength) + newRoot = newNode(commonPrefix) + oldRootID = oldRoot.calculateID(v.db.metrics) + ) + + // Call addChildWithID instead of addChild so the old root is added + // to the new root with the correct ID. + // TODO: + // [oldRootID] shouldn't need to be calculated here. + // Either oldRootID should already be calculated or will be calculated at the end with the other nodes + // Initialize the v.changes.rootID during newView and then use that here instead of oldRootID + newRoot.addChildWithID(oldRoot, v.tokenSize, oldRootID) + if err := v.recordNewNode(newRoot); err != nil { + return nil, err + } + v.root = maybe.Some(newRoot) + + closestNode = newRoot + } + + // a node with that exact key already exists so update its value + if closestNode.key == key { + closestNode.setValue(value) + // closestNode was already marked as changed in the ancestry loop above + return closestNode, nil + } + + // A node with the exact key doesn't exist so determine the portion of the + // key that hasn't been matched yet + // Note that [key] has prefix [closestNode.key], so [key] must be longer + // and the following index won't OOB. + existingChildEntry, hasChild := closestNode.children[key.Token(closestNode.key.length, v.tokenSize)] + if !hasChild { + // there are no existing nodes along the key [key], so create a new node to insert [value] + newNode := newNode(key) + newNode.setValue(value) + closestNode.addChild(newNode, v.tokenSize) + return newNode, v.recordNewNode(newNode) + } + + // if we have reached this point, then the [key] we are trying to insert and + // the existing path node have some common prefix. + // a new branching node will be created that will represent this common prefix and + // have the existing path node and the value being inserted as children. + + // generate the new branch node + // find how many tokens are common between the existing child's compressed key and + // the current key(offset by the closest node's key), + // then move all the common tokens into the branch node + commonPrefixLength := getLengthOfCommonPrefix( + existingChildEntry.compressedKey, + key, + closestNode.key.length+v.tokenSize, + v.tokenSize, + ) + + if existingChildEntry.compressedKey.length <= commonPrefixLength { + // Since the compressed key is shorter than the common prefix, + // we should have visited [existingChildEntry] in [visitPathToKey]. 
+ return nil, ErrVisitPathToKey + } + + branchNode := newNode(key.Take(closestNode.key.length + v.tokenSize + commonPrefixLength)) + closestNode.addChild(branchNode, v.tokenSize) + nodeWithValue := branchNode + + if key.length == branchNode.key.length { + // the branch node has exactly the key to be inserted as its key, so set the value on the branch node + branchNode.setValue(value) + } else { + // the key to be inserted is a child of the branch node + // create a new node and add the value to it + newNode := newNode(key) + newNode.setValue(value) + branchNode.addChild(newNode, v.tokenSize) + if err := v.recordNewNode(newNode); err != nil { + return nil, err + } + nodeWithValue = newNode + } + + // add the existing child onto the branch node + branchNode.setChildEntry( + existingChildEntry.compressedKey.Token(commonPrefixLength, v.tokenSize), + &child{ + compressedKey: existingChildEntry.compressedKey.Skip(commonPrefixLength + v.tokenSize), + id: existingChildEntry.id, + hasValue: existingChildEntry.hasValue, + }) + + return nodeWithValue, v.recordNewNode(branchNode) +} + +func getLengthOfCommonPrefix(first, second Key, secondOffset int, tokenSize int) int { + commonIndex := 0 + for first.length > commonIndex && second.length > commonIndex+secondOffset && + first.Token(commonIndex, tokenSize) == second.Token(commonIndex+secondOffset, tokenSize) { + commonIndex += tokenSize + } + return commonIndex +} + +// Records that a node has been created. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordNewNode(after *node) error { + return v.recordKeyChange(after.key, after, after.hasValue(), true /* newNode */) +} + +// Records that an existing node has been changed. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordNodeChange(after *node) error { + return v.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) +} + +// Records that the node associated with the given key has been deleted. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordNodeDeleted(after *node) error { + return v.recordKeyChange(after.key, nil, after.hasValue(), false /* newNode */) +} + +// Records that the node associated with the given key has been changed. +// If it is an existing node, record what its value was before it was changed. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordKeyChange(key Key, after *node, hadValue bool, newNode bool) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + if existing, ok := v.changes.nodes[key]; ok { + existing.after = after + return nil + } + + if newNode { + v.changes.nodes[key] = &change[*node]{ + after: after, + } + return nil + } + + before, err := v.getParentTrie().getEditableNode(key, hadValue) + if err != nil && !errors.Is(err, database.ErrNotFound) { + return err + } + v.changes.nodes[key] = &change[*node]{ + before: before, + after: after, + } + return nil +} + +// Records that a key's value has been added or updated. +// Doesn't actually change the trie data structure. +// That's deferred until we call [calculateNodeIDs]. +// Must not be called after [calculateNodeIDs] has returned. 
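Since the bit arithmetic above is easy to misread, here is a small, hypothetical in-package illustration of getLengthOfCommonPrefix (the helper is unexported, so this only works from within the merkledb package; the file name, test name, and values are assumptions used for illustration):

// common_prefix_example_test.go (hypothetical, in-package)
package merkledb

import "testing"

func TestGetLengthOfCommonPrefixExample(t *testing.T) {
	const tokenSize = 4 // bits per token for BranchFactor16

	a := ToKey([]byte{0x12}) // tokens: 0x1, 0x2
	b := ToKey([]byte{0x13}) // tokens: 0x1, 0x3

	// Only the first token matches, so the shared prefix is one token, i.e. 4 bits.
	if got := getLengthOfCommonPrefix(a, b, 0 /*secondOffset*/, tokenSize); got != 4 {
		t.Fatalf("expected a 4-bit common prefix, got %d", got)
	}
}

Key lengths and the returned prefix length are measured in bits, which is why callers such as insert add v.tokenSize rather than 1 when stepping past a token.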
+func (v *view) recordValueChange(key Key, value maybe.Maybe[[]byte]) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + // update the existing change if it exists + if existing, ok := v.changes.values[key]; ok { + existing.after = value + return nil + } + + // grab the before value + var beforeMaybe maybe.Maybe[[]byte] + before, err := v.getParentTrie().getValue(key) + switch err { + case nil: + beforeMaybe = maybe.Some(before) + case database.ErrNotFound: + beforeMaybe = maybe.Nothing[[]byte]() + default: + return err + } + + v.changes.values[key] = &change[maybe.Maybe[[]byte]]{ + before: beforeMaybe, + after: value, + } + return nil +} + +// Retrieves a node with the given [key]. +// If the node is fetched from [v.parentTrie] and [id] isn't empty, +// sets the node's ID to [id]. +// If the node is loaded from the baseDB, [hasValue] determines which database the node is stored in. +// Returns database.ErrNotFound if the node doesn't exist. +func (v *view) getNode(key Key, hasValue bool) (*node, error) { + // check for the key within the changed nodes + if nodeChange, isChanged := v.changes.nodes[key]; isChanged { + v.db.metrics.ViewNodeCacheHit() + if nodeChange.after == nil { + return nil, database.ErrNotFound + } + return nodeChange.after, nil + } + + // get the node from the parent trie and store a local copy + return v.getParentTrie().getEditableNode(key, hasValue) +} + +// Get the parent trie of the view +func (v *view) getParentTrie() View { + v.validityTrackingLock.RLock() + defer v.validityTrackingLock.RUnlock() + return v.parentTrie +} diff --git a/x/merkledb/view_iterator.go b/x/merkledb/view_iterator.go index 263aa409e882..60d1b8909e76 100644 --- a/x/merkledb/view_iterator.go +++ b/x/merkledb/view_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -11,26 +11,26 @@ import ( "golang.org/x/exp/slices" ) -func (t *trieView) NewIterator() database.Iterator { - return t.NewIteratorWithStartAndPrefix(nil, nil) +func (v *view) NewIterator() database.Iterator { + return v.NewIteratorWithStartAndPrefix(nil, nil) } -func (t *trieView) NewIteratorWithStart(start []byte) database.Iterator { - return t.NewIteratorWithStartAndPrefix(start, nil) +func (v *view) NewIteratorWithStart(start []byte) database.Iterator { + return v.NewIteratorWithStartAndPrefix(start, nil) } -func (t *trieView) NewIteratorWithPrefix(prefix []byte) database.Iterator { - return t.NewIteratorWithStartAndPrefix(nil, prefix) +func (v *view) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return v.NewIteratorWithStartAndPrefix(nil, prefix) } -func (t *trieView) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { +func (v *view) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { var ( - changes = make([]KeyChange, 0, len(t.changes.values)) - startKey = t.db.toKey(start) - prefixKey = t.db.toKey(prefix) + changes = make([]KeyChange, 0, len(v.changes.values)) + startKey = ToKey(start) + prefixKey = ToKey(prefix) ) - for key, change := range t.changes.values { + for key, change := range v.changes.values { if len(start) > 0 && startKey.Greater(key) || !key.HasPrefix(prefixKey) { continue } @@ -41,13 +41,13 @@ func (t *trieView) NewIteratorWithStartAndPrefix(start, prefix []byte) database. 
} // sort [changes] so they can be merged with the parent trie's state - slices.SortFunc(changes, func(a, b KeyChange) bool { - return bytes.Compare(a.Key, b.Key) == -1 + slices.SortFunc(changes, func(a, b KeyChange) int { + return bytes.Compare(a.Key, b.Key) }) return &viewIterator{ - view: t, - parentIter: t.parentTrie.NewIteratorWithStartAndPrefix(start, prefix), + view: v, + parentIter: v.parentTrie.NewIteratorWithStartAndPrefix(start, prefix), sortedChanges: changes, } } @@ -55,7 +55,7 @@ func (t *trieView) NewIteratorWithStartAndPrefix(start, prefix []byte) database. // viewIterator walks over both the in memory database and the underlying database // at the same time. type viewIterator struct { - view *trieView + view *view parentIter database.Iterator key, value []byte @@ -71,13 +71,7 @@ type viewIterator struct { // based on if the in memory changes or the underlying db should be read next func (it *viewIterator) Next() bool { switch { - case it.view.db.closed: - // Short-circuit and set an error if the underlying database has been closed. - it.key = nil - it.value = nil - it.err = database.ErrClosed - return false - case it.view.invalidated: + case it.view.isInvalid(): it.key = nil it.value = nil it.err = ErrInvalid diff --git a/x/merkledb/view_iterator_test.go b/x/merkledb/view_iterator_test.go index 6ec5c8f49b37..ba71c414c902 100644 --- a/x/merkledb/view_iterator_test.go +++ b/x/merkledb/view_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils/maybe" ) -func Test_TrieView_Iterator(t *testing.T) { +func Test_View_Iterator(t *testing.T) { require := require.New(t) key1 := []byte("hello1") @@ -34,7 +34,9 @@ func Test_TrieView_Iterator(t *testing.T) { require.NoError(db.Put(key1, value1)) require.NoError(db.Put(key2, value2)) - iterator := db.NewIterator() + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIterator() require.NotNil(iterator) defer iterator.Release() @@ -53,9 +55,36 @@ func Test_TrieView_Iterator(t *testing.T) { require.NoError(iterator.Error()) } -// Test_TrieView_IteratorStart tests to make sure the iterator can be configured to +func Test_View_Iterator_DBClosed(t *testing.T) { + require := require.New(t) + + key1 := []byte("hello1") + value1 := []byte("world1") + + db, err := getBasicDB() + require.NoError(err) + + require.NoError(db.Put(key1, value1)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIterator() + require.NotNil(iterator) + + defer iterator.Release() + + require.NoError(db.Close()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + err = iterator.Error() + require.ErrorIs(err, ErrInvalid) +} + +// Test_View_IteratorStart tests to make sure the iterator can be configured to // start midway through the database. 
-func Test_TrieView_IteratorStart(t *testing.T) { +func Test_View_IteratorStart(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) @@ -69,7 +98,9 @@ func Test_TrieView_IteratorStart(t *testing.T) { require.NoError(db.Put(key1, value1)) require.NoError(db.Put(key2, value2)) - iterator := db.NewIteratorWithStart(key2) + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithStart(key2) require.NotNil(iterator) defer iterator.Release() @@ -84,9 +115,9 @@ func Test_TrieView_IteratorStart(t *testing.T) { require.NoError(iterator.Error()) } -// Test_TrieView_IteratorPrefix tests to make sure the iterator can be configured to skip +// Test_View_IteratorPrefix tests to make sure the iterator can be configured to skip // keys missing the provided prefix. -func Test_TrieView_IteratorPrefix(t *testing.T) { +func Test_View_IteratorPrefix(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) @@ -104,7 +135,9 @@ func Test_TrieView_IteratorPrefix(t *testing.T) { require.NoError(db.Put(key2, value2)) require.NoError(db.Put(key3, value3)) - iterator := db.NewIteratorWithPrefix([]byte("h")) + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithPrefix([]byte("h")) require.NotNil(iterator) defer iterator.Release() @@ -119,9 +152,9 @@ func Test_TrieView_IteratorPrefix(t *testing.T) { require.NoError(iterator.Error()) } -// Test_TrieView_IteratorStartPrefix tests to make sure that the iterator can start +// Test_View_IteratorStartPrefix tests to make sure that the iterator can start // midway through the database while skipping a prefix. -func Test_TrieView_IteratorStartPrefix(t *testing.T) { +func Test_View_IteratorStartPrefix(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) @@ -139,7 +172,9 @@ func Test_TrieView_IteratorStartPrefix(t *testing.T) { require.NoError(db.Put(key2, value2)) require.NoError(db.Put(key3, value3)) - iterator := db.NewIteratorWithStartAndPrefix(key1, []byte("h")) + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithStartAndPrefix(key1, []byte("h")) require.NotNil(iterator) defer iterator.Release() @@ -161,7 +196,7 @@ func Test_TrieView_IteratorStartPrefix(t *testing.T) { // Test view iteration by creating a stack of views, // inserting random key/value pairs into them, and // iterating over the last view. -func Test_TrieView_Iterator_Random(t *testing.T) { +func Test_View_Iterator_Random(t *testing.T) { require := require.New(t) now := time.Now().UnixNano() t.Logf("seed: %d", now) diff --git a/x/sync/client.go b/x/sync/client.go index 095f515d41fb..b753e48f9f9e 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -36,6 +36,7 @@ var ( _ Client = (*client)(nil) errInvalidRangeProof = errors.New("failed to verify range proof") + errInvalidChangeProof = errors.New("failed to verify change proof") errTooManyKeys = errors.New("response contains more than requested keys") errTooManyBytes = errors.New("response contains more than requested bytes") errUnexpectedChangeProofResponse = errors.New("unexpected response type") @@ -73,7 +74,7 @@ type client struct { stateSyncMinVersion *version.Application log logging.Logger metrics SyncMetrics - branchFactor merkledb.BranchFactor + tokenSize int } type ClientConfig struct { @@ -95,7 +96,7 @@ func NewClient(config *ClientConfig) (Client, error) { stateSyncMinVersion: config.StateSyncMinVersion, log: config.Log, metrics: config.Metrics, - branchFactor: config.BranchFactor, + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], }, nil } @@ -124,7 +125,7 @@ func (c *client) GetChangeProof( case *pb.SyncGetChangeProofResponse_ChangeProof: // The server had enough history to send us a change proof var changeProof merkledb.ChangeProof - if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof, c.branchFactor); err != nil { + if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof); err != nil { return nil, err } @@ -149,7 +150,7 @@ func (c *client) GetChangeProof( endKey, endRoot, ); err != nil { - return nil, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + return nil, fmt.Errorf("%w due to %w", errInvalidChangeProof, err) } return &merkledb.ChangeOrRangeProof{ @@ -158,7 +159,7 @@ func (c *client) GetChangeProof( case *pb.SyncGetChangeProofResponse_RangeProof: var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof, c.branchFactor); err != nil { + if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { return nil, err } @@ -171,6 +172,7 @@ func (c *client) GetChangeProof( startKey, endKey, req.EndRootHash, + c.tokenSize, ) if err != nil { return nil, err @@ -208,6 +210,7 @@ func verifyRangeProof( start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], rootBytes []byte, + tokenSize int, ) error { root, err := ids.ToID(rootBytes) if err != nil { @@ -227,6 +230,7 @@ func verifyRangeProof( start, end, root, + tokenSize, ); err != nil { return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) } @@ -253,11 +257,8 @@ func (c *client) GetRangeProof( return nil, err } - startKey := maybeBytesToMaybe(req.StartKey) - endKey := maybeBytesToMaybe(req.EndKey) - var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(&rangeProofProto, c.branchFactor); err != nil { + if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil { return nil, err } @@ -265,9 +266,10 @@ func (c *client) GetRangeProof( ctx, &rangeProof, int(req.KeyLimit), - startKey, - endKey, + maybeBytesToMaybe(req.StartKey), + maybeBytesToMaybe(req.EndKey), req.RootHash, + c.tokenSize, ); err != nil { return nil, err } diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 08c1a787b474..d394aa654c14 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
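A note on the tokenSize field introduced above: the client now resolves the configured branch factor into a token size once, at construction, and threads that int through proof verification instead of passing a BranchFactor around. Token size is the number of bits consumed per child index; a minimal sketch of the mapping this assumes (the printed values are what the exported merkledb.BranchFactorToTokenSize map is expected to contain):

// tokensize_example.go (hypothetical)
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/x/merkledb"
)

func main() {
	for _, bf := range []merkledb.BranchFactor{
		merkledb.BranchFactor2,   // expected: 1 bit per token
		merkledb.BranchFactor4,   // expected: 2 bits per token
		merkledb.BranchFactor16,  // expected: 4 bits per token
		merkledb.BranchFactor256, // expected: 8 bits per token
	} {
		fmt.Println(bf, "->", merkledb.BranchFactorToTokenSize[bf])
	}
}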
package sync @@ -32,13 +32,14 @@ import ( func newDefaultDBConfig() merkledb.Config { return merkledb.Config{ - EvictionBatchSize: 100, - HistoryLength: defaultRequestKeyLimit, - ValueNodeCacheSize: defaultRequestKeyLimit, - IntermediateNodeCacheSize: defaultRequestKeyLimit, - Reg: prometheus.NewRegistry(), - Tracer: trace.Noop, - BranchFactor: merkledb.BranchFactor16, + IntermediateWriteBatchSize: 100, + HistoryLength: defaultRequestKeyLimit, + ValueNodeCacheSize: defaultRequestKeyLimit, + IntermediateWriteBufferSize: defaultRequestKeyLimit, + IntermediateNodeCacheSize: defaultRequestKeyLimit, + Reg: prometheus.NewRegistry(), + Tracer: trace.Noop, + BranchFactor: merkledb.BranchFactor16, } } @@ -122,9 +123,6 @@ func sendRangeProofRequest( }, ).AnyTimes() - // Handle bandwidth tracking calls from client. - networkClient.EXPECT().TrackBandwidth(gomock.Any(), gomock.Any()).AnyTimes() - // The server should expect to "send" a response to the client. sender.EXPECT().SendAppResponse( gomock.Any(), // ctx @@ -138,7 +136,7 @@ func sendRangeProofRequest( require.NoError(proto.Unmarshal(responseBytes, &responseProto)) var response merkledb.RangeProof - require.NoError(response.UnmarshalProto(&responseProto, merkledb.BranchFactor16)) + require.NoError(response.UnmarshalProto(&responseProto)) // modify if needed if modifyResponse != nil { @@ -159,13 +157,9 @@ func sendRangeProofRequest( } func TestGetRangeProof(t *testing.T) { - // TODO use time as random seed instead of 1 - // once we move to go 1.20 which allows for - // joining multiple errors with %w. Right now, - // for some of these tests, we may get different - // errors based on randomness but we can only - // assert one error. - r := rand.New(rand.NewSource(1)) // #nosec G404 + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 smallTrieKeyCount := defaultRequestKeyLimit smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, smallTrieKeyCount, 1) @@ -280,19 +274,7 @@ func TestGetRangeProof(t *testing.T) { response.StartProof = proof.StartProof response.EndProof = proof.EndProof }, - expectedErr: merkledb.ErrInvalidProof, - }, - "removed last key in response": { - db: largeTrieDB, - request: &pb.SyncGetRangeProofRequest{ - RootHash: largeTrieRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = response.KeyValues[:len(response.KeyValues)-2] - }, - expectedErr: merkledb.ErrProofNodeNotForKey, + expectedErr: errInvalidRangeProof, }, "removed key from middle of response": { db: largeTrieDB, @@ -319,7 +301,7 @@ func TestGetRangeProof(t *testing.T) { }, expectedErr: merkledb.ErrNoEndProof, }, - "end proof nodes removed": { + "end proof removed": { db: largeTrieDB, request: &pb.SyncGetRangeProofRequest{ RootHash: largeTrieRoot[:], @@ -339,11 +321,11 @@ func TestGetRangeProof(t *testing.T) { BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = nil response.StartProof = nil response.EndProof = nil + response.KeyValues = nil }, - expectedErr: merkledb.ErrNoMerkleProof, + expectedErr: merkledb.ErrEmptyProof, }, } @@ -456,7 +438,7 @@ func sendChangeProofRequest( if responseProto.GetChangeProof() != nil { // Server responded with a change proof var changeProof merkledb.ChangeProof - require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof(), merkledb.BranchFactor16)) + 
require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof())) // modify if needed if modifyChangeProof != nil { @@ -478,7 +460,7 @@ func sendChangeProofRequest( // Server responded with a range proof var rangeProof merkledb.RangeProof - require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof(), merkledb.BranchFactor16)) + require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof())) // modify if needed if modifyRangeProof != nil { @@ -503,13 +485,9 @@ func sendChangeProofRequest( } func TestGetChangeProof(t *testing.T) { - // TODO use time as random seed instead of 1 - // once we move to go 1.20 which allows for - // joining multiple errors with %w. Right now, - // for some of these tests, we may get different - // errors based on randomness but we can only - // assert one error. - r := rand.New(rand.NewSource(1)) // #nosec G404 + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 serverDB, err := merkledb.New( context.Background(), @@ -524,7 +502,7 @@ func TestGetChangeProof(t *testing.T) { newDefaultDBConfig(), ) require.NoError(t, err) - startRoot, err := serverDB.GetMerkleRoot(context.Background()) // TODO uncomment + startRoot, err := serverDB.GetMerkleRoot(context.Background()) require.NoError(t, err) // create changes @@ -566,6 +544,8 @@ func TestGetChangeProof(t *testing.T) { endRoot, err := serverDB.GetMerkleRoot(context.Background()) require.NoError(t, err) + fakeRootID := ids.GenerateTestID() + tests := map[string]struct { db DB request *pb.SyncGetChangeProofRequest @@ -623,19 +603,7 @@ func TestGetChangeProof(t *testing.T) { modifyChangeProofResponse: func(response *merkledb.ChangeProof) { response.KeyChanges = response.KeyChanges[1:] }, - expectedErr: merkledb.ErrInvalidProof, - }, - "removed last key in response": { - request: &pb.SyncGetChangeProofRequest{ - StartRootHash: startRoot[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: func(response *merkledb.ChangeProof) { - response.KeyChanges = response.KeyChanges[:len(response.KeyChanges)-2] - }, - expectedErr: merkledb.ErrProofNodeNotForKey, + expectedErr: errInvalidChangeProof, }, "removed key from middle of response": { request: &pb.SyncGetChangeProofRequest{ @@ -662,24 +630,11 @@ func TestGetChangeProof(t *testing.T) { }, expectedErr: merkledb.ErrInvalidProof, }, - "range proof response happy path": { - request: &pb.SyncGetChangeProofRequest{ - // Server doesn't have the (non-existent) start root - // so should respond with range proof. - StartRootHash: ids.Empty[:], - EndRootHash: endRoot[:], - KeyLimit: defaultRequestKeyLimit, - BytesLimit: defaultRequestByteSizeLimit, - }, - modifyChangeProofResponse: nil, - expectedErr: nil, - expectRangeProof: true, - }, "range proof response; remove first key": { request: &pb.SyncGetChangeProofRequest{ // Server doesn't have the (non-existent) start root // so should respond with range proof. 
- StartRootHash: ids.Empty[:], + StartRootHash: fakeRootID[:], EndRootHash: endRoot[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: defaultRequestByteSizeLimit, @@ -688,7 +643,7 @@ func TestGetChangeProof(t *testing.T) { modifyRangeProofResponse: func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[1:] }, - expectedErr: merkledb.ErrInvalidProof, + expectedErr: errInvalidRangeProof, expectRangeProof: true, }, } @@ -812,7 +767,7 @@ func TestAppRequestSendFailed(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(ids.NodeID{}, nil, errAppSendFailed).Times(2) + ).Return(ids.EmptyNodeID, nil, errAppSendFailed).Times(2) _, err = client.GetChangeProof( context.Background(), diff --git a/x/sync/db.go b/x/sync/db.go index 94b5542e34c1..5ed9061b5889 100644 --- a/x/sync/db.go +++ b/x/sync/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync @@ -6,6 +6,7 @@ package sync import "github.com/ava-labs/avalanchego/x/merkledb" type DB interface { + merkledb.Clearer merkledb.MerkleRootGetter merkledb.ProofGetter merkledb.ChangeProofer diff --git a/x/sync/g_db/db_client.go b/x/sync/g_db/db_client.go index 8bd936a53975..37b3339766ae 100644 --- a/x/sync/g_db/db_client.go +++ b/x/sync/g_db/db_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gdb @@ -19,16 +19,14 @@ import ( var _ sync.DB = (*DBClient)(nil) -func NewDBClient(client pb.DBClient, branchFactor merkledb.BranchFactor) *DBClient { +func NewDBClient(client pb.DBClient) *DBClient { return &DBClient{ - client: client, - branchFactor: branchFactor, + client: client, } } type DBClient struct { - client pb.DBClient - branchFactor merkledb.BranchFactor + client pb.DBClient } func (c *DBClient) GetMerkleRoot(ctx context.Context) (ids.ID, error) { @@ -47,6 +45,10 @@ func (c *DBClient) GetChangeProof( endKey maybe.Maybe[[]byte], keyLimit int, ) (*merkledb.ChangeProof, error) { + if endRootID == ids.Empty { + return nil, merkledb.ErrEmptyProof + } + resp, err := c.client.GetChangeProof(ctx, &pb.GetChangeProofRequest{ StartRootHash: startRootID[:], EndRootHash: endRootID[:], @@ -65,12 +67,15 @@ func (c *DBClient) GetChangeProof( } // TODO handle merkledb.ErrInvalidMaxLength + // TODO disambiguate between the root not being present due to + // the end root not being present and the start root not being + // present before the end root. i.e. ErrNoEndRoot vs ErrInsufficientHistory. 
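With the endRootID == ids.Empty guard above (and the matching rootID guard added to GetRangeProofAtRoot further down), a caller that asks this gRPC-backed client for a proof of the empty root now fails fast locally with merkledb.ErrEmptyProof instead of issuing a request the server would reject. A small sketch of the expected caller-side behavior (the helper name and key limit are illustrative assumptions):

// empty_root_guard_example.go (hypothetical)
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/maybe"
	"github.com/ava-labs/avalanchego/x/merkledb"
	gdb "github.com/ava-labs/avalanchego/x/sync/g_db"
)

func checkEmptyRootGuards(ctx context.Context, client *gdb.DBClient, startRoot ids.ID) error {
	// Change proofs ending at the empty root are rejected before any RPC is made.
	_, err := client.GetChangeProof(ctx, startRoot, ids.Empty,
		maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 128)
	if !errors.Is(err, merkledb.ErrEmptyProof) {
		return fmt.Errorf("expected ErrEmptyProof, got %v", err)
	}

	// The same applies to range proofs rooted at the empty root.
	_, err = client.GetRangeProofAtRoot(ctx, ids.Empty,
		maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 128)
	if !errors.Is(err, merkledb.ErrEmptyProof) {
		return fmt.Errorf("expected ErrEmptyProof, got %v", err)
	}
	return nil
}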
if resp.GetRootNotPresent() { return nil, merkledb.ErrInsufficientHistory } var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(resp.GetChangeProof(), c.branchFactor); err != nil { + if err := proof.UnmarshalProto(resp.GetChangeProof()); err != nil { return nil, err } return &proof, nil @@ -122,7 +127,7 @@ func (c *DBClient) GetProof(ctx context.Context, key []byte) (*merkledb.Proof, e } var proof merkledb.Proof - if err := proof.UnmarshalProto(resp.Proof, c.branchFactor); err != nil { + if err := proof.UnmarshalProto(resp.Proof); err != nil { return nil, err } return &proof, nil @@ -135,6 +140,10 @@ func (c *DBClient) GetRangeProofAtRoot( endKey maybe.Maybe[[]byte], keyLimit int, ) (*merkledb.RangeProof, error) { + if rootID == ids.Empty { + return nil, merkledb.ErrEmptyProof + } + resp, err := c.client.GetRangeProof(ctx, &pb.GetRangeProofRequest{ RootHash: rootID[:], StartKey: &pb.MaybeBytes{ @@ -152,7 +161,7 @@ func (c *DBClient) GetRangeProofAtRoot( } var proof merkledb.RangeProof - if err := proof.UnmarshalProto(resp.Proof, c.branchFactor); err != nil { + if err := proof.UnmarshalProto(resp.Proof); err != nil { return nil, err } return &proof, nil @@ -177,3 +186,8 @@ func (c *DBClient) CommitRangeProof( }) return err } + +func (c *DBClient) Clear() error { + _, err := c.client.Clear(context.Background(), &emptypb.Empty{}) + return err +} diff --git a/x/sync/g_db/db_server.go b/x/sync/g_db/db_server.go index b6471542dcca..a65e8a4fe0de 100644 --- a/x/sync/g_db/db_server.go +++ b/x/sync/g_db/db_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gdb @@ -19,18 +19,16 @@ import ( var _ pb.DBServer = (*DBServer)(nil) -func NewDBServer(db sync.DB, branchFactor merkledb.BranchFactor) *DBServer { +func NewDBServer(db sync.DB) *DBServer { return &DBServer{ - db: db, - branchFactor: branchFactor, + db: db, } } type DBServer struct { pb.UnsafeDBServer - db sync.DB - branchFactor merkledb.BranchFactor + db sync.DB } func (s *DBServer) GetMerkleRoot( @@ -98,7 +96,7 @@ func (s *DBServer) VerifyChangeProof( req *pb.VerifyChangeProofRequest, ) (*pb.VerifyChangeProofResponse, error) { var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(req.Proof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.Proof); err != nil { return nil, err } @@ -130,7 +128,7 @@ func (s *DBServer) CommitChangeProof( req *pb.CommitChangeProofRequest, ) (*emptypb.Empty, error) { var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(req.Proof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.Proof); err != nil { return nil, err } @@ -201,7 +199,7 @@ func (s *DBServer) CommitRangeProof( req *pb.CommitRangeProofRequest, ) (*emptypb.Empty, error) { var proof merkledb.RangeProof - if err := proof.UnmarshalProto(req.RangeProof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.RangeProof); err != nil { return nil, err } @@ -218,3 +216,7 @@ func (s *DBServer) CommitRangeProof( err := s.db.CommitRangeProof(ctx, start, end, &proof) return &emptypb.Empty{}, err } + +func (s *DBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return &emptypb.Empty{}, s.db.Clear() +} diff --git a/x/sync/manager.go b/x/sync/manager.go index 0a13a89eb32b..82f05eef08f9 100644 --- a/x/sync/manager.go +++ b/x/sync/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync @@ -10,12 +10,15 @@ import ( "fmt" "sync" + "golang.org/x/exp/maps" + "go.uber.org/zap" "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/x/merkledb" pb "github.com/ava-labs/avalanchego/proto/pb/sync" @@ -102,9 +105,9 @@ type Manager struct { cancelCtx context.CancelFunc // Set to true when StartSyncing is called. - syncing bool - closeOnce sync.Once - branchFactor merkledb.BranchFactor + syncing bool + closeOnce sync.Once + tokenSize int } type ManagerConfig struct { @@ -136,7 +139,7 @@ func NewManager(config ManagerConfig) (*Manager, error) { doneChan: make(chan struct{}), unprocessedWork: newWorkHeap(), processedWork: newWorkHeap(), - branchFactor: config.BranchFactor, + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], } m.unprocessedWorkCond.L = &m.workLock @@ -263,6 +266,18 @@ func (m *Manager) getAndApplyChangeProof(ctx context.Context, work *workItem) { return } + if targetRootID == ids.Empty { + // The trie is empty after this change. + // Delete all the key-value pairs in the range. + if err := m.config.DB.Clear(); err != nil { + m.setError(err) + return + } + work.start = maybe.Nothing[[]byte]() + m.completeWorkItem(ctx, work, maybe.Nothing[[]byte](), targetRootID, nil) + return + } + changeOrRangeProof, err := m.config.Client.GetChangeProof( ctx, &pb.SyncGetChangeProofRequest{ @@ -329,6 +344,17 @@ func (m *Manager) getAndApplyChangeProof(ctx context.Context, work *workItem) { // Assumes [m.workLock] is not held. func (m *Manager) getAndApplyRangeProof(ctx context.Context, work *workItem) { targetRootID := m.getTargetRoot() + + if targetRootID == ids.Empty { + if err := m.config.DB.Clear(); err != nil { + m.setError(err) + return + } + work.start = maybe.Nothing[[]byte]() + m.completeWorkItem(ctx, work, maybe.Nothing[[]byte](), targetRootID, nil) + return + } + proof, err := m.config.Client.GetRangeProof(ctx, &pb.SyncGetRangeProofRequest{ RootHash: targetRootID[:], @@ -404,7 +430,7 @@ func (m *Manager) findNextKey( // and traversing them from the longest key to the shortest key. // For each node in these proofs, compare if the children of that node exist // or have the same ID in the other proof. - proofKeyPath := merkledb.ToKey(lastReceivedKey, m.branchFactor) + proofKeyPath := merkledb.ToKey(lastReceivedKey) // If the received proof is an exclusion proof, the last node may be for a // key that is after the [lastReceivedKey]. @@ -431,10 +457,32 @@ func (m *Manager) findNextKey( nextKey := maybe.Nothing[[]byte]() + // Add sentinel node back into the localProofNodes, if it is missing. + // Required to ensure that a common node exists in both proofs + if len(localProofNodes) > 0 && localProofNodes[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + localProofNodes[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + localProofNodes = append([]merkledb.ProofNode{sentinel}, localProofNodes...) + } + + // Add sentinel node back into the endProof, if it is missing. 
+ // Required to ensure that a common node exists in both proofs + if len(endProof) > 0 && endProof[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + endProof[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + endProof = append([]merkledb.ProofNode{sentinel}, endProof...) + } + localProofNodeIndex := len(localProofNodes) - 1 receivedProofNodeIndex := len(endProof) - 1 - // traverse the two proofs from the deepest nodes up to the root until a difference is found + // traverse the two proofs from the deepest nodes up to the sentinel node until a difference is found for localProofNodeIndex >= 0 && receivedProofNodeIndex >= 0 && nextKey.IsNothing() { localProofNode := localProofNodes[localProofNodeIndex] receivedProofNode := endProof[receivedProofNodeIndex] @@ -447,7 +495,7 @@ func (m *Manager) findNextKey( // select the deepest proof node from the two proofs switch { - case receivedProofNode.Key.TokensLength() > localProofNode.Key.TokensLength(): + case receivedProofNode.Key.Length() > localProofNode.Key.Length(): // there was a branch node in the received proof that isn't in the local proof // see if the received proof node has children not present in the local proof deepestNode = &receivedProofNode @@ -455,7 +503,7 @@ func (m *Manager) findNextKey( // we have dealt with this received node, so move on to the next received node receivedProofNodeIndex-- - case localProofNode.Key.TokensLength() > receivedProofNode.Key.TokensLength(): + case localProofNode.Key.Length() > receivedProofNode.Key.Length(): // there was a branch node in the local proof that isn't in the received proof // see if the local proof node has children not present in the received proof deepestNode = &localProofNode @@ -482,20 +530,20 @@ func (m *Manager) findNextKey( // If the deepest node has the same key as [proofKeyPath], // then all of its children have keys greater than the proof key, // so we can start at the 0 token. - startingChildToken := byte(0) + startingChildToken := 0 // If the deepest node has a key shorter than the key being proven, // we can look at the next token index of the proof key to determine which of that // node's children have keys larger than [proofKeyPath]. // Any child with a token greater than the [proofKeyPath]'s token at that // index will have a larger key. 
- if deepestNode.Key.TokensLength() < proofKeyPath.TokensLength() { - startingChildToken = proofKeyPath.Token(deepestNode.Key.TokensLength()) + 1 + if deepestNode.Key.Length() < proofKeyPath.Length() { + startingChildToken = int(proofKeyPath.Token(deepestNode.Key.Length(), m.tokenSize)) + 1 } // determine if there are any differences in the children for the deepest unhandled node of the two proofs - if childIndex, hasDifference := findChildDifference(deepestNode, deepestNodeFromOtherProof, startingChildToken, m.branchFactor); hasDifference { - nextKey = maybe.Some(deepestNode.Key.Append(childIndex).Bytes()) + if childIndex, hasDifference := findChildDifference(deepestNode, deepestNodeFromOtherProof, startingChildToken); hasDifference { + nextKey = maybe.Some(deepestNode.Key.Extend(merkledb.ToToken(childIndex, m.tokenSize)).Bytes()) break } } @@ -794,12 +842,27 @@ func midPoint(startMaybe, endMaybe maybe.Maybe[[]byte]) maybe.Maybe[[]byte] { // findChildDifference returns the first child index that is different between node 1 and node 2 if one exists and // a bool indicating if any difference was found -func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex byte, branchFactor merkledb.BranchFactor) (byte, bool) { +func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex int) (byte, bool) { + // Children indices >= [startIndex] present in at least one of the nodes. + childIndices := set.Set[byte]{} + for _, node := range []*merkledb.ProofNode{node1, node2} { + if node == nil { + continue + } + for key := range node.Children { + if int(key) >= startIndex { + childIndices.Add(key) + } + } + } + + sortedChildIndices := maps.Keys(childIndices) + slices.Sort(sortedChildIndices) var ( child1, child2 ids.ID ok1, ok2 bool ) - for childIndex := startIndex; merkledb.BranchFactor(childIndex) < branchFactor; childIndex++ { + for _, childIndex := range sortedChildIndices { if node1 != nil { child1, ok1 = node1.Children[childIndex] } diff --git a/x/sync/metrics.go b/x/sync/metrics.go index 881ca37282ef..fb27e6b45ffb 100644 --- a/x/sync/metrics.go +++ b/x/sync/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync diff --git a/x/sync/mock_client.go b/x/sync/mock_client.go index 153cfb5de5a7..98fa6d69fd9f 100644 --- a/x/sync/mock_client.go +++ b/x/sync/mock_client.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/x/sync (interfaces: Client) +// +// Generated by this command: +// +// mockgen -package=sync -destination=x/sync/mock_client.go github.com/ava-labs/avalanchego/x/sync Client +// // Package sync is a generated GoMock package. package sync @@ -49,7 +51,7 @@ func (m *MockClient) GetChangeProof(arg0 context.Context, arg1 *sync.SyncGetChan } // GetChangeProof indicates an expected call of GetChangeProof. 
-func (mr *MockClientMockRecorder) GetChangeProof(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) GetChangeProof(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockClient)(nil).GetChangeProof), arg0, arg1, arg2) } @@ -64,7 +66,7 @@ func (m *MockClient) GetRangeProof(arg0 context.Context, arg1 *sync.SyncGetRange } // GetRangeProof indicates an expected call of GetRangeProof. -func (mr *MockClientMockRecorder) GetRangeProof(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) GetRangeProof(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockClient)(nil).GetRangeProof), arg0, arg1) } diff --git a/x/sync/mock_network_client.go b/x/sync/mock_network_client.go index 8021a015f062..428191492c4d 100644 --- a/x/sync/mock_network_client.go +++ b/x/sync/mock_network_client.go @@ -1,7 +1,12 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: x/sync/network_client.go - -// Package mock_sync is a generated GoMock package. +// Source: github.com/ava-labs/avalanchego/x/sync (interfaces: NetworkClient) +// +// Generated by this command: +// +// mockgen -package=sync -destination=x/sync/mock_network_client.go github.com/ava-labs/avalanchego/x/sync NetworkClient +// + +// Package sync is a generated GoMock package. package sync import ( @@ -45,7 +50,7 @@ func (m *MockNetworkClient) AppRequestFailed(arg0 context.Context, arg1 ids.Node } // AppRequestFailed indicates an expected call of AppRequestFailed. -func (mr *MockNetworkClientMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) AppRequestFailed(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockNetworkClient)(nil).AppRequestFailed), arg0, arg1, arg2) } @@ -59,7 +64,7 @@ func (m *MockNetworkClient) AppResponse(arg0 context.Context, arg1 ids.NodeID, a } // AppResponse indicates an expected call of AppResponse. -func (mr *MockNetworkClientMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockNetworkClient)(nil).AppResponse), arg0, arg1, arg2, arg3) } @@ -73,7 +78,7 @@ func (m *MockNetworkClient) Connected(arg0 context.Context, arg1 ids.NodeID, arg } // Connected indicates an expected call of Connected. -func (mr *MockNetworkClientMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockNetworkClient)(nil).Connected), arg0, arg1, arg2) } @@ -87,30 +92,30 @@ func (m *MockNetworkClient) Disconnected(arg0 context.Context, arg1 ids.NodeID) } // Disconnected indicates an expected call of Disconnected. 
-func (mr *MockNetworkClientMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockNetworkClient)(nil).Disconnected), arg0, arg1) } // Request mocks base method. -func (m *MockNetworkClient) Request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { +func (m *MockNetworkClient) Request(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Request", ctx, nodeID, request) + ret := m.ctrl.Call(m, "Request", arg0, arg1, arg2) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // Request indicates an expected call of Request. -func (mr *MockNetworkClientMockRecorder) Request(ctx, nodeID, request interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) Request(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Request", reflect.TypeOf((*MockNetworkClient)(nil).Request), ctx, nodeID, request) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Request", reflect.TypeOf((*MockNetworkClient)(nil).Request), arg0, arg1, arg2) } // RequestAny mocks base method. -func (m *MockNetworkClient) RequestAny(ctx context.Context, minVersion *version.Application, request []byte) (ids.NodeID, []byte, error) { +func (m *MockNetworkClient) RequestAny(arg0 context.Context, arg1 *version.Application, arg2 []byte) (ids.NodeID, []byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RequestAny", ctx, minVersion, request) + ret := m.ctrl.Call(m, "RequestAny", arg0, arg1, arg2) ret0, _ := ret[0].(ids.NodeID) ret1, _ := ret[1].([]byte) ret2, _ := ret[2].(error) @@ -118,19 +123,7 @@ func (m *MockNetworkClient) RequestAny(ctx context.Context, minVersion *version. } // RequestAny indicates an expected call of RequestAny. -func (mr *MockNetworkClientMockRecorder) RequestAny(ctx, minVersion, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestAny", reflect.TypeOf((*MockNetworkClient)(nil).RequestAny), ctx, minVersion, request) -} - -// TrackBandwidth mocks base method. -func (m *MockNetworkClient) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "TrackBandwidth", nodeID, bandwidth) -} - -// TrackBandwidth indicates an expected call of TrackBandwidth. -func (mr *MockNetworkClientMockRecorder) TrackBandwidth(nodeID, bandwidth interface{}) *gomock.Call { +func (mr *MockNetworkClientMockRecorder) RequestAny(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrackBandwidth", reflect.TypeOf((*MockNetworkClient)(nil).TrackBandwidth), nodeID, bandwidth) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestAny", reflect.TypeOf((*MockNetworkClient)(nil).RequestAny), arg0, arg1, arg2) } diff --git a/x/sync/network_client.go b/x/sync/network_client.go index 65f939019d7f..22d7766f3f52 100644 --- a/x/sync/network_client.go +++ b/x/sync/network_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -17,6 +17,7 @@ import ( "golang.org/x/sync/semaphore" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -87,7 +88,7 @@ type networkClient struct { // controls maximum number of active outbound requests activeRequests *semaphore.Weighted // tracking of peers & bandwidth usage - peers *peerTracker + peers *p2p.PeerTracker // For sending messages to peers appSender common.AppSender } @@ -100,7 +101,7 @@ func NewNetworkClient( metricsNamespace string, registerer prometheus.Registerer, ) (NetworkClient, error) { - peerTracker, err := newPeerTracker(log, metricsNamespace, registerer) + peerTracker, err := p2p.NewPeerTracker(log, metricsNamespace, registerer) if err != nil { return nil, fmt.Errorf("failed to create peer tracker: %w", err) } diff --git a/x/sync/network_server.go b/x/sync/network_server.go index 6f21702ce397..f8c311964e05 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync @@ -39,7 +39,6 @@ const ( // TODO: refine this estimate. This is almost certainly a large overestimate. estimatedMessageOverhead = 4 * units.KiB maxByteSizeLimit = constants.DefaultMaxMessageSize - estimatedMessageOverhead - endProofSizeBufferAmount = 2 * units.KiB ) var ( @@ -199,8 +198,15 @@ func (s *NetworkServer) HandleChangeProofRequest( changeProof, err := s.db.GetChangeProof(ctx, startRoot, endRoot, start, end, int(keyLimit)) if err != nil { if !errors.Is(err, merkledb.ErrInsufficientHistory) { + // We should only fail to get a change proof if we have insufficient history. + // Other errors are unexpected. return err } + if errors.Is(err, merkledb.ErrNoEndRoot) { + // [s.db] doesn't have [endRoot] in its history. + // We can't generate a change/range proof. Drop this request. + return nil + } // [s.db] doesn't have sufficient history to generate change proof. // Generate a range proof for the end root ID instead. @@ -391,6 +397,8 @@ func validateChangeProofRequest(req *pb.SyncGetChangeProofRequest) error { return errInvalidStartRootHash case len(req.EndRootHash) != hashing.HashLen: return errInvalidEndRootHash + case bytes.Equal(req.EndRootHash, ids.Empty[:]): + return merkledb.ErrEmptyProof case req.StartKey != nil && req.StartKey.IsNothing && len(req.StartKey.Value) > 0: return errInvalidStartKey case req.EndKey != nil && req.EndKey.IsNothing && len(req.EndKey.Value) > 0: @@ -412,6 +420,8 @@ func validateRangeProofRequest(req *pb.SyncGetRangeProofRequest) error { return errInvalidKeyLimit case len(req.RootHash) != ids.IDLen: return errInvalidRootHash + case bytes.Equal(req.RootHash, ids.Empty[:]): + return merkledb.ErrEmptyProof case req.StartKey != nil && req.StartKey.IsNothing && len(req.StartKey.Value) > 0: return errInvalidStartKey case req.EndKey != nil && req.EndKey.IsNothing && len(req.EndKey.Value) > 0: diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go index 60555498457f..66135c0025c6 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -93,6 +93,14 @@ func Test_Server_GetRangeProof(t *testing.T) { }, expectedMaxResponseBytes: defaultRequestByteSizeLimit, }, + "empty proof": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: ids.Empty[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + proofNil: true, + }, } for name, test := range tests { @@ -114,7 +122,7 @@ func Test_Server_GetRangeProof(t *testing.T) { require.NoError(proto.Unmarshal(responseBytes, &proofProto)) var p merkledb.RangeProof - require.NoError(p.UnmarshalProto(&proofProto, merkledb.BranchFactor16)) + require.NoError(p.UnmarshalProto(&proofProto)) proof = &p } return nil @@ -252,7 +260,7 @@ func Test_Server_GetChangeProof(t *testing.T) { request: &pb.SyncGetChangeProofRequest{ // This root doesn't exist so server has insufficient history // to serve a change proof - StartRootHash: ids.Empty[:], + StartRootHash: fakeRootID[:], EndRootHash: endRoot[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: defaultRequestByteSizeLimit, @@ -263,7 +271,7 @@ func Test_Server_GetChangeProof(t *testing.T) { "insufficient history for change proof or range proof": { request: &pb.SyncGetChangeProofRequest{ // These roots don't exist so server has insufficient history - // to serve a change proof + // to serve a change proof or range proof StartRootHash: ids.Empty[:], EndRootHash: fakeRootID[:], KeyLimit: defaultRequestKeyLimit, BytesLimit: defaultRequestByteSizeLimit, @@ -272,6 +280,16 @@ func Test_Server_GetChangeProof(t *testing.T) { expectedMaxResponseBytes: defaultRequestByteSizeLimit, proofNil: true, }, + "empty proof": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: fakeRootID[:], + EndRootHash: ids.Empty[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + proofNil: true, + }, } for name, test := range tests { diff --git a/x/sync/response_handler.go b/x/sync/response_handler.go index 71e0c5f64580..624a3221fc9c 100644 --- a/x/sync/response_handler.go +++ b/x/sync/response_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 9a6a5de2dba9..0d659b3d84c3 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms.
package sync @@ -102,14 +102,17 @@ func Test_Completion(t *testing.T) { newDefaultDBConfig(), ) require.NoError(err) + emptyRoot, err := emptyDB.GetMerkleRoot(context.Background()) require.NoError(err) + db, err := merkledb.New( context.Background(), memdb.New(), newDefaultDBConfig(), ) require.NoError(err) + syncer, err := NewManager(ManagerConfig{ DB: db, Client: newCallthroughSyncClient(ctrl, emptyDB), @@ -120,8 +123,10 @@ func Test_Completion(t *testing.T) { }) require.NoError(err) require.NotNil(syncer) + require.NoError(syncer.Start(context.Background())) require.NoError(syncer.Wait(context.Background())) + syncer.workLock.Lock() require.Zero(syncer.unprocessedWork.Len()) require.Equal(1, syncer.processedWork.Len()) @@ -332,25 +337,26 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { require.NoError(db.Put([]byte{0x11}, []byte{1})) require.NoError(db.Put([]byte{0x11, 0x11}, []byte{2})) - syncRoot, err := db.GetMerkleRoot(context.Background()) + targetRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) + proof, err := db.GetProof(context.Background(), []byte{0x11, 0x11}) require.NoError(err) syncer, err := NewManager(ManagerConfig{ DB: db, Client: NewMockClient(ctrl), - TargetRoot: syncRoot, + TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, }) require.NoError(err) - require.NoError(db.Put([]byte{0x12}, []byte{4})) + require.NoError(db.Put([]byte{0x11, 0x15}, []byte{4})) nextKey, err := syncer.findNextKey(context.Background(), []byte{0x11, 0x11}, maybe.Some([]byte{0x20}), proof.Path) require.NoError(err) - require.Equal(maybe.Some([]byte{0x12}), nextKey) + require.Equal(maybe.Some([]byte{0x11, 0x15}), nextKey) } func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { @@ -365,27 +371,28 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { require.NoError(err) require.NoError(db.Put([]byte{0x11}, []byte{1})) require.NoError(db.Put([]byte{0x12}, []byte{2})) - require.NoError(db.Put([]byte{0x11, 0x11}, []byte{3})) + require.NoError(db.Put([]byte{0x12, 0xA0}, []byte{4})) - syncRoot, err := db.GetMerkleRoot(context.Background()) + targetRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - proof, err := db.GetProof(context.Background(), []byte{0x11, 0x11}) + + proof, err := db.GetProof(context.Background(), []byte{0x12}) require.NoError(err) syncer, err := NewManager(ManagerConfig{ DB: db, Client: NewMockClient(ctrl), - TargetRoot: syncRoot, + TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, }) require.NoError(err) - require.NoError(db.Delete([]byte{0x12})) + require.NoError(db.Delete([]byte{0x12, 0xA0})) - nextKey, err := syncer.findNextKey(context.Background(), []byte{0x11, 0x11}, maybe.Some([]byte{0x20}), proof.Path) + nextKey, err := syncer.findNextKey(context.Background(), []byte{0x12}, maybe.Some([]byte{0x20}), proof.Path) require.NoError(err) - require.Equal(maybe.Some([]byte{0x12}), nextKey) + require.Equal(maybe.Some([]byte{0x12, 0xA0}), nextKey) } func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { @@ -586,10 +593,11 @@ func TestFindNextKeyRandom(t *testing.T) { ) require.NoError(err) + config := newDefaultDBConfig() localDB, err := merkledb.New( context.Background(), memdb.New(), - newDefaultDBConfig(), + config, ) require.NoError(err) @@ -677,7 +685,7 @@ func TestFindNextKeyRandom(t *testing.T) { for _, node := range remoteProof.EndProof { for childIdx, childID := range node.Children { 
remoteKeyIDs = append(remoteKeyIDs, keyAndID{ - key: node.Key.Append(childIdx), + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), id: childID, }) } @@ -688,18 +696,18 @@ func TestFindNextKeyRandom(t *testing.T) { for _, node := range localProof.Path { for childIdx, childID := range node.Children { localKeyIDs = append(localKeyIDs, keyAndID{ - key: node.Key.Append(childIdx), + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), id: childID, }) } } // Sort in ascending order by key prefix. - serializedPathLess := func(i, j keyAndID) bool { - return i.key.Less(j.key) + serializedPathCompare := func(i, j keyAndID) int { + return i.key.Compare(j.key) } - slices.SortFunc(remoteKeyIDs, serializedPathLess) - slices.SortFunc(localKeyIDs, serializedPathLess) + slices.SortFunc(remoteKeyIDs, serializedPathCompare) + slices.SortFunc(localKeyIDs, serializedPathCompare) // Filter out keys that are before the last received key findBounds := func(keyIDs []keyAndID) (int, int) { @@ -737,7 +745,7 @@ func TestFindNextKeyRandom(t *testing.T) { for i := 0; i < len(remoteKeyIDs) && i < len(localKeyIDs); i++ { // See if the keys are different. smaller, bigger := remoteKeyIDs[i], localKeyIDs[i] - if serializedPathLess(localKeyIDs[i], remoteKeyIDs[i]) { + if serializedPathCompare(localKeyIDs[i], remoteKeyIDs[i]) == -1 { smaller, bigger = localKeyIDs[i], remoteKeyIDs[i] } @@ -1193,8 +1201,6 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen } i++ } - slices.SortFunc(allKeys, func(a, b []byte) bool { - return bytes.Compare(a, b) < 0 - }) + slices.SortFunc(allKeys, bytes.Compare) return db, allKeys, batch.Write() } diff --git a/x/sync/workheap.go b/x/sync/workheap.go index 76d438c92d17..b49a19372caf 100644 --- a/x/sync/workheap.go +++ b/x/sync/workheap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync diff --git a/x/sync/workheap_test.go b/x/sync/workheap_test.go index 0a3262a9310f..d073ce5f9fdc 100644 --- a/x/sync/workheap_test.go +++ b/x/sync/workheap_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync import ( + "bytes" "math/rand" "testing" "time" @@ -13,7 +14,6 @@ import ( "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/maybe" ) @@ -199,7 +199,7 @@ func TestWorkHeapMergeInsertRandom(t *testing.T) { _, _ = rand.Read(bound) bounds = append(bounds, bound) } - utils.SortBytes(bounds) + slices.SortFunc(bounds, bytes.Compare) // Note that start < end for all ranges. // It is possible but extremely unlikely that