From 6bf1c62c416331e51236a99ba33e8424a78a2324 Mon Sep 17 00:00:00 2001 From: noot <36753753+noot@users.noreply.github.com> Date: Thu, 6 May 2021 15:15:01 -0400 Subject: [PATCH] chore(release): release v0.5.0 (#1562) --- .github/CODEOWNERS | 2 +- .github/CODE_OF_CONDUCT.md | 10 +- .github/CONTRIBUTING.md | 4 +- .github/ISSUE_TEMPLATE/bug_report.md | 6 +- .github/workflows/release.yml | 4 +- .github/workflows/tests.yml | 3 +- .golangci.yml | 4 +- Makefile | 2 +- README.md | 6 +- cmd/gossamer/config.go | 123 +++-- cmd/gossamer/config_test.go | 142 +++++- cmd/gossamer/flags.go | 7 +- cmd/gossamer/main.go | 65 ++- cmd/gossamer/main_test.go | 62 ++- cmd/gossamer/utils.go | 5 +- docs/docs/contributing.md | 4 +- docs/docs/getting-started/installation.md | 4 +- .../overview/host-architecture.md | 2 +- .../overview/package-library.md | 4 +- .../resources/general-resources.md | 2 +- docs/docs/integrate/connect-to-polkadot-js.md | 13 +- docs/docs/usage/command-line.md | 10 +- docs/docs/usage/import-runtime.md | 4 +- docs/docs/usage/running-nodes.md | 4 +- dot/build_spec.go | 24 +- dot/build_spec_test.go | 81 +++- dot/core/digest.go | 255 +++++------ dot/core/digest_test.go | 162 ++++--- dot/core/errors.go | 3 - dot/core/interface.go | 21 +- dot/core/service.go | 68 ++- dot/core/service_test.go | 2 +- dot/core/test_helpers.go | 29 +- dot/import.go | 4 +- dot/network/block_announce.go | 25 +- dot/network/block_announce_test.go | 7 +- dot/network/config.go | 9 + dot/network/connmgr.go | 23 +- dot/network/connmgr_test.go | 2 +- dot/network/gossip_test.go | 2 +- dot/network/host.go | 59 +-- dot/network/host_test.go | 33 +- dot/network/light_test.go | 4 +- dot/network/message.go | 19 +- dot/network/message_test.go | 9 +- dot/network/notifications.go | 258 ++++++----- dot/network/notifications_test.go | 49 +- dot/network/service.go | 169 ++++--- dot/network/service_test.go | 37 +- dot/network/state.go | 2 + dot/network/sync.go | 61 ++- dot/network/sync_justification.go | 59 +-- 
dot/network/sync_justification_test.go | 32 -- dot/network/sync_test.go | 8 +- dot/network/test_helpers.go | 32 +- dot/network/transaction.go | 6 +- dot/network/utils.go | 4 +- dot/node.go | 118 ++++- dot/node_test.go | 31 +- dot/rpc/dot_up_codec.go | 2 +- dot/rpc/http.go | 11 +- dot/rpc/modules/api.go | 6 +- dot/rpc/modules/author.go | 4 +- dot/rpc/modules/chain.go | 4 +- dot/rpc/modules/chain_test.go | 2 +- dot/rpc/modules/grandpa.go | 2 +- dot/rpc/modules/system_test.go | 4 + dot/rpc/subscription/listeners.go | 263 +++-------- dot/rpc/subscription/listeners_test.go | 161 +++++++ dot/rpc/subscription/messages.go | 50 +- dot/rpc/subscription/websocket.go | 219 ++++++++- dot/rpc/subscription/websocket_test.go | 265 +++++++++++ dot/rpc/websocket_test.go | 10 +- dot/services.go | 45 +- dot/services_test.go | 28 +- dot/state/base.go | 148 ++++++ dot/state/{db_test.go => base_test.go} | 64 +-- dot/state/block.go | 127 +++-- dot/state/block_data.go | 12 +- dot/state/block_notify.go | 50 +- dot/state/block_notify_test.go | 12 +- dot/state/block_test.go | 138 +++++- dot/state/db.go | 95 ---- dot/state/epoch.go | 50 +- dot/state/grandpa.go | 266 +++++++++++ dot/state/grandpa_test.go | 124 +++++ dot/state/initialize.go | 209 +++++++++ dot/state/service.go | 238 +++------- dot/state/service_test.go | 52 ++- dot/state/storage.go | 81 +--- dot/state/storage_notify.go | 127 +++-- dot/state/storage_notify_test.go | 145 +++--- dot/sync/interface.go | 7 +- dot/sync/syncer.go | 73 ++- dot/sync/syncer_test.go | 39 +- dot/telemetry/telemetry.go | 95 +++- dot/telemetry/telemetry_test.go | 96 +++- dot/types/block.go | 2 +- dot/types/grandpa.go | 114 +++++ dot/types/roles.go | 2 +- dot/utils.go | 14 + go.mod | 2 +- go.sum | 4 +- lib/babe/babe.go | 10 +- lib/babe/babe_test.go | 2 +- lib/babe/build.go | 143 ++---- lib/babe/build_test.go | 4 +- lib/babe/errors.go | 114 ++++- lib/babe/errors_test.go | 75 +++ lib/babe/verify_test.go | 2 +- lib/blocktree/blocktree.go | 12 +- 
lib/blocktree/blocktree_test.go | 14 +- lib/blocktree/node.go | 18 +- lib/blocktree/node_test.go | 14 +- lib/common/common.go | 27 ++ lib/common/common_test.go | 33 ++ lib/common/db_keys.go | 12 +- lib/common/optional/types.go | 23 +- lib/common/optional/types_test.go | 35 ++ lib/grandpa/errors.go | 105 +++-- lib/grandpa/grandpa.go | 320 ++++++------- lib/grandpa/grandpa_test.go | 100 ++-- lib/grandpa/message.go | 251 ++++++++-- lib/grandpa/message_handler.go | 249 ++++++++-- lib/grandpa/message_handler_test.go | 258 +++++++---- lib/grandpa/message_test.go | 86 ++-- lib/grandpa/network.go | 61 ++- lib/grandpa/network_test.go | 71 ++- lib/grandpa/round_test.go | 144 +++--- lib/grandpa/state.go | 15 +- lib/grandpa/types.go | 132 +++--- lib/grandpa/types_test.go | 46 +- lib/grandpa/vote_message.go | 34 +- lib/grandpa/vote_message_test.go | 27 +- lib/keystore/keyring.go | 4 +- lib/runtime/constants.go | 3 +- .../extrinsic/unchecked_extrinsic_test.go | 2 +- lib/runtime/life/exports.go | 3 +- lib/runtime/sig_verifier.go | 2 +- lib/runtime/storage/trie_test.go | 2 +- lib/runtime/wasmer/exports.go | 3 +- lib/runtime/wasmer/exports_test.go | 2 +- lib/runtime/wasmtime/exports.go | 2 + lib/scale/decode.go | 2 +- lib/scale/encode.go | 25 +- lib/trie/database_test.go | 2 +- lib/trie/trie_test.go | 4 +- scripts/install-lint.sh | 2 +- tests/polkadotjs_test/package-lock.json | 432 +++++++++--------- tests/polkadotjs_test/test_transaction.js | 5 +- tests/stress/errors.go | 4 +- tests/stress/grandpa_test.go | 10 +- tests/stress/helpers.go | 10 +- tests/stress/stress_test.go | 44 +- tests/sync/sync_test.go | 2 +- tests/utils/chain.go | 4 +- tests/utils/gossamer_utils.go | 14 +- 157 files changed, 5719 insertions(+), 2962 deletions(-) create mode 100644 dot/rpc/subscription/listeners_test.go create mode 100644 dot/rpc/subscription/websocket_test.go create mode 100644 dot/state/base.go rename dot/state/{db_test.go => base_test.go} (72%) delete mode 100644 dot/state/db.go create mode 
100644 dot/state/grandpa.go create mode 100644 dot/state/grandpa_test.go create mode 100644 dot/state/initialize.go create mode 100644 lib/babe/errors_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8d0407785a..9c443d529a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,4 @@ # CODEOWNERS: https://help.github.com/articles/about-codeowners/ # Primary repo maintainers -* @noot @arijitAD @edwardmack \ No newline at end of file +* @noot @arijitAD @edwardmack @timwu20 \ No newline at end of file diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 2e3c5f0ec9..30f45bee28 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -11,7 +11,7 @@ appearance, race, religion, or sexual identity and orientation. ## Our Standards -Examples of behavior that contributes to creating a positive environment +Examples of behaviour that contributes to creating a positive environment include: * Using welcoming and inclusive language @@ -20,7 +20,7 @@ include: * Focusing on what is best for the community * Showing empathy towards other community members -Examples of unacceptable behavior by participants include: +Examples of unacceptable behaviour by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances @@ -34,13 +34,13 @@ Examples of unacceptable behavior by participants include: ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. +behaviour and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behaviour. 
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, +permanently any contributor for other behaviours that they deem inappropriate, threatening, offensive, or harmful. ## Scope diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index e34ef3b777..0f54b02bea 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -44,7 +44,7 @@ $ git remote -v (you should see myrepo and gossamer in the list of remotes) To start, check out our open issues. We recommend starting with an [issue labeled `Good First Issue`](https://github.com/ChainSafe/gossamer/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22). Leave a comment to let us know that you would like to work on it. -Another option is to improve gossamer where you see fit based on your evaluation of our code. In order to best faciliate collaboration, please create an issue before you start working on it. +Another option is to improve gossamer where you see fit based on your evaluation of our code. In order to best facilitate collaboration, please create an issue before you start working on it. **6. Make improvements to the code.** @@ -76,7 +76,7 @@ Navigate your browser to [https://github.com/ChainSafe/gossamer](https://github. ## Note on memory intensive tests Unfortunately, the free tier for CI's have a memory cap and some tests will cause the CI to experience an out of memory error. -In order to mitigate this we have introduced the concept of **short tests**. If your PR causes an out of memory error please seperate the tests into two groups +In order to mitigate this we have introduced the concept of **short tests**. 
If your PR causes an out of memory error please separate the tests into two groups like below and make sure to label it `large`: ``` diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 8f2ba47ecb..8bbc88a043 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -30,10 +30,10 @@ If you're suggesting a change/improvement, tell us how it should work. @@ -55,7 +55,7 @@ submit a PR and you'll get credit for the whole thing. ## To Reproduce -Steps to reproduce the behavior: +Steps to reproduce the behaviour: 1. 2. diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 63c2de2ca0..4240534bc1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,7 +1,7 @@ name: Release on: push: - branches: [main, development] + branches: [main] jobs: release: @@ -23,4 +23,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - run: npx semantic-release \ No newline at end of file + run: npx semantic-release diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d984f4eb34..198161afd4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -58,8 +58,7 @@ jobs: run: | go test ./... -short -coverprofile=coverage.out -covermode=atomic -timeout=20m - uses: codecov/codecov-action@v1 - with: - token: "89982880-a53b-4a3a-9bdd-3dc9c78bd190" + with: files: ./coverage.out flags: unit-tests name: coverage diff --git a/.golangci.yml b/.golangci.yml index dcb195c988..ee156bb62a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -41,7 +41,7 @@ run: # which files to skip: they will be analyzed, but issues from them # won't be reported. Default value is empty list, but there is - # no need to include all autogenerated files, we confidently recognize + # no need to include all autogenerated files, we confidently recognise # autogenerated files. If it's not please let us know. 
#skip-files: @@ -132,7 +132,7 @@ linters-settings: # Correct spellings using locale preferences for US or UK. # Default is to use a neutral variety of English. # Setting locale to US will correct the British spelling of 'colour' to 'color'. - locale: US + locale: UK ignore-words: - gossamer lll: diff --git a/Makefile b/Makefile index 8dceb3540b..73ca566dc0 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ build-debug: @echo " > \033[32mBuilding binary...\033[0m " GOBIN=$(PWD)/bin go run scripts/ci.go install-debug -## init: Initialize gossamer using the default genesis and toml configuration files +## init: Initialise gossamer using the default genesis and toml configuration files init: ./bin/gossamer --key alice init --genesis chain/gssmr/genesis.json --force diff --git a/README.md b/README.md index 362ecf9916..0e034e5ce8 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ make gossamer ### Run Default Node -initialize default node: +initialise default node: ``` ./bin/gossamer --chain gssmr init ``` @@ -87,7 +87,7 @@ Then, re-run the above steps. NOTE: this feature is for testing only; if you wis ### Run Kusama Node -initialize kusama node: +initialise kusama node: ``` ./bin/gossamer --chain kusama init ``` @@ -108,7 +108,7 @@ After it's finished bootstrapping, the node should begin to sync. 
### Run Polkadot Node -initialize polkadot node: +initialise polkadot node: ``` ./bin/gossamer --chain polkadot init ``` diff --git a/cmd/gossamer/config.go b/cmd/gossamer/config.go index 0b47ff5721..1695c73e62 100644 --- a/cmd/gossamer/config.go +++ b/cmd/gossamer/config.go @@ -17,7 +17,6 @@ package main import ( - "encoding/binary" "fmt" "strconv" "strings" @@ -33,7 +32,7 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime/life" "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/runtime/wasmtime" - "github.com/cosmos/go-bip39" + "github.com/ChainSafe/gossamer/lib/utils" log "github.com/ChainSafe/log15" "github.com/urfave/cli" @@ -132,7 +131,10 @@ func createDotConfig(ctx *cli.Context) (*dot.Config, error) { logger.Info("loaded package log configuration", "cfg", cfg.Log) // set global configuration values - setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + if err := setDotGlobalConfig(ctx, tomlCfg, &cfg.Global); err != nil { + logger.Error("failed to set global node configuration", "error", err) + return nil, err + } // set remaining cli configuration values setDotInitConfig(ctx, tomlCfg.Init, &cfg.Init) @@ -151,7 +153,7 @@ func createDotConfig(ctx *cli.Context) (*dot.Config, error) { return cfg, nil } -// createInitConfig creates the configuration required to initialize a dot node +// createInitConfig creates the configuration required to initialise a dot node func createInitConfig(ctx *cli.Context) (*dot.Config, error) { tomlCfg, cfg, err := setupConfigFromChain(ctx) if err != nil { @@ -160,7 +162,11 @@ func createInitConfig(ctx *cli.Context) (*dot.Config, error) { } // set global configuration values - setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + err = setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + if err != nil { + logger.Error("failed to set global node configuration", "error", err) + return nil, err + } // set log config err = setLogConfig(ctx, tomlCfg, &cfg.Global, &cfg.Log) @@ -196,7 +202,11 @@ func 
createImportStateConfig(ctx *cli.Context) (*dot.Config, error) { } // set global configuration values - setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + if err := setDotGlobalConfig(ctx, tomlCfg, &cfg.Global); err != nil { + logger.Error("failed to set global node configuration", "error", err) + return nil, err + } + return cfg, nil } @@ -210,7 +220,11 @@ func createBuildSpecConfig(ctx *cli.Context) (*dot.Config, error) { } // set global configuration values - setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + if err := setDotGlobalConfig(ctx, tomlCfg, &cfg.Global); err != nil { + logger.Error("failed to set global node configuration", "error", err) + return nil, err + } + return cfg, nil } @@ -229,7 +243,11 @@ func createExportConfig(ctx *cli.Context) (*dot.Config, error) { updateDotConfigFromGenesisJSONRaw(*tomlCfg, cfg) // set global configuration values - setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + err = setDotGlobalConfig(ctx, tomlCfg, &cfg.Global) + if err != nil { + logger.Error("failed to set global node configuration", "error", err) + return nil, err + } // set log config err = setLogConfig(ctx, &ctoml.Config{}, &cfg.Global, &cfg.Log) @@ -385,13 +403,27 @@ func setDotInitConfig(ctx *cli.Context, tomlCfg ctoml.InitConfig, cfg *dot.InitC ) } -// setDotGlobalConfig sets dot.GlobalConfig using flag values from the cli context -func setDotGlobalConfig(ctx *cli.Context, tomlCfg *ctoml.Config, cfg *dot.GlobalConfig) { - if tomlCfg != nil { - if tomlCfg.Global.Name != "" { - cfg.Name = tomlCfg.Global.Name - } +func setDotGlobalConfig(ctx *cli.Context, tomlConfig *ctoml.Config, cfg *dot.GlobalConfig) error { + setDotGlobalConfigFromToml(tomlConfig, cfg) + setDotGlobalConfigFromFlags(ctx, cfg) + + if err := setDotGlobalConfigName(ctx, tomlConfig, cfg); err != nil { + return fmt.Errorf("could not set global node name: %w", err) + } + logger.Debug( + "global configuration", + "name", cfg.Name, + "id", cfg.ID, + "basepath", cfg.BasePath, + ) + + return nil +} + +// 
setDotGlobalConfigFromToml will apply the toml configs to dot global config +func setDotGlobalConfigFromToml(tomlCfg *ctoml.Config, cfg *dot.GlobalConfig) { + if tomlCfg != nil { if tomlCfg.Global.ID != "" { cfg.ID = tomlCfg.Global.ID } @@ -406,20 +438,10 @@ func setDotGlobalConfig(ctx *cli.Context, tomlCfg *ctoml.Config, cfg *dot.Global cfg.MetricsPort = tomlCfg.Global.MetricsPort } +} - // TODO: generate random name if one is not assigned (see issue #1496) - // check --name flag and update node configuration - if name := ctx.GlobalString(NameFlag.Name); name != "" { - cfg.Name = name - } else { - // generate random name - entropy, _ := bip39.NewEntropy(128) - randomNamesString, _ := bip39.NewMnemonic(entropy) - randomNames := strings.Split(randomNamesString, " ") - number := binary.BigEndian.Uint16(entropy) - cfg.Name = randomNames[0] + "-" + randomNames[1] + "-" + fmt.Sprint(number) - } - +// setDotGlobalConfigFromFlags sets dot.GlobalConfig using flag values from the cli context +func setDotGlobalConfigFromFlags(ctx *cli.Context, cfg *dot.GlobalConfig) { // check --basepath flag and update node configuration if basepath := ctx.GlobalString(BasePathFlag.Name); basepath != "" { cfg.BasePath = basepath @@ -429,6 +451,7 @@ func setDotGlobalConfig(ctx *cli.Context, tomlCfg *ctoml.Config, cfg *dot.Global if cfg.BasePath == "" { cfg.BasePath = dot.GssmrConfig().Global.BasePath } + // check --log flag if lvlToInt, err := strconv.Atoi(ctx.String(LogFlag.Name)); err == nil { cfg.LogLvl = log.Lvl(lvlToInt) @@ -444,13 +467,39 @@ func setDotGlobalConfig(ctx *cli.Context, tomlCfg *ctoml.Config, cfg *dot.Global } cfg.NoTelemetry = ctx.Bool("no-telemetry") +} - logger.Debug( - "global configuration", - "name", cfg.Name, - "id", cfg.ID, - "basepath", cfg.BasePath, - ) +func setDotGlobalConfigName(ctx *cli.Context, tomlCfg *ctoml.Config, cfg *dot.GlobalConfig) error { + globalBasePath := utils.ExpandDir(cfg.BasePath) + initialised := dot.NodeInitialized(globalBasePath, false) + 
+ // consider the --name flag as higher priority + if ctx.GlobalString(NameFlag.Name) != "" { + cfg.Name = ctx.GlobalString(NameFlag.Name) + return nil + } + + // consider the name on config as a second priority + if tomlCfg.Global.Name != "" { + cfg.Name = tomlCfg.Global.Name + return nil + } + + // if node was previously initialised and is not the init command + if initialised && ctx.Command.Name != initCommandName { + var err error + if cfg.Name, err = dot.LoadGlobalNodeName(globalBasePath); err != nil { + return err + } + + if cfg.Name != "" { + logger.Debug("load global node name from database", "name", cfg.Name) + return nil + } + } + + cfg.Name = dot.RandomNodeName() + return nil } // setDotAccountConfig sets dot.AccountConfig using flag values from the cli context @@ -736,9 +785,9 @@ func updateDotConfigFromGenesisJSONRaw(tomlCfg ctoml.Config, cfg *dot.Config) { ) } -// updateDotConfigFromGenesisData updates the configuration from genesis data of an initialized node +// updateDotConfigFromGenesisData updates the configuration from genesis data of an initialised node func updateDotConfigFromGenesisData(ctx *cli.Context, cfg *dot.Config) error { - // initialize database using data directory + // initialise database using data directory db, err := chaindb.NewBadgerDB(&chaindb.Config{ DataDir: cfg.Global.BasePath, }) @@ -746,8 +795,8 @@ func updateDotConfigFromGenesisData(ctx *cli.Context, cfg *dot.Config) error { return fmt.Errorf("failed to create database: %s", err) } - // load genesis data from initialized node database - gen, err := state.LoadGenesisData(db) + // load genesis data from initialised node database + gen, err := state.NewBaseState(db).LoadGenesisData() if err != nil { return fmt.Errorf("failed to load genesis data: %s", err) } diff --git a/cmd/gossamer/config_test.go b/cmd/gossamer/config_test.go index c0485bc8a5..c369a322f7 100644 --- a/cmd/gossamer/config_test.go +++ b/cmd/gossamer/config_test.go @@ -23,6 +23,7 @@ import ( 
"github.com/ChainSafe/gossamer/chain/gssmr" "github.com/ChainSafe/gossamer/dot" "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/ChainSafe/gossamer/lib/utils" @@ -822,7 +823,7 @@ func TestUpdateConfigFromGenesisData(t *testing.T) { gen, err := genesis.NewGenesisFromJSONRaw(genFile.Name()) require.Nil(t, err) - err = state.StoreGenesisData(db, gen.GenesisData()) + err = state.NewBaseState(db).StoreGenesisData(gen.GenesisData()) require.Nil(t, err) err = db.Close() @@ -833,3 +834,142 @@ func TestUpdateConfigFromGenesisData(t *testing.T) { require.Equal(t, expected, cfg) } + +func TestGlobalNodeName_WhenNodeAlreadyHasStoredName(t *testing.T) { + // Initialise a node with a random name + globalName := dot.RandomNodeName() + + cfg := dot.NewTestConfig(t) + cfg.Global.Name = globalName + require.NotNil(t, cfg) + + genPath := dot.NewTestGenesisAndRuntime(t) + require.NotNil(t, genPath) + + defer utils.RemoveTestDir(t) + + cfg.Core.Roles = types.FullNodeRole + cfg.Core.BabeAuthority = false + cfg.Core.GrandpaAuthority = false + cfg.Core.BabeThresholdNumerator = 0 + cfg.Core.BabeThresholdDenominator = 0 + cfg.Init.Genesis = genPath + + err := dot.InitNode(cfg) + require.NoError(t, err) + + // call another command and test the name + testApp := cli.NewApp() + testApp.Writer = ioutil.Discard + + testcases := []struct { + description string + flags []string + values []interface{} + expected string + }{ + { + "Test gossamer --roles --basepath", + []string{"basepath", "roles"}, + []interface{}{cfg.Global.BasePath, "4"}, + globalName, + }, + { + "Test gossamer --roles", + []string{"basepath", "roles"}, + []interface{}{cfg.Global.BasePath, "0"}, + globalName, + }, + } + + for _, c := range testcases { + c := c // bypass scopelint false positive + t.Run(c.description, func(t *testing.T) { + ctx, err := newTestContext(c.description, c.flags, c.values) + require.Nil(t, err) + createdCfg, err 
:= createDotConfig(ctx) + require.Nil(t, err) + require.Equal(t, c.expected, createdCfg.Global.Name) + }) + } +} + +func TestGlobalNodeNamePriorityOrder(t *testing.T) { + cfg, testCfgFile := newTestConfigWithFile(t) + require.NotNil(t, cfg) + require.NotNil(t, testCfgFile) + + defer utils.RemoveTestDir(t) + + // call another command and test the name + testApp := cli.NewApp() + testApp.Writer = ioutil.Discard + + // when name flag is defined + whenNameFlagIsDefined := struct { + description string + flags []string + values []interface{} + expected string + }{ + "Test gossamer --basepath --name --config", + []string{"basepath", "name", "config"}, + []interface{}{cfg.Global.BasePath, "mydefinedname", testCfgFile.Name()}, + "mydefinedname", + } + + c := whenNameFlagIsDefined + t.Run(c.description, func(t *testing.T) { + ctx, err := newTestContext(c.description, c.flags, c.values) + require.Nil(t, err) + createdCfg, err := createDotConfig(ctx) + require.Nil(t, err) + require.Equal(t, c.expected, createdCfg.Global.Name) + }) + + // when name flag is not defined + // then should load name from toml if it exists + whenNameIsDefinedOnTomlConfig := struct { + description string + flags []string + values []interface{} + expected string + }{ + "Test gossamer --basepath --config", + []string{"basepath", "config"}, + []interface{}{cfg.Global.BasePath, testCfgFile.Name()}, + cfg.Global.Name, + } + + c = whenNameIsDefinedOnTomlConfig + t.Run(c.description, func(t *testing.T) { + ctx, err := newTestContext(c.description, c.flags, c.values) + require.Nil(t, err) + createdCfg, err := createDotConfig(ctx) + require.Nil(t, err) + require.Equal(t, c.expected, createdCfg.Global.Name) + }) + + // when there is no name flag and no name in config + // should check the load is initialised or generate a new random name + cfg.Global.Name = "" + + whenThereIsNoName := struct { + description string + flags []string + values []interface{} + }{ + "Test gossamer --basepath", + 
[]string{"basepath"}, + []interface{}{cfg.Global.BasePath}, + } + + t.Run(c.description, func(t *testing.T) { + ctx, err := newTestContext(whenThereIsNoName.description, whenThereIsNoName.flags, whenThereIsNoName.values) + require.Nil(t, err) + createdCfg, err := createDotConfig(ctx) + require.Nil(t, err) + require.NotEmpty(t, createdCfg.Global.Name) + require.NotEqual(t, cfg.Global.Name, createdCfg.Global.Name) + }) +} diff --git a/cmd/gossamer/flags.go b/cmd/gossamer/flags.go index 8708433a73..41f36b99de 100644 --- a/cmd/gossamer/flags.go +++ b/cmd/gossamer/flags.go @@ -141,6 +141,10 @@ var ( Name: "genesis-spec", Usage: "Path to human-readable genesis JSON file", } + OutputSpecFlag = cli.StringFlag{ + Name: "output", + Usage: "Path to output the recently created genesis JSON file", + } ) // Network service configuration flags @@ -321,6 +325,7 @@ var ( BuildSpecFlags = append([]cli.Flag{ RawFlag, GenesisSpecFlag, + OutputSpecFlag, }, GlobalFlags...) // ExportFlags are the flags that are valid for use with the export subcommand @@ -354,7 +359,7 @@ var ( // FixFlagOrder allow us to use various flag order formats (ie, `gossamer init // --config config.toml` and `gossamer --config config.toml init`). FixFlagOrder // only fixes global flags, all local flags must come after the subcommand (ie, -// `gossamer --force --config config.toml init` will not recognize `--force` but +// `gossamer --force --config config.toml init` will not recognise `--force` but // `gossamer init --force --config config.toml` will work as expected). 
func FixFlagOrder(f func(ctx *cli.Context) error) func(*cli.Context) error { return func(ctx *cli.Context) error { diff --git a/cmd/gossamer/main.go b/cmd/gossamer/main.go index 92c748de2b..b6d18f5a68 100644 --- a/cmd/gossamer/main.go +++ b/cmd/gossamer/main.go @@ -28,6 +28,15 @@ import ( "github.com/urfave/cli" ) +const ( + accountCommandName = "account" + exportCommandName = "export" + initCommandName = "init" + buildSpecCommandName = "build-spec" + importRuntimeCommandName = "import-runtime" + importStateCommandName = "import-state" +) + // app is the cli application var app = cli.NewApp() var logger = log.New("pkg", "cmd") @@ -36,7 +45,7 @@ var ( // exportCommand defines the "export" subcommand (ie, `gossamer export`) exportCommand = cli.Command{ Action: FixFlagOrder(exportAction), - Name: "export", + Name: exportCommandName, Usage: "Export configuration values to TOML configuration file", ArgsUsage: "", Flags: ExportFlags, @@ -47,18 +56,18 @@ var ( // initCommand defines the "init" subcommand (ie, `gossamer init`) initCommand = cli.Command{ Action: FixFlagOrder(initAction), - Name: "init", - Usage: "Initialize node databases and load genesis data to state", + Name: initCommandName, + Usage: "Initialise node databases and load genesis data to state", ArgsUsage: "", Flags: InitFlags, Category: "INIT", - Description: "The init command initializes the node databases and loads the genesis data from the genesis file to state.\n" + + Description: "The init command initialises the node databases and loads the genesis data from the genesis file to state.\n" + "\tUsage: gossamer init --genesis genesis.json", } // accountCommand defines the "account" subcommand (ie, `gossamer account`) accountCommand = cli.Command{ Action: FixFlagOrder(accountAction), - Name: "account", + Name: accountCommandName, Usage: "Create and manage node keystore accounts", Flags: AccountFlags, Category: "ACCOUNT", @@ -72,7 +81,7 @@ var ( // buildSpecCommand creates a raw genesis file from a human 
readable genesis file. buildSpecCommand = cli.Command{ Action: FixFlagOrder(buildSpecAction), - Name: "build-spec", + Name: buildSpecCommandName, Usage: "Generates genesis JSON data, and can convert to raw genesis data", ArgsUsage: "", Flags: BuildSpecFlags, @@ -86,7 +95,7 @@ var ( // importRuntime generates a genesis file given a .wasm runtime binary. importRuntimeCommand = cli.Command{ Action: FixFlagOrder(importRuntimeAction), - Name: "import-runtime", + Name: importRuntimeCommandName, Usage: "Generates a genesis file given a .wasm runtime binary", ArgsUsage: "", Flags: RootFlags, @@ -97,7 +106,7 @@ var ( importStateCommand = cli.Command{ Action: FixFlagOrder(importStateAction), - Name: "import-state", + Name: importStateCommandName, Usage: "Import state from a JSON file and set it as the chain head state", ArgsUsage: "", Flags: ImportStateFlags, @@ -108,7 +117,7 @@ var ( } ) -// init initializes the cli application +// init initialises the cli application func init() { app.Action = gossamerAction app.Copyright = "Copyright 2019 ChainSafe Systems Authors" @@ -181,7 +190,7 @@ func importRuntimeAction(ctx *cli.Context) error { } // gossamerAction is the root action for the gossamer command, creates a node -// configuration, loads the keystore, initializes the node if not initialized, +// configuration, loads the keystore, initialises the node if not initialised, // then creates and starts the node and node services func gossamerAction(ctx *cli.Context) error { // check for unknown command arguments @@ -216,13 +225,13 @@ func gossamerAction(ctx *cli.Context) error { // from createDotConfig because dot config should not include expanded path) cfg.Global.BasePath = utils.ExpandDir(cfg.Global.BasePath) - // check if node has not been initialized (expected true - add warning log) + // check if node has not been initialised (expected true - add warning log) if !dot.NodeInitialized(cfg.Global.BasePath, true) { - // initialize node (initialize state database and load 
genesis data) + // initialise node (initialise state database and load genesis data) err = dot.InitNode(cfg) if err != nil { - logger.Error("failed to initialize node", "error", err) + logger.Error("failed to initialise node", "error", err) return err } } @@ -289,7 +298,7 @@ func gossamerAction(ctx *cli.Context) error { return nil } -// initAction is the action for the "init" subcommand, initializes the trie and +// initAction is the action for the "init" subcommand, initialises the trie and // state databases and loads initial state from the configured genesis file func initAction(ctx *cli.Context) error { lvl, err := setupLogger(ctx) @@ -310,31 +319,31 @@ func initAction(ctx *cli.Context) error { // from createDotConfig because dot config should not include expanded path) cfg.Global.BasePath = utils.ExpandDir(cfg.Global.BasePath) - // check if node has been initialized (expected false - no warning log) + // check if node has been initialised (expected false - no warning log) if dot.NodeInitialized(cfg.Global.BasePath, false) { - // use --force value to force initialize the node + // use --force value to force initialise the node force := ctx.Bool(ForceFlag.Name) // prompt user to confirm reinitialization - if force || confirmMessage("Are you sure you want to reinitialize the node? [Y/n]") { + if force || confirmMessage("Are you sure you want to reinitialise the node? 
[Y/n]") { logger.Info( - "reinitializing node...", + "reinitialising node...", "basepath", cfg.Global.BasePath, ) } else { logger.Warn( - "exiting without reinitializing the node", + "exiting without reinitialising the node", "basepath", cfg.Global.BasePath, ) return nil // exit if reinitialization is not confirmed } } - // initialize node (initialize state database and load genesis data) + // initialise node (initialise state database and load genesis data) err = dot.InitNode(cfg) if err != nil { - logger.Error("failed to initialize node", "error", err) + logger.Error("failed to initialise node", "error", err) return err } @@ -353,6 +362,7 @@ func buildSpecAction(ctx *cli.Context) error { } var bs *dot.BuildSpec + if genesis := ctx.String(GenesisSpecFlag.Name); genesis != "" { bspec, e := dot.BuildFromGenesis(genesis, 0) if e != nil { @@ -380,17 +390,24 @@ func buildSpecAction(ctx *cli.Context) error { } var res []byte + if ctx.Bool(RawFlag.Name) { res, err = bs.ToJSONRaw() } else { res, err = bs.ToJSON() } + if err != nil { return err } - // TODO implement --output flag so that user can specify redirecting output a file. 
- // then this can be removed (See issue #1029) - fmt.Printf("%s", res) + + if outputPath := ctx.String(OutputSpecFlag.Name); outputPath != "" { + if err = dot.WriteGenesisSpecFile(res, outputPath); err != nil { + return err + } + } else { + fmt.Printf("%s", res) + } return nil } diff --git a/cmd/gossamer/main_test.go b/cmd/gossamer/main_test.go index 65c0fc7652..c09ef05742 100644 --- a/cmd/gossamer/main_test.go +++ b/cmd/gossamer/main_test.go @@ -32,6 +32,7 @@ import ( "text/template" "time" + "github.com/ChainSafe/gossamer/dot" "github.com/ChainSafe/gossamer/lib/utils" "github.com/docker/docker/pkg/reexec" "github.com/stretchr/testify/require" @@ -250,7 +251,7 @@ func TestGossamerCommand(t *testing.T) { t.Log("init gossamer output, ", "stdout", string(stdout), "stderr", string(stderr)) expectedMessages := []string{ - "node initialized", + "node initialised", } for _, m := range expectedMessages { @@ -281,7 +282,66 @@ func TestGossamerCommand(t *testing.T) { require.NotContains(t, string(stderr), m) } } +} + +func TestInitCommand_RenameNodeWhenCalled(t *testing.T) { + genesisPath := utils.GetGssmrGenesisRawPath() + + tempDir, err := ioutil.TempDir("", "gossamer-maintest-") + require.Nil(t, err) + + nodeName := dot.RandomNodeName() + init := runTestGossamer(t, + "init", + "--basepath", tempDir, + "--genesis", genesisPath, + "--name", nodeName, + "--config", defaultGssmrConfigPath, + "--force", + ) + + stdout, stderr := init.GetOutput() + require.Nil(t, err) + + t.Log("init gossamer output, ", "stdout", string(stdout), "stderr", string(stderr)) + + // should contains the name defined in name flag + require.Contains(t, string(stdout), nodeName) + + init = runTestGossamer(t, + "init", + "--basepath", tempDir, + "--genesis", genesisPath, + "--config", defaultGssmrConfigPath, + "--force", + ) + + stdout, stderr = init.GetOutput() + require.Nil(t, err) + + t.Log("init gossamer output, ", "stdout", string(stdout), "stderr", string(stderr)) + + // should not contains the 
name from the last init + require.NotContains(t, string(stdout), nodeName) +} + +func TestBuildSpecCommandWithOutput(t *testing.T) { + tmpOutputfile := "/tmp/raw-genesis-spec-output.json" + buildSpecCommand := runTestGossamer(t, + "build-spec", + "--raw", + "--genesis-spec", "../../chain/gssmr/genesis-spec.json", + "--output", tmpOutputfile) + + time.Sleep(5 * time.Second) + + _, err := os.Stat(tmpOutputfile) + require.False(t, os.IsNotExist(err)) + defer os.Remove(tmpOutputfile) + outb, errb := buildSpecCommand.GetOutput() + require.Empty(t, outb) + require.Empty(t, errb) } // TODO: TestExportCommand test "gossamer export" does not error diff --git a/cmd/gossamer/utils.go b/cmd/gossamer/utils.go index dad18746f5..b5feb371ab 100644 --- a/cmd/gossamer/utils.go +++ b/cmd/gossamer/utils.go @@ -35,6 +35,8 @@ import ( "golang.org/x/crypto/ssh/terminal" //nolint ) +const confirmCharacter = "Y" + // setupLogger sets up the gossamer logger func setupLogger(ctx *cli.Context) (log.Lvl, error) { handler := log.StreamHandler(os.Stdout, log.TerminalFormat()) @@ -76,7 +78,7 @@ func confirmMessage(msg string) bool { for { text, _ := reader.ReadString('\n') text = strings.ReplaceAll(text, "\n", "") - return strings.Compare("Y", text) == 0 + return strings.Compare(confirmCharacter, strings.ToUpper(text)) == 0 } } @@ -124,7 +126,6 @@ func newTestConfigWithFile(t *testing.T) (*dot.Config, *os.File) { require.NoError(t, err) tomlCfg := dotConfigToToml(cfg) - cfgFile := exportConfig(tomlCfg, file.Name()) return cfg, cfgFile } diff --git a/docs/docs/contributing.md b/docs/docs/contributing.md index 6e808861c6..d153a32b00 100644 --- a/docs/docs/contributing.md +++ b/docs/docs/contributing.md @@ -44,7 +44,7 @@ $ git remote -v (you should see myrepo and gossamer in the list of remotes) To start, check out our open issues. We recommend starting with an [issue labeled `Good First Issue`](https://github.com/ChainSafe/gossamer/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22). 
Leave a comment to let us know that you would like to work on it. -Another option is to improve gossamer where you see fit based on your evaluation of our code. In order to best faciliate collaboration, please create an issue before you start working on it. +Another option is to improve gossamer where you see fit based on your evaluation of our code. In order to best facilitate collaboration, please create an issue before you start working on it. **6. Make improvements to the code.** @@ -76,7 +76,7 @@ Navigate your browser to [https://github.com/ChainSafe/gossamer](https://github. ## Note on memory intensive tests Unfortunately, the free tier for CI's have a memory cap and some tests will cause the CI to experience an out of memory error. -In order to mitigate this we have introduced the concept of **short tests**. If your PR causes an out of memory error please seperate the tests into two groups +In order to mitigate this we have introduced the concept of **short tests**. If your PR causes an out of memory error please separate the tests into two groups like below and make sure to label it `large`: ``` diff --git a/docs/docs/getting-started/installation.md b/docs/docs/getting-started/installation.md index b25d04197a..358775e051 100644 --- a/docs/docs/getting-started/installation.md +++ b/docs/docs/getting-started/installation.md @@ -25,7 +25,7 @@ make gossamer ## Run a Gossamer Node -To run default Gossamer node, first initialize the node. This writes the genesis state to the database. +To run default Gossamer node, first initialise the node. This writes the genesis state to the database. ``` ./bin/gossamer --chain gssmr init ``` @@ -77,7 +77,7 @@ After it's finished bootstrapping, the node should begin to sync. 
## Run Polkadot Node -Initialize polkadot node: +Initialise polkadot node: ``` ./bin/gossamer --chain polkadot init ``` diff --git a/docs/docs/getting-started/overview/host-architecture.md b/docs/docs/getting-started/overview/host-architecture.md index ab147dcc51..aeb4f4d4ce 100644 --- a/docs/docs/getting-started/overview/host-architecture.md +++ b/docs/docs/getting-started/overview/host-architecture.md @@ -48,7 +48,7 @@ type Service interface { ### Core Service -The **core service** is responsible for block production and finalization (consensus) and processing messages received from the **network service**; it initializes BABE sessions and GRANDPA rounds and validates blocks and transactions before committing them to the **state service**. +The **core service** is responsible for block production and finalisation (consensus) and processing messages received from the **network service**; it initialises BABE sessions and GRANDPA rounds and validates blocks and transactions before committing them to the **state service**. - only the **core service** writes to block state - only the **core service** writes to storage state diff --git a/docs/docs/getting-started/overview/package-library.md b/docs/docs/getting-started/overview/package-library.md index 347a3caaf3..d0d8b30e8e 100644 --- a/docs/docs/getting-started/overview/package-library.md +++ b/docs/docs/getting-started/overview/package-library.md @@ -42,7 +42,7 @@ Gossamer packages can be categorized into **four package types**: #### `dot/core` -- The **core package** implements the [Core Service](/getting-started/overview/host-architecture#core-service) - responsible for block production and block finalization (consensus) and processing messages received from the [Network Service](/getting-started/overview/host-architecture/#network-service). 
+- The **core package** implements the [Core Service](/getting-started/overview/host-architecture#core-service) - responsible for block production and block finalisation (consensus) and processing messages received from the [Network Service](/getting-started/overview/host-architecture/#network-service). #### `dot/network` @@ -72,7 +72,7 @@ Gossamer packages can be categorized into **four package types**: #### `lib/blocktree` -- the **blocktree package** implements the blocktree, a data structure which tracks the chain and all its non-finalized forks. +- the **blocktree package** implements the blocktree, a data structure which tracks the chain and all its non-finalised forks. #### `lib/common` diff --git a/docs/docs/getting-started/resources/general-resources.md b/docs/docs/getting-started/resources/general-resources.md index 56854b0644..b9198bcaea 100644 --- a/docs/docs/getting-started/resources/general-resources.md +++ b/docs/docs/getting-started/resources/general-resources.md @@ -34,7 +34,7 @@ To start, it would be extremely beneficial to have an understanding of the Polka ## Polkadot Host -[_Please Note: The "Polkadot Host" was formerly known as the "Polkadot Runtime Enviornment"_] +[_Please Note: The "Polkadot Host" was formerly known as the "Polkadot Runtime Environment"_] The Polkadot Host plays two important roles within the Polkadot ecosystem. 
diff --git a/docs/docs/integrate/connect-to-polkadot-js.md b/docs/docs/integrate/connect-to-polkadot-js.md index 7c4d8b8cb2..1fb3cfd087 100644 --- a/docs/docs/integrate/connect-to-polkadot-js.md +++ b/docs/docs/integrate/connect-to-polkadot-js.md @@ -21,26 +21,25 @@ You'll need to setup the polkadot.js/apps to use a custom endpoint to connect to Once you've opened the app in your browser, you should see it connected to the Polkadot network: - + In the top left hand corner, click the logo to open the network selection modal: - + Next, at the bottom of this menu is a "Development" dropdown, click to open that - + Now you should see a text area with the label "custom endpoint", here you add your local node's websocket address, usually "ws://127.0.0.1:8586", click the Save icon on the right of the text box to save the endpoint. - + Finally, click the "Switch" button at the top of this modal: - + Congratulations, you've successfully connected to your Gossamer node! - - + diff --git a/docs/docs/usage/command-line.md b/docs/docs/usage/command-line.md index 1d3918c519..89d6934b3e 100644 --- a/docs/docs/usage/command-line.md +++ b/docs/docs/usage/command-line.md @@ -6,7 +6,7 @@ permalink: /usage/command-line/ ## Gossamer Command -The `gossamer` command is the root command for the `gossamer` package (`cmd/gossamer`). The root command starts the node (and initializes the node if the node has not already been initialized). +The `gossamer` command is the root command for the `gossamer` package (`cmd/gossamer`). The root command starts the node (and initialises the node if the node has not already been initialised). 
### Accepted Formats @@ -69,7 +69,7 @@ SUBCOMMANDS: help, h Shows a list of commands or help for one command account Create and manage node keystore accounts export Export configuration values to TOML configuration file - init Initialize node databases and load genesis data to state + init Initialise node databases and load genesis data to state ``` List of ***local flags*** for `init` subcommand: @@ -188,15 +188,15 @@ Available built-in keys: ./bin/gossmer --key heather ``` -## Initializing Nodes +## initialising Nodes -To initialize or re-initialize a node, use the init subcommand `init`: +To initialise or re-initialise a node, use the init subcommand `init`: ``` ./bin/gossamer init ./bin/gossamer --key alice --roles 4 ``` -`init` can be used with the `--base-path` or `--config` flag to re-initialize a custom node (ie, `bob` from the example above): +`init` can be used with the `--base-path` or `--config` flag to re-initialise a custom node (ie, `bob` from the example above): ``` ./bin/gossamer --config node/gssmr/bob.toml init ``` diff --git a/docs/docs/usage/import-runtime.md b/docs/docs/usage/import-runtime.md index 7bed1f04cd..df39cd91d4 100644 --- a/docs/docs/usage/import-runtime.md +++ b/docs/docs/usage/import-runtime.md @@ -35,9 +35,9 @@ To create the raw genesis file used by the node, you can use the `gossamer build This creates a genesis file `genesis.json` that is usable by the node. -### 3. Initialize the node with the genesis file +### 3. Initialise the node with the genesis file -Next, you will need to write the state in `genesis.json` to the database by initializing the node. +Next, you will need to write the state in `genesis.json` to the database by initialising the node. 
``` ./bin/gossamer init --genesis genesis.json diff --git a/docs/docs/usage/running-nodes.md b/docs/docs/usage/running-nodes.md index 3d44568e49..d32ab6907d 100644 --- a/docs/docs/usage/running-nodes.md +++ b/docs/docs/usage/running-nodes.md @@ -6,7 +6,7 @@ permalink: /usage/running-nodes/ ## Run a Gossamer Node -To run default Gossamer node, first initialize the node. This writes the genesis state to the database. +To run default Gossamer node, first initialise the node. This writes the genesis state to the database. ``` ./bin/gossamer --chain gssmr init ``` @@ -58,7 +58,7 @@ After it's finished bootstrapping, the node should begin to sync. ## Run Polkadot Node -Initialize polkadot node: +Initialise polkadot node: ``` ./bin/gossamer --chain polkadot init ``` diff --git a/dot/build_spec.go b/dot/build_spec.go index 6e2fb6e073..53e150f1e6 100644 --- a/dot/build_spec.go +++ b/dot/build_spec.go @@ -18,10 +18,14 @@ package dot import ( "encoding/json" + "fmt" + "os" + "path/filepath" "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/genesis" + "github.com/ChainSafe/gossamer/lib/utils" log "github.com/ChainSafe/log15" ) @@ -35,8 +39,10 @@ func (b *BuildSpec) ToJSON() ([]byte, error) { tmpGen := &genesis.Genesis{ Name: b.genesis.Name, ID: b.genesis.ID, + ChainType: b.genesis.ChainType, Bootnodes: b.genesis.Bootnodes, ProtocolID: b.genesis.ProtocolID, + Properties: b.genesis.Properties, Genesis: genesis.Fields{ Runtime: b.genesis.GenesisFields().Runtime, }, @@ -52,6 +58,7 @@ func (b *BuildSpec) ToJSONRaw() ([]byte, error) { ChainType: b.genesis.ChainType, Bootnodes: b.genesis.Bootnodes, ProtocolID: b.genesis.ProtocolID, + Properties: b.genesis.Properties, Genesis: genesis.Fields{ Raw: b.genesis.GenesisFields().Raw, }, @@ -71,6 +78,21 @@ func BuildFromGenesis(path string, authCount int) (*BuildSpec, error) { return bs, nil } +// WriteGenesisSpecFile writes the build-spec in the output filepath +func 
WriteGenesisSpecFile(data []byte, fp string) error { + // if file already exists then dont apply any written on it + if utils.PathExists(fp) { + return fmt.Errorf("file %s already exists, rename to avoid overwriting", fp) + } + + if err := os.MkdirAll(filepath.Dir(fp), os.ModeDir|os.ModePerm); err != nil { + return err + } + + WriteConfig(data, fp) + return nil +} + // BuildFromDB builds a BuildSpec from the DB located at path func BuildFromDB(path string) (*BuildSpec, error) { tmpGen := &genesis.Genesis{ @@ -87,7 +109,7 @@ func BuildFromDB(path string) (*BuildSpec, error) { stateSrvc := state.NewService(path, log.LvlCrit) - // start state service (initialize state database) + // start state service (initialise state database) err := stateSrvc.Start() if err != nil { return nil, err diff --git a/dot/build_spec_test.go b/dot/build_spec_test.go index 98dea7b16b..53dc276742 100644 --- a/dot/build_spec_test.go +++ b/dot/build_spec_test.go @@ -17,6 +17,8 @@ package dot import ( "encoding/json" + "fmt" + "io/ioutil" "os" "testing" @@ -29,6 +31,17 @@ func TestBuildFromGenesis(t *testing.T) { defer os.Remove(file) require.NoError(t, err) bs, err := BuildFromGenesis(file, 0) + + expectedChainType := "TESTCHAINTYPE" + expectedProperties := map[string]interface{}{ + "ss58Format": 0.0, + "tokenDecimals": 0.0, + "tokenSymbol": "TEST", + } + + bs.genesis.ChainType = expectedChainType + bs.genesis.Properties = expectedProperties + require.NoError(t, err) // confirm human-readable fields @@ -39,6 +52,8 @@ func TestBuildFromGenesis(t *testing.T) { require.NoError(t, err) genesis.TestGenesis.Genesis = genesis.TestFieldsHR require.Equal(t, genesis.TestGenesis.Genesis.Runtime, jGen.Genesis.Runtime) + require.Equal(t, expectedChainType, jGen.ChainType) + require.Equal(t, expectedProperties, jGen.Properties) // confirm raw fields raw, err := bs.ToJSONRaw() @@ -48,6 +63,70 @@ func TestBuildFromGenesis(t *testing.T) { require.NoError(t, err) genesis.TestGenesis.Genesis = 
genesis.TestFieldsRaw require.Equal(t, genesis.TestGenesis.Genesis.Raw, jGenRaw.Genesis.Raw) + require.Equal(t, expectedChainType, jGenRaw.ChainType) + require.Equal(t, expectedProperties, jGenRaw.Properties) +} + +func TestBuildFromGenesis_WhenGenesisDoesNotExists(t *testing.T) { + bs, err := BuildFromGenesis("/not/exists/genesis.json", 0) + require.Nil(t, bs) + require.Error(t, err, os.ErrNotExist) +} + +func TestWriteGenesisSpecFileWhenFileAlreadyExists(t *testing.T) { + f, err := ioutil.TempFile("", "existing file data") + require.NoError(t, err) + defer os.Remove(f.Name()) + + someBytes := []byte("Testing some bytes") + err = WriteGenesisSpecFile(someBytes, f.Name()) + + require.Error(t, err, + fmt.Sprintf("file %s already exists, rename to avoid overwritten", f.Name())) +} + +func TestWriteGenesisSpecFile(t *testing.T) { + cfg := NewTestConfig(t) + cfg.Init.Genesis = "../chain/gssmr/genesis.json" + + expected, err := genesis.NewGenesisFromJSONRaw(cfg.Init.Genesis) + require.NoError(t, err) + + err = InitNode(cfg) + require.NoError(t, err) + + bs, err := BuildFromGenesis(cfg.Init.Genesis, 0) + require.NoError(t, err) + + data, err := bs.ToJSONRaw() + require.NoError(t, err) + + tmpFiles := []string{ + "/tmp/unique-raw-genesis.json", + "./unique-raw-genesis.json", + } + + for _, tmpFile := range tmpFiles { + err = WriteGenesisSpecFile(data, tmpFile) + require.NoError(t, err) + require.FileExists(t, tmpFile) + + defer os.Remove(tmpFile) + + file, err := os.Open(tmpFile) + require.NoError(t, err) + defer file.Close() + + genesisBytes, err := ioutil.ReadAll(file) + require.NoError(t, err) + + gen := new(genesis.Genesis) + err = json.Unmarshal(genesisBytes, gen) + require.NoError(t, err) + + require.Equal(t, expected.ChainType, gen.ChainType) + require.Equal(t, expected.Properties, gen.Properties) + } } func TestBuildFromDB(t *testing.T) { @@ -56,7 +135,7 @@ func TestBuildFromDB(t *testing.T) { cfg.Init.Genesis = "../chain/gssmr/genesis.json" expected, err := 
genesis.NewGenesisFromJSONRaw(cfg.Init.Genesis) require.NoError(t, err) - // initialize node (initialize state database and load genesis data) + // initialise node (initialise state database and load genesis data) err = InitNode(cfg) require.NoError(t, err) diff --git a/dot/core/digest.go b/dot/core/digest.go index dc00e09b2d..f46fe41768 100644 --- a/dot/core/digest.go +++ b/dot/core/digest.go @@ -33,26 +33,23 @@ type DigestHandler struct { cancel context.CancelFunc // interfaces - blockState BlockState - epochState EpochState - grandpa FinalityGadget - babe BlockProducer - verifier Verifier - isFinalityAuthority bool - isBlockProducer bool + blockState BlockState + epochState EpochState + grandpaState GrandpaState + babe BlockProducer + verifier Verifier // block notification channels imported chan *types.Block importedID byte - finalized chan *types.Header - finalizedID byte + finalised chan *types.FinalisationInfo + finalisedID byte // GRANDPA changes grandpaScheduledChange *grandpaChange grandpaForcedChange *grandpaChange grandpaPause *pause grandpaResume *resume - grandpaAuths []*types.Authority // saved in case of pause } type grandpaChange struct { @@ -69,59 +66,51 @@ type resume struct { } // NewDigestHandler returns a new DigestHandler -func NewDigestHandler(blockState BlockState, epochState EpochState, babe BlockProducer, grandpa FinalityGadget, verifier Verifier) (*DigestHandler, error) { +func NewDigestHandler(blockState BlockState, epochState EpochState, grandpaState GrandpaState, babe BlockProducer, verifier Verifier) (*DigestHandler, error) { imported := make(chan *types.Block, 16) - finalized := make(chan *types.Header, 16) + finalised := make(chan *types.FinalisationInfo, 16) iid, err := blockState.RegisterImportedChannel(imported) if err != nil { return nil, err } - fid, err := blockState.RegisterFinalizedChannel(finalized) + fid, err := blockState.RegisterFinalizedChannel(finalised) if err != nil { return nil, err } - isFinalityAuthority := 
grandpa != nil - isBlockProducer := babe != nil - ctx, cancel := context.WithCancel(context.Background()) return &DigestHandler{ - ctx: ctx, - cancel: cancel, - blockState: blockState, - epochState: epochState, - grandpa: grandpa, - babe: babe, - verifier: verifier, - isFinalityAuthority: isFinalityAuthority, - isBlockProducer: isBlockProducer, - imported: imported, - importedID: iid, - finalized: finalized, - finalizedID: fid, + ctx: ctx, + cancel: cancel, + blockState: blockState, + epochState: epochState, + grandpaState: grandpaState, + babe: babe, + verifier: verifier, + imported: imported, + importedID: iid, + finalised: finalised, + finalisedID: fid, }, nil } // Start starts the DigestHandler -func (h *DigestHandler) Start() { +func (h *DigestHandler) Start() error { go h.handleBlockImport(h.ctx) - go h.handleBlockFinalization(h.ctx) + go h.handleBlockFinalisation(h.ctx) + return nil } // Stop stops the DigestHandler -func (h *DigestHandler) Stop() { +func (h *DigestHandler) Stop() error { h.cancel() h.blockState.UnregisterImportedChannel(h.importedID) - h.blockState.UnregisterFinalizedChannel(h.finalizedID) + h.blockState.UnregisterFinalizedChannel(h.finalisedID) close(h.imported) - close(h.finalized) -} - -// SetFinalityGadget sets the digest handler's grandpa instance -func (h *DigestHandler) SetFinalityGadget(grandpa FinalityGadget) { - h.grandpa = grandpa + close(h.finalised) + return nil } // NextGrandpaAuthorityChange returns the block number of the next upcoming grandpa authorities change. 
@@ -155,11 +144,11 @@ func (h *DigestHandler) HandleConsensusDigest(d *types.ConsensusDigest, header * if d.ConsensusEngineID == types.GrandpaEngineID { switch t { case types.GrandpaScheduledChangeType: - return h.handleScheduledChange(d) + return h.handleScheduledChange(d, header) case types.GrandpaForcedChangeType: - return h.handleForcedChange(d) + return h.handleForcedChange(d, header) case types.GrandpaOnDisabledType: - return h.handleGrandpaOnDisabled(d, header) + return nil // do nothing, as this is not implemented in substrate case types.GrandpaPauseType: return h.handlePause(d) case types.GrandpaResumeType: @@ -193,8 +182,9 @@ func (h *DigestHandler) handleBlockImport(ctx context.Context) { continue } - if h.isFinalityAuthority { - h.handleGrandpaChangesOnImport(block.Header.Number) + err := h.handleGrandpaChangesOnImport(block.Header.Number) + if err != nil { + logger.Error("failed to handle grandpa changes on block import", "error", err) } case <-ctx.Done(): return @@ -202,154 +192,157 @@ func (h *DigestHandler) handleBlockImport(ctx context.Context) { } } -func (h *DigestHandler) handleBlockFinalization(ctx context.Context) { +func (h *DigestHandler) handleBlockFinalisation(ctx context.Context) { for { select { - case header := <-h.finalized: - if header == nil { + case info := <-h.finalised: + if info == nil || info.Header == nil { continue } - if h.isFinalityAuthority { - h.handleGrandpaChangesOnFinalization(header.Number) + err := h.handleGrandpaChangesOnFinalization(info.Header.Number) + if err != nil { + logger.Error("failed to handle grandpa changes on block finalisation", "error", err) } - - // TODO: check if there's a NextEpochData or NextConfigData digest, if there is, - // make sure it matches what's in the EpochState for the upcoming epoch case <-ctx.Done(): return } } } -func (h *DigestHandler) handleGrandpaChangesOnImport(num *big.Int) { +func (h *DigestHandler) handleGrandpaChangesOnImport(num *big.Int) error { resume := h.grandpaResume - 
if resume != nil && num.Cmp(resume.atBlock) == 0 { - h.grandpa.UpdateAuthorities(h.grandpaAuths) + if resume != nil && num.Cmp(resume.atBlock) > -1 { h.grandpaResume = nil } fc := h.grandpaForcedChange - if fc != nil && num.Cmp(fc.atBlock) == 0 { - h.grandpa.UpdateAuthorities(fc.auths) + if fc != nil && num.Cmp(fc.atBlock) > -1 { + err := h.grandpaState.IncrementSetID() + if err != nil { + return err + } + h.grandpaForcedChange = nil + curr, err := h.grandpaState.GetCurrentSetID() + if err != nil { + return err + } + + logger.Debug("incremented grandpa set ID", "set ID", curr) } + + return nil } -func (h *DigestHandler) handleGrandpaChangesOnFinalization(num *big.Int) { +func (h *DigestHandler) handleGrandpaChangesOnFinalization(num *big.Int) error { pause := h.grandpaPause - if pause != nil && num.Cmp(pause.atBlock) == 0 { - // save authority data for Resume - h.grandpaAuths = h.grandpa.Authorities() - h.grandpa.UpdateAuthorities([]*types.Authority{}) + if pause != nil && num.Cmp(pause.atBlock) > -1 { h.grandpaPause = nil } sc := h.grandpaScheduledChange - if sc != nil && num.Cmp(sc.atBlock) == 0 { - h.grandpa.UpdateAuthorities(sc.auths) + if sc != nil && num.Cmp(sc.atBlock) > -1 { + err := h.grandpaState.IncrementSetID() + if err != nil { + return err + } + h.grandpaScheduledChange = nil + curr, err := h.grandpaState.GetCurrentSetID() + if err != nil { + return err + } + + logger.Debug("incremented grandpa set ID", "set ID", curr) } - // if blocks get finalized before forced change takes place, disregard it + // if blocks get finalised before forced change takes place, disregard it h.grandpaForcedChange = nil + return nil } -func (h *DigestHandler) handleScheduledChange(d *types.ConsensusDigest) error { +func (h *DigestHandler) handleScheduledChange(d *types.ConsensusDigest, header *types.Header) error { curr, err := h.blockState.BestBlockHeader() if err != nil { return err } - if d.ConsensusEngineID == types.GrandpaEngineID { - if h.grandpaScheduledChange != nil 
{ - return nil - } - - sc := &types.GrandpaScheduledChange{} - dec, err := scale.Decode(d.Data[1:], sc) - if err != nil { - return err - } - sc = dec.(*types.GrandpaScheduledChange) + if d.ConsensusEngineID != types.GrandpaEngineID { + return nil + } - logger.Debug("handling GrandpaScheduledChange", "data", sc) + if h.grandpaScheduledChange != nil { + return nil + } - if h.grandpa == nil { - // this should never happen - return nil - } + sc := &types.GrandpaScheduledChange{} + dec, err := scale.Decode(d.Data[1:], sc) + if err != nil { + return err + } + sc = dec.(*types.GrandpaScheduledChange) - c, err := newGrandpaChange(sc.Auths, sc.Delay, curr.Number) - if err != nil { - return err - } + logger.Debug("handling GrandpaScheduledChange", "data", sc) - h.grandpaScheduledChange = c + c, err := newGrandpaChange(sc.Auths, sc.Delay, curr.Number) + if err != nil { + return err } - return nil -} + h.grandpaScheduledChange = c -func (h *DigestHandler) handleForcedChange(d *types.ConsensusDigest) error { - curr, err := h.blockState.BestBlockHeader() + auths, err := types.GrandpaAuthoritiesRawToAuthorities(sc.Auths) if err != nil { return err } - if d.ConsensusEngineID == types.GrandpaEngineID { - if h.grandpaForcedChange != nil { - return errors.New("already have forced change scheduled") - } - - fc := &types.GrandpaForcedChange{} - dec, err := scale.Decode(d.Data[1:], fc) - if err != nil { - return err - } - fc = dec.(*types.GrandpaForcedChange) + logger.Debug("setting GrandpaScheduledChange", "at block", big.NewInt(0).Add(header.Number, big.NewInt(int64(sc.Delay)))) + return h.grandpaState.SetNextChange( + types.NewGrandpaVotersFromAuthorities(auths), + big.NewInt(0).Add(header.Number, big.NewInt(int64(sc.Delay))), + ) +} - c, err := newGrandpaChange(fc.Auths, fc.Delay, curr.Number) - if err != nil { - return err - } +func (h *DigestHandler) handleForcedChange(d *types.ConsensusDigest, header *types.Header) error { + if d.ConsensusEngineID != types.GrandpaEngineID { + 
return nil + } - h.grandpaForcedChange = c + if header == nil { + return errors.New("header is nil") } - return nil -} + if h.grandpaForcedChange != nil { + return errors.New("already have forced change scheduled") + } -func (h *DigestHandler) handleGrandpaOnDisabled(d *types.ConsensusDigest, _ *types.Header) error { - od := &types.GrandpaOnDisabled{} - dec, err := scale.Decode(d.Data[1:], od) + fc := &types.GrandpaForcedChange{} + dec, err := scale.Decode(d.Data[1:], fc) if err != nil { return err } - od = dec.(*types.GrandpaOnDisabled) + fc = dec.(*types.GrandpaForcedChange) - logger.Debug("handling GrandpaOnDisabled", "data", od) + logger.Debug("handling GrandpaForcedChange", "data", fc) - if h.grandpa == nil { - // this should never happen - return nil + c, err := newGrandpaChange(fc.Auths, fc.Delay, header.Number) + if err != nil { + return err } - curr := h.grandpa.Authorities() - next := []*types.Authority{} + h.grandpaForcedChange = c - for _, auth := range curr { - if auth.Weight != od.ID { - next = append(next, auth) - } + auths, err := types.GrandpaAuthoritiesRawToAuthorities(fc.Auths) + if err != nil { + return err } - // TODO: this needs to be updated not to remove the authority from the list, - // but to flag them as disabled. thus, if we are disabled, we should stop voting. 
- // if we receive vote or finalization messages, we should ignore anything signed by the - // disabled authority - h.grandpa.UpdateAuthorities(next) - return nil + logger.Debug("setting GrandpaForcedChange", "at block", big.NewInt(0).Add(header.Number, big.NewInt(int64(fc.Delay)))) + return h.grandpaState.SetNextChange( + types.NewGrandpaVotersFromAuthorities(auths), + big.NewInt(0).Add(header.Number, big.NewInt(int64(fc.Delay))), + ) } func (h *DigestHandler) handlePause(d *types.ConsensusDigest) error { @@ -371,7 +364,7 @@ func (h *DigestHandler) handlePause(d *types.ConsensusDigest) error { atBlock: big.NewInt(-1).Add(curr.Number, delay), } - return nil + return h.grandpaState.SetNextPause(h.grandpaPause.atBlock) } func (h *DigestHandler) handleResume(d *types.ConsensusDigest) error { @@ -393,7 +386,7 @@ func (h *DigestHandler) handleResume(d *types.ConsensusDigest) error { atBlock: big.NewInt(-1).Add(curr.Number, delay), } - return nil + return h.grandpaState.SetNextResume(h.grandpaResume.atBlock) } func newGrandpaChange(raw []*types.GrandpaAuthoritiesRaw, delay uint32, currBlock *big.Int) (*grandpaChange, error) { diff --git a/dot/core/digest_test.go b/dot/core/digest_test.go index fd2e1075f8..51f78e11ff 100644 --- a/dot/core/digest_test.go +++ b/dot/core/digest_test.go @@ -18,6 +18,7 @@ package core import ( "io/ioutil" + "math/big" "testing" "time" @@ -38,7 +39,7 @@ func newTestDigestHandler(t *testing.T, withBABE, withGrandpa bool) *DigestHandl stateSrvc.UseMemDB() gen, genTrie, genHeader := newTestGenesisWithTrieAndHeader(t) - err = stateSrvc.Initialize(gen, genHeader, genTrie) + err = stateSrvc.Initialise(gen, genHeader, genTrie) require.NoError(t, err) err = stateSrvc.Start() @@ -49,13 +50,8 @@ func newTestDigestHandler(t *testing.T, withBABE, withGrandpa bool) *DigestHandl bp = &mockBlockProducer{} } - var fg FinalityGadget - if withGrandpa { - fg = &mockFinalityGadget{} - } - time.Sleep(time.Second) - dh, err := NewDigestHandler(stateSrvc.Block, 
stateSrvc.Epoch, bp, fg, &mockVerifier{}) + dh, err := NewDigestHandler(stateSrvc.Block, stateSrvc.Epoch, stateSrvc.Grandpa, bp, &mockVerifier{}) require.NoError(t, err) return dh } @@ -64,7 +60,6 @@ func TestDigestHandler_GrandpaScheduledChange(t *testing.T) { handler := newTestDigestHandler(t, false, true) handler.Start() defer handler.Stop() - require.True(t, handler.isFinalityAuthority) kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -84,7 +79,11 @@ func TestDigestHandler_GrandpaScheduledChange(t *testing.T) { Data: data, } - err = handler.HandleConsensusDigest(d, nil) + header := &types.Header{ + Number: big.NewInt(1), + } + + err = handler.HandleConsensusDigest(d, header) require.NoError(t, err) headers := addTestBlocksToState(t, 2, handler.blockState) @@ -92,9 +91,6 @@ func TestDigestHandler_GrandpaScheduledChange(t *testing.T) { handler.blockState.SetFinalizedHash(h.Hash(), 0, 0) } - auths := handler.grandpa.Authorities() - require.Nil(t, auths) - // authorities should change on start of block 3 from start headers = addTestBlocksToState(t, 1, handler.blockState) for _, h := range headers { @@ -102,8 +98,15 @@ func TestDigestHandler_GrandpaScheduledChange(t *testing.T) { } time.Sleep(time.Millisecond * 100) - auths = handler.grandpa.Authorities() - require.Equal(t, 1, len(auths)) + setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, uint64(1), setID) + + auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(setID) + require.NoError(t, err) + expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) + require.NoError(t, err) + require.Equal(t, expected, auths) } func TestDigestHandler_GrandpaForcedChange(t *testing.T) { @@ -129,69 +132,28 @@ func TestDigestHandler_GrandpaForcedChange(t *testing.T) { Data: data, } - err = handler.HandleConsensusDigest(d, nil) + header := &types.Header{ + Number: big.NewInt(1), + } + + err = 
handler.HandleConsensusDigest(d, header) require.NoError(t, err) - addTestBlocksToState(t, 2, handler.blockState) - auths := handler.grandpa.Authorities() - require.Nil(t, auths) + addTestBlocksToState(t, 3, handler.blockState) - // authorities should change on start of block 3 from start + // authorities should change on start of block 4 from start addTestBlocksToState(t, 1, handler.blockState) time.Sleep(time.Millisecond * 100) - auths = handler.grandpa.Authorities() - require.Equal(t, 1, len(auths)) -} - -func TestDigestHandler_GrandpaOnDisabled(t *testing.T) { - handler := newTestDigestHandler(t, false, true) - handler.Start() - defer handler.Stop() - - kr, err := keystore.NewEd25519Keyring() - require.NoError(t, err) - handler.grandpa.UpdateAuthorities([]*types.Authority{ - {Key: kr.Alice().Public().(*ed25519.PublicKey), Weight: 0}, - }) - - // try with ID that doesn't exist - od := &types.GrandpaOnDisabled{ - ID: 1, - } - - data, err := od.Encode() + setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID() require.NoError(t, err) + require.Equal(t, uint64(1), setID) - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.HandleConsensusDigest(d, nil) + auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(setID) require.NoError(t, err) - - auths := handler.grandpa.Authorities() - require.Equal(t, 1, len(auths)) - - // try with ID that does exist - od = &types.GrandpaOnDisabled{ - ID: 0, - } - - data, err = od.Encode() + expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(fc.Auths) require.NoError(t, err) - - d = &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.HandleConsensusDigest(d, nil) - require.NoError(t, err) - - auths = handler.grandpa.Authorities() - require.Equal(t, 0, len(auths)) + require.Equal(t, expected, auths) } func TestDigestHandler_GrandpaPauseAndResume(t *testing.T) { @@ -199,13 +161,6 @@ 
func TestDigestHandler_GrandpaPauseAndResume(t *testing.T) { handler.Start() defer handler.Stop() - kr, err := keystore.NewEd25519Keyring() - require.NoError(t, err) - - handler.grandpa.UpdateAuthorities([]*types.Authority{ - {Key: kr.Alice().Public().(*ed25519.PublicKey), Weight: 0}, - }) - p := &types.GrandpaPause{ Delay: 3, } @@ -220,6 +175,9 @@ func TestDigestHandler_GrandpaPauseAndResume(t *testing.T) { err = handler.HandleConsensusDigest(d, nil) require.NoError(t, err) + nextPause, err := handler.grandpaState.(*state.GrandpaState).GetNextPause() + require.NoError(t, err) + require.Equal(t, big.NewInt(int64(p.Delay)), nextPause) headers := addTestBlocksToState(t, 3, handler.blockState) for _, h := range headers { @@ -227,8 +185,7 @@ func TestDigestHandler_GrandpaPauseAndResume(t *testing.T) { } time.Sleep(time.Millisecond * 100) - auths := handler.grandpa.Authorities() - require.Equal(t, 0, len(auths)) + require.Nil(t, handler.grandpaPause) r := &types.GrandpaResume{ Delay: 3, @@ -247,8 +204,11 @@ func TestDigestHandler_GrandpaPauseAndResume(t *testing.T) { addTestBlocksToState(t, 3, handler.blockState) time.Sleep(time.Millisecond * 110) - auths = handler.grandpa.Authorities() - require.Equal(t, 1, len(auths)) + require.Nil(t, handler.grandpaResume) + + nextResume, err := handler.grandpaState.(*state.GrandpaState).GetNextResume() + require.NoError(t, err) + require.Equal(t, big.NewInt(int64(r.Delay)+int64(p.Delay)), nextResume) } func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) { @@ -269,12 +229,22 @@ func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) { ConsensusEngineID: types.GrandpaEngineID, Data: data, } + header := &types.Header{ + Number: big.NewInt(1), + } - err = handler.HandleConsensusDigest(d, nil) + err = handler.HandleConsensusDigest(d, header) require.NoError(t, err) next := handler.NextGrandpaAuthorityChange() require.Equal(t, uint64(block), next) + + nextSetID := uint64(1) + auths, err := 
handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) + require.NoError(t, err) + expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) + require.NoError(t, err) + require.Equal(t, expected, auths) } func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { @@ -282,7 +252,10 @@ func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { handler.Start() defer handler.Stop() - later := uint32(5) + kr, err := keystore.NewEd25519Keyring() + require.NoError(t, err) + + later := uint32(6) sc := &types.GrandpaScheduledChange{ Auths: []*types.GrandpaAuthoritiesRaw{}, Delay: later, @@ -296,12 +269,25 @@ func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { Data: data, } - err = handler.HandleConsensusDigest(d, nil) + header := &types.Header{ + Number: big.NewInt(1), + } + + err = handler.HandleConsensusDigest(d, header) + require.NoError(t, err) + + nextSetID := uint64(1) + auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) + require.NoError(t, err) + expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) require.NoError(t, err) + require.Equal(t, expected, auths) - earlier := uint32(3) + earlier := uint32(4) fc := &types.GrandpaForcedChange{ - Auths: []*types.GrandpaAuthoritiesRaw{}, + Auths: []*types.GrandpaAuthoritiesRaw{ + {Key: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), ID: 0}, + }, Delay: earlier, } @@ -313,11 +299,17 @@ func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { Data: data, } - err = handler.HandleConsensusDigest(d, nil) + err = handler.HandleConsensusDigest(d, header) require.NoError(t, err) next := handler.NextGrandpaAuthorityChange() - require.Equal(t, uint64(earlier), next) + require.Equal(t, uint64(earlier+1), next) + + auths, err = handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) + require.NoError(t, err) + expected, err = types.NewGrandpaVotersFromAuthoritiesRaw(fc.Auths) + require.NoError(t, err) + 
require.Equal(t, expected, auths) } func TestDigestHandler_HandleBABEOnDisabled(t *testing.T) { diff --git a/dot/core/errors.go b/dot/core/errors.go index fd3b77705e..5faf6f8b02 100644 --- a/dot/core/errors.go +++ b/dot/core/errors.go @@ -45,9 +45,6 @@ var ErrNilRuntime = errors.New("cannot have nil runtime") // ErrNilBlockProducer is returned when trying to instantiate a block producing Service without a block producer var ErrNilBlockProducer = errors.New("cannot have nil BlockProducer") -// ErrNilFinalityGadget is returned when trying to instantiate a finalizing Service without a finality gadget -var ErrNilFinalityGadget = errors.New("cannot have nil FinalityGadget") - // ErrNilConsensusMessageHandler is returned when trying to instantiate a Service without a FinalityMessageHandler var ErrNilConsensusMessageHandler = errors.New("cannot have nil ErrNilFinalityMessageHandler") diff --git a/dot/core/interface.go b/dot/core/interface.go index e6c2d43759..bab25e3099 100644 --- a/dot/core/interface.go +++ b/dot/core/interface.go @@ -22,8 +22,8 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/grandpa" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/services" "github.com/ChainSafe/gossamer/lib/transaction" ) @@ -44,7 +44,7 @@ type BlockState interface { SetFinalizedHash(common.Hash, uint64, uint64) error RegisterImportedChannel(ch chan<- *types.Block) (byte, error) UnregisterImportedChannel(id byte) - RegisterFinalizedChannel(ch chan<- *types.Header) (byte, error) + RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) UnregisterFinalizedChannel(id byte) HighestCommonAncestor(a, b common.Hash) (common.Hash, error) SubChain(start, end common.Hash) ([]common.Hash, error) @@ -68,14 +68,6 @@ type TransactionState interface { PendingInPool() []*transaction.ValidTransaction } -// 
FinalityGadget is the interface that a finality gadget must implement -type FinalityGadget interface { - services.Service - - UpdateAuthorities(ad []*types.Authority) - Authorities() []*types.Authority -} - // BlockProducer is the interface that a block production service must implement type BlockProducer interface { GetBlockChannel() <-chan types.Block @@ -100,3 +92,12 @@ type EpochState interface { SetCurrentEpoch(epoch uint64) error GetCurrentEpoch() (uint64, error) } + +// GrandpaState is the interface for the state.GrandpaState +type GrandpaState interface { + SetNextChange(authorities []*grandpa.Voter, number *big.Int) error + IncrementSetID() error + SetNextPause(number *big.Int) error + SetNextResume(number *big.Int) error + GetCurrentSetID() (uint64, error) +} diff --git a/dot/core/service.go b/dot/core/service.go index 93120e87c1..cf72bdfe65 100644 --- a/dot/core/service.go +++ b/dot/core/service.go @@ -58,10 +58,6 @@ type Service struct { blockProducer BlockProducer isBlockProducer bool - // Finality gadget variables - finalityGadget FinalityGadget - isFinalityAuthority bool - // Block verification verifier Verifier @@ -81,19 +77,17 @@ type Service struct { // Config holds the configuration for the core Service. 
type Config struct { - LogLvl log.Lvl - BlockState BlockState - EpochState EpochState - StorageState StorageState - TransactionState TransactionState - Network Network - Keystore *keystore.GlobalKeystore - Runtime runtime.Instance - BlockProducer BlockProducer - IsBlockProducer bool - FinalityGadget FinalityGadget - IsFinalityAuthority bool - Verifier Verifier + LogLvl log.Lvl + BlockState BlockState + EpochState EpochState + StorageState StorageState + TransactionState TransactionState + Network Network + Keystore *keystore.GlobalKeystore + Runtime runtime.Instance + BlockProducer BlockProducer + IsBlockProducer bool + Verifier Verifier NewBlocks chan types.Block // only used for testing purposes } @@ -121,10 +115,6 @@ func NewService(cfg *Config) (*Service, error) { return nil, ErrNilBlockProducer } - if cfg.IsFinalityAuthority && cfg.FinalityGadget == nil { - return nil, ErrNilFinalityGadget - } - h := log.StreamHandler(os.Stdout, log.TerminalFormat()) h = log.CallerFileHandler(h) logger.SetHandler(log.LvlFilterHandler(cfg.LogLvl, h)) @@ -148,25 +138,23 @@ func NewService(cfg *Config) (*Service, error) { ctx, cancel := context.WithCancel(context.Background()) srv := &Service{ - ctx: ctx, - cancel: cancel, - rt: cfg.Runtime, - codeHash: codeHash, - keys: cfg.Keystore, - blkRec: cfg.NewBlocks, - blockState: cfg.BlockState, - epochState: cfg.EpochState, - storageState: cfg.StorageState, - transactionState: cfg.TransactionState, - net: cfg.Network, - isBlockProducer: cfg.IsBlockProducer, - blockProducer: cfg.BlockProducer, - finalityGadget: cfg.FinalityGadget, - verifier: cfg.Verifier, - isFinalityAuthority: cfg.IsFinalityAuthority, - lock: &sync.Mutex{}, - blockAddCh: blockAddCh, - blockAddChID: id, + ctx: ctx, + cancel: cancel, + rt: cfg.Runtime, + codeHash: codeHash, + keys: cfg.Keystore, + blkRec: cfg.NewBlocks, + blockState: cfg.BlockState, + epochState: cfg.EpochState, + storageState: cfg.StorageState, + transactionState: cfg.TransactionState, + net: 
cfg.Network, + isBlockProducer: cfg.IsBlockProducer, + blockProducer: cfg.BlockProducer, + verifier: cfg.Verifier, + lock: &sync.Mutex{}, + blockAddCh: blockAddCh, + blockAddChID: id, } if cfg.NewBlocks != nil { diff --git a/dot/core/service_test.go b/dot/core/service_test.go index 6fd46f8f23..b3ee59cccc 100644 --- a/dot/core/service_test.go +++ b/dot/core/service_test.go @@ -378,7 +378,7 @@ func TestService_HandleSubmittedExtrinsic(t *testing.T) { header, err := types.NewHeader(parentHash, common.Hash{}, common.Hash{}, big.NewInt(1), types.NewEmptyDigest()) require.NoError(t, err) - //initialize block header + //initialise block header err = s.rt.InitializeBlock(header) require.NoError(t, err) diff --git a/dot/core/test_helpers.go b/dot/core/test_helpers.go index d5cd894efa..5366d4cf8f 100644 --- a/dot/core/test_helpers.go +++ b/dot/core/test_helpers.go @@ -96,29 +96,6 @@ func (n *mockNetwork) SendMessage(m network.NotificationsMessage) { n.Message = m } -// mockFinalityGadget implements the FinalityGadget interface -type mockFinalityGadget struct { - auths []*types.Authority -} - -// Start mocks starting -func (fg *mockFinalityGadget) Start() error { - return nil -} - -// Stop mocks stopping -func (fg *mockFinalityGadget) Stop() error { - return nil -} - -func (fg *mockFinalityGadget) UpdateAuthorities(ad []*types.Authority) { - fg.auths = ad -} - -func (fg *mockFinalityGadget) Authorities() []*types.Authority { - return fg.auths -} - // NewTestService creates a new test core service func NewTestService(t *testing.T, cfg *Config) *Service { if cfg == nil { @@ -156,7 +133,7 @@ func NewTestService(t *testing.T, cfg *Config) *Service { stateSrvc = state.NewService(testDatadirPath, log.LvlInfo) stateSrvc.UseMemDB() - err = stateSrvc.Initialize(gen, genHeader, genTrie) + err = stateSrvc.Initialise(gen, genHeader, genTrie) require.Nil(t, err) err = stateSrvc.Start() @@ -256,6 +233,10 @@ func (s *mockSyncer) ProcessBlockData(_ []*types.BlockData) (int, error) { return 
0, nil } +func (s *mockSyncer) ProcessJustification(data []*types.BlockData) (int, error) { + return 0, nil +} + func (s *mockSyncer) IsSynced() bool { return false } diff --git a/dot/import.go b/dot/import.go index 25d0337b71..0a78503f82 100644 --- a/dot/import.go +++ b/dot/import.go @@ -21,7 +21,6 @@ import ( "encoding/json" "errors" "io/ioutil" - "math/big" "path/filepath" "github.com/ChainSafe/gossamer/dot/state" @@ -97,8 +96,7 @@ func newHeaderFromFile(filename string) (*types.Header, error) { return nil, errors.New("invalid number field in header JSON") } - numBytes := common.MustHexToBytes(hexNum) - num := big.NewInt(0).SetBytes(numBytes) + num := common.MustHexToBigInt(hexNum) parentHashStr, ok := jsonHeader["parentHash"].(string) if !ok { diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go index 05e1c9b71f..bf8e655195 100644 --- a/dot/network/block_announce.go +++ b/dot/network/block_announce.go @@ -56,7 +56,7 @@ func (bm *BlockAnnounceMessage) Type() byte { // string formats a BlockAnnounceMessage as a string func (bm *BlockAnnounceMessage) String() string { - return fmt.Sprintf("BlockAnnounceMessage ParentHash=%s Number=%d StateRoot=%sx ExtrinsicsRoot=%s Digest=%v", + return fmt.Sprintf("BlockAnnounceMessage ParentHash=%s Number=%d StateRoot=%s ExtrinsicsRoot=%s Digest=%v", bm.ParentHash, bm.Number, bm.StateRoot, @@ -220,17 +220,14 @@ func (s *Service) validateBlockAnnounceHandshake(peer peer.ID, hs Handshake) err // don't need to lock here, since function is always called inside the func returned by // `createNotificationsMessageHandler` which locks the map beforehand. 
- data, ok := np.getHandshakeData(peer) - if !ok { - np.handshakeData.Store(peer, &handshakeData{ - received: true, - validated: true, - }) - data, _ = np.getHandshakeData(peer) + data, ok := np.getHandshakeData(peer, true) + if ok { + data.handshake = hs + // TODO: since this is used only for rpc system_peers only, + // we can just set the inbound handshake and use that in Peers() + np.inboundHandshakeData.Store(peer, data) } - data.handshake = hs - // if peer has higher best block than us, begin syncing latestHeader, err := s.blockState.BestBlockHeader() if err != nil { @@ -254,14 +251,14 @@ func (s *Service) validateBlockAnnounceHandshake(peer peer.ID, hs Handshake) err // handleBlockAnnounceMessage handles BlockAnnounce messages // if some more blocks are required to sync the announced block, the node will open a sync stream // with its peer and send a BlockRequest message -func (s *Service) handleBlockAnnounceMessage(peer peer.ID, msg NotificationsMessage) error { +func (s *Service) handleBlockAnnounceMessage(peer peer.ID, msg NotificationsMessage) (propagate bool, err error) { if an, ok := msg.(*BlockAnnounceMessage); ok { s.syncQueue.handleBlockAnnounce(an, peer) - err := s.syncer.HandleBlockAnnounce(an) + err = s.syncer.HandleBlockAnnounce(an) if err != nil { - return err + return false, err } } - return nil + return true, nil } diff --git a/dot/network/block_announce_test.go b/dot/network/block_announce_test.go index ea381dad6d..4010e002b3 100644 --- a/dot/network/block_announce_test.go +++ b/dot/network/block_announce_test.go @@ -101,8 +101,9 @@ func TestHandleBlockAnnounceMessage(t *testing.T) { Number: big.NewInt(10), } - err := s.handleBlockAnnounceMessage(peerID, msg) + propagate, err := s.handleBlockAnnounceMessage(peerID, msg) require.NoError(t, err) + require.True(t, propagate) } func TestValidateBlockAnnounceHandshake(t *testing.T) { @@ -117,10 +118,10 @@ func TestValidateBlockAnnounceHandshake(t *testing.T) { nodeA := createTestService(t, 
configA) nodeA.noGossip = true nodeA.notificationsProtocols[BlockAnnounceMsgType] = &notificationsProtocol{ - handshakeData: new(sync.Map), + inboundHandshakeData: new(sync.Map), } testPeerID := peer.ID("noot") - nodeA.notificationsProtocols[BlockAnnounceMsgType].handshakeData.Store(testPeerID, &handshakeData{}) + nodeA.notificationsProtocols[BlockAnnounceMsgType].inboundHandshakeData.Store(testPeerID, handshakeData{}) err := nodeA.validateBlockAnnounceHandshake(testPeerID, &BlockAnnounceHandshake{ BestBlockNumber: 100, diff --git a/dot/network/config.go b/dot/network/config.go index a3cf8e2a02..af178811c7 100644 --- a/dot/network/config.go +++ b/dot/network/config.go @@ -19,6 +19,7 @@ package network import ( "errors" "path" + "time" log "github.com/ChainSafe/log15" "github.com/libp2p/go-libp2p-core/crypto" @@ -93,6 +94,9 @@ type Config struct { // PublishMetrics enables collection of network metrics PublishMetrics bool + + // telemetryInterval how often to send telemetry metrics + telemetryInterval time.Duration } // build checks the configuration, sets up the private key for the network service, @@ -134,6 +138,11 @@ func (c *Config) build() error { c.logger.Warn("Bootstrap is enabled but no bootstrap nodes are defined") } + // set telemetryInterval to default + if c.telemetryInterval.Microseconds() == 0 { + c.telemetryInterval = time.Second * 5 + } + return nil } diff --git a/dot/network/connmgr.go b/dot/network/connmgr.go index f10b2fc585..276ad0e6e6 100644 --- a/dot/network/connmgr.go +++ b/dot/network/connmgr.go @@ -20,6 +20,7 @@ import ( "context" "math/rand" "sync" + "time" "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/network" @@ -29,6 +30,10 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +var ( + maxRetries = 12 +) + // ConnManager implements connmgr.ConnManager type ConnManager struct { sync.Mutex @@ -191,10 +196,18 @@ func (cm *ConnManager) Disconnected(n network.Network, c network.Conn) { Addrs: addrs, } - err := 
cm.host.connect(info) - if err != nil { - logger.Warn("failed to reconnect to persistent peer", "peer", c.RemotePeer(), "error", err) - } + go func() { + for i := 0; i < maxRetries; i++ { + err := cm.host.connect(info) + if err != nil { + logger.Warn("failed to reconnect to persistent peer", "peer", c.RemotePeer(), "error", err) + time.Sleep(time.Minute) + continue + } + + return + } + }() // TODO: if number of peers falls below the min desired peer count, we should try to connect to previously discovered peers } @@ -207,7 +220,6 @@ func (cm *ConnManager) registerDisconnectHandler(cb func(peer.ID)) { func (cm *ConnManager) OpenedStream(n network.Network, s network.Stream) { logger.Trace( "Opened stream", - "host", s.Conn().LocalPeer(), "peer", s.Conn().RemotePeer(), "protocol", s.Protocol(), ) @@ -221,7 +233,6 @@ func (cm *ConnManager) registerCloseHandler(protocolID protocol.ID, cb func(id p func (cm *ConnManager) ClosedStream(n network.Network, s network.Stream) { logger.Trace( "Closed stream", - "host", s.Conn().LocalPeer(), "peer", s.Conn().RemotePeer(), "protocol", s.Protocol(), ) diff --git a/dot/network/connmgr_test.go b/dot/network/connmgr_test.go index e760704013..5f6b22bbcb 100644 --- a/dot/network/connmgr_test.go +++ b/dot/network/connmgr_test.go @@ -112,7 +112,7 @@ func TestPersistentPeers(t *testing.T) { require.NotEqual(t, 0, len(conns)) // if A disconnects from B, B should reconnect - nodeA.host.h.Network().ClosePeer(nodeA.host.id()) + nodeA.host.h.Network().ClosePeer(nodeB.host.id()) time.Sleep(time.Millisecond * 500) conns = nodeB.host.h.Network().ConnsToPeer(nodeA.host.id()) require.NotEqual(t, 0, len(conns)) diff --git a/dot/network/gossip_test.go b/dot/network/gossip_test.go index 73fe5f55c0..94c7b0669e 100644 --- a/dot/network/gossip_test.go +++ b/dot/network/gossip_test.go @@ -101,7 +101,7 @@ func TestGossip(t *testing.T) { } require.NoError(t, err) - err = nodeA.host.send(addrInfosB[0].ID, "", testBlockAnnounceMessage) + _, err = 
nodeA.host.send(addrInfosB[0].ID, "", testBlockAnnounceMessage) require.NoError(t, err) time.Sleep(TestMessageTimeout) diff --git a/dot/network/host.go b/dot/network/host.go index 12c4a386ba..78445dc37c 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -27,6 +27,7 @@ import ( badger "github.com/ipfs/go-ds-badger2" "github.com/libp2p/go-libp2p" libp2phost "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/metrics" libp2pnetwork "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" @@ -59,6 +60,7 @@ type host struct { cm *ConnManager ds *badger.Datastore messageCache *messageCache + bwc *metrics.BandwidthCounter } // newHost creates a host wrapper with a new libp2p host instance @@ -167,6 +169,8 @@ func newHost(ctx context.Context, cfg *Config) (*host, error) { return nil, err } + bwc := metrics.NewBandwidthCounter() + host := &host{ ctx: ctx, h: h, @@ -177,6 +181,7 @@ func newHost(ctx context.Context, cfg *Config) (*host, error) { ds: ds, persistentPeers: pps, messageCache: msgCache, + bwc: bwc, } cm.host = host @@ -257,37 +262,31 @@ func (h *host) bootstrap() { failed++ } } - if failed == len(all) { + if failed == len(all) && len(all) != 0 { logger.Error("failed to bootstrap to any bootnode") } } -// send writes the given message to the outbound message stream for the given -// peer (gets the already opened outbound message stream or opens a new one). 
-func (h *host) send(p peer.ID, pid protocol.ID, msg Message) (err error) { - // get outbound stream for given peer - s := h.getOutboundStream(p, pid) - - // check if stream needs to be opened - if s == nil { - // open outbound stream with host protocol id - s, err = h.h.NewStream(h.ctx, p, pid) - if err != nil { - logger.Trace("failed to open new stream with peer", "peer", p, "protocol", pid, "error", err) - return err - } - - logger.Trace( - "Opened stream", - "host", h.id(), - "peer", p, - "protocol", pid, - ) +// send creates a new outbound stream with the given peer and writes the message. It also returns +// the newly created stream. +func (h *host) send(p peer.ID, pid protocol.ID, msg Message) (libp2pnetwork.Stream, error) { + // open outbound stream with host protocol id + stream, err := h.h.NewStream(h.ctx, p, pid) + if err != nil { + logger.Trace("failed to open new stream with peer", "peer", p, "protocol", pid, "error", err) + return nil, err } - err = h.writeToStream(s, msg) + logger.Trace( + "Opened stream", + "host", h.id(), + "peer", p, + "protocol", pid, + ) + + err = h.writeToStream(stream, msg) if err != nil { - return err + return nil, err } logger.Trace( @@ -298,7 +297,7 @@ func (h *host) send(p peer.ID, pid protocol.ID, msg Message) (err error) { "message", msg.String(), ) - return nil + return stream, nil } func (h *host) writeToStream(s libp2pnetwork.Stream, msg Message) error { @@ -311,8 +310,14 @@ func (h *host) writeToStream(s libp2pnetwork.Stream, msg Message) error { lenBytes := uint64ToLEB128(msgLen) encMsg = append(lenBytes, encMsg...) 
- _, err = s.Write(encMsg) - return err + sent, err := s.Write(encMsg) + if err != nil { + return err + } + + h.bwc.LogSentMessage(int64(sent)) + + return nil } // getOutboundStream returns the outbound message stream for the given peer or returns diff --git a/dot/network/host_test.go b/dot/network/host_test.go index 1bd3aec938..26090f8cb0 100644 --- a/dot/network/host_test.go +++ b/dot/network/host_test.go @@ -218,7 +218,7 @@ func TestSend(t *testing.T) { } require.NoError(t, err) - err = nodeA.host.send(addrInfosB[0].ID, nodeB.host.protocolID, testBlockRequestMessage) + _, err = nodeA.host.send(addrInfosB[0].ID, nodeB.host.protocolID, testBlockRequestMessage) require.NoError(t, err) time.Sleep(TestMessageTimeout) @@ -273,44 +273,29 @@ func TestExistingStream(t *testing.T) { } require.NoError(t, err) - stream := nodeA.host.getOutboundStream(nodeB.host.id(), nodeB.host.protocolID) - require.Nil(t, stream, "node A should not have an outbound stream") - // node A opens the stream to send the first message - err = nodeA.host.send(addrInfosB[0].ID, nodeB.host.protocolID, testBlockRequestMessage) + stream, err := nodeA.host.send(addrInfosB[0].ID, nodeB.host.protocolID, testBlockRequestMessage) require.NoError(t, err) time.Sleep(TestMessageTimeout) require.NotNil(t, handlerB.messages[nodeA.host.id()], "node B timeout waiting for message from node A") - stream = nodeA.host.getOutboundStream(nodeB.host.id(), nodeB.host.protocolID) - require.NotNil(t, stream, "node A should have an outbound stream") - // node A uses the stream to send a second message - err = nodeA.host.send(addrInfosB[0].ID, nodeB.host.protocolID, testBlockRequestMessage) + err = nodeA.host.writeToStream(stream, testBlockRequestMessage) require.NoError(t, err) require.NotNil(t, handlerB.messages[nodeA.host.id()], "node B timeout waiting for message from node A") - stream = nodeA.host.getOutboundStream(nodeB.host.id(), nodeB.host.protocolID) - require.NotNil(t, stream, "node B should have an outbound 
stream") - // node B opens the stream to send the first message - err = nodeB.host.send(addrInfosA[0].ID, nodeB.host.protocolID, testBlockRequestMessage) + stream, err = nodeB.host.send(addrInfosA[0].ID, nodeB.host.protocolID, testBlockRequestMessage) require.NoError(t, err) time.Sleep(TestMessageTimeout) require.NotNil(t, handlerA.messages[nodeB.host.id()], "node A timeout waiting for message from node B") - stream = nodeB.host.getOutboundStream(nodeA.host.id(), nodeB.host.protocolID) - require.NotNil(t, stream, "node B should have an outbound stream") - // node B uses the stream to send a second message - err = nodeB.host.send(addrInfosA[0].ID, nodeB.host.protocolID, testBlockRequestMessage) + err = nodeB.host.writeToStream(stream, testBlockRequestMessage) require.NoError(t, err) require.NotNil(t, handlerA.messages[nodeB.host.id()], "node A timeout waiting for message from node B") - - stream = nodeB.host.getOutboundStream(nodeA.host.id(), nodeB.host.protocolID) - require.NotNil(t, stream, "node B should have an outbound stream") } func TestStreamCloseMetadataCleanup(t *testing.T) { @@ -361,19 +346,19 @@ func TestStreamCloseMetadataCleanup(t *testing.T) { } // node A opens the stream to send the first message - err = nodeA.host.send(nodeB.host.id(), nodeB.host.protocolID+blockAnnounceID, testHandshake) + _, err = nodeA.host.send(nodeB.host.id(), nodeB.host.protocolID+blockAnnounceID, testHandshake) require.NoError(t, err) info := nodeA.notificationsProtocols[BlockAnnounceMsgType] // Set handshake data to received - info.handshakeData.Store(nodeB.host.id(), &handshakeData{ + info.inboundHandshakeData.Store(nodeB.host.id(), handshakeData{ received: true, validated: true, }) // Verify that handshake data exists. 
- _, ok := info.getHandshakeData(nodeB.host.id()) + _, ok := info.getHandshakeData(nodeB.host.id(), true) require.True(t, ok) time.Sleep(time.Second) @@ -383,7 +368,7 @@ func TestStreamCloseMetadataCleanup(t *testing.T) { time.Sleep(time.Second) // Verify that handshake data is cleared. - _, ok = info.getHandshakeData(nodeB.host.id()) + _, ok = info.getHandshakeData(nodeB.host.id(), true) require.False(t, ok) } diff --git a/dot/network/light_test.go b/dot/network/light_test.go index 50a5572aa2..bc9b2fd862 100644 --- a/dot/network/light_test.go +++ b/dot/network/light_test.go @@ -22,7 +22,7 @@ func TestDecodeLightMessage(t *testing.T) { reqEnc, err := testLightRequest.Encode() require.NoError(t, err) - msg, err := s.decodeLightMessage(reqEnc, testPeer) + msg, err := s.decodeLightMessage(reqEnc, testPeer, true) require.NoError(t, err) req, ok := msg.(*LightRequest) @@ -36,7 +36,7 @@ func TestDecodeLightMessage(t *testing.T) { respEnc, err := testLightResponse.Encode() require.NoError(t, err) - msg, err = s.decodeLightMessage(respEnc, testPeer) + msg, err = s.decodeLightMessage(respEnc, testPeer, true) require.NoError(t, err) resp, ok := msg.(*LightResponse) require.True(t, ok) diff --git a/dot/network/message.go b/dot/network/message.go index 87a60a976f..c50e8b971f 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -148,6 +148,10 @@ func (bm *BlockRequestMessage) Decode(in []byte) error { case *pb.BlockRequest_Hash: startingBlock, err = variadic.NewUint64OrHash(common.BytesToHash(from.Hash)) case *pb.BlockRequest_Number: + // TODO: we are receiving block requests w/ 4-byte From field; did the format change? 
+ if len(from.Number) != 8 { + return errors.New("invalid BlockResponseMessage.From; uint64 is not 8 bytes") + } startingBlock, err = variadic.NewUint64OrHash(binary.LittleEndian.Uint64(from.Number)) default: err = errors.New("invalid StartingBlock") @@ -355,9 +359,6 @@ var _ NotificationsMessage = &ConsensusMessage{} // ConsensusMessage is mostly opaque to us type ConsensusMessage struct { - // Identifies consensus engine. - ConsensusEngineID types.ConsensusEngineID - // Message payload. Data []byte } @@ -373,23 +374,17 @@ func (cm *ConsensusMessage) Type() byte { // String is the string func (cm *ConsensusMessage) String() string { - return fmt.Sprintf("ConsensusMessage ConsensusEngineID=%d, DATA=%x", cm.ConsensusEngineID, cm.Data) + return fmt.Sprintf("ConsensusMessage Data=%x", cm.Data) } // Encode encodes a block response message using SCALE func (cm *ConsensusMessage) Encode() ([]byte, error) { - encMsg := cm.ConsensusEngineID.ToBytes() - return append(encMsg, cm.Data...), nil + return cm.Data, nil } // Decode the message into a ConsensusMessage func (cm *ConsensusMessage) Decode(in []byte) error { - if len(in) < 5 { - return errors.New("cannot decode ConsensusMessage: encoding is too short") - } - - cm.ConsensusEngineID = types.NewConsensusEngineID(in[:4]) - cm.Data = in[4:] + cm.Data = in return nil } diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 3fa96e5eb2..ba02958cce 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -340,12 +340,8 @@ func TestDecodeTransactionMessageTwoExtrinsics(t *testing.T) { } func TestDecodeConsensusMessage(t *testing.T) { - ConsensusEngineID := types.BabeEngineID - - testID := hex.EncodeToString(types.BabeEngineID.ToBytes()) testData := "03100405" - - msg := "0x" + testID + testData // 0x4241424503100405 + msg := "0x" + testData encMsg, err := common.HexToBytes(msg) require.Nil(t, err) @@ -358,8 +354,7 @@ func TestDecodeConsensusMessage(t *testing.T) { require.Nil(t, err) 
expected := &ConsensusMessage{ - ConsensusEngineID: ConsensusEngineID, - Data: out, + Data: out, } require.Equal(t, expected, m) diff --git a/dot/network/notifications.go b/dot/network/notifications.go index 9d36695379..47792d22a7 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -18,8 +18,8 @@ package network import ( "errors" - "math/rand" "sync" + "unsafe" libp2pnetwork "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -28,6 +28,8 @@ import ( var errCannotValidateHandshake = errors.New("failed to validate handshake") +var maxHandshakeSize = unsafe.Sizeof(BlockAnnounceHandshake{}) //nolint + // Handshake is the interface all handshakes for notifications protocols must implement type Handshake interface { NotificationsMessage @@ -48,40 +50,58 @@ type ( MessageDecoder = func([]byte) (NotificationsMessage, error) // NotificationsMessageHandler is called when a (non-handshake) message is received over a notifications stream. 
- NotificationsMessageHandler = func(peer peer.ID, msg NotificationsMessage) error + NotificationsMessageHandler = func(peer peer.ID, msg NotificationsMessage) (propagate bool, err error) ) type notificationsProtocol struct { - protocolID protocol.ID - getHandshake HandshakeGetter - handshakeData *sync.Map //map[peer.ID]*handshakeData - mapMu sync.RWMutex + protocolID protocol.ID + getHandshake HandshakeGetter + handshakeValidator HandshakeValidator + + inboundHandshakeData *sync.Map //map[peer.ID]*handshakeData + outboundHandshakeData *sync.Map //map[peer.ID]*handshakeData } -func (n *notificationsProtocol) getHandshakeData(pid peer.ID) (*handshakeData, bool) { - data, has := n.handshakeData.Load(pid) +func (n *notificationsProtocol) getHandshakeData(pid peer.ID, inbound bool) (handshakeData, bool) { + if inbound { + data, has := n.inboundHandshakeData.Load(pid) + if !has { + return handshakeData{}, false + } + + return data.(handshakeData), true + } + + data, has := n.outboundHandshakeData.Load(pid) if !has { - return nil, false + return handshakeData{}, false } - return data.(*handshakeData), true + return data.(handshakeData), true } type handshakeData struct { - received bool - validated bool - handshake Handshake - outboundMsg NotificationsMessage + received bool + validated bool + handshake Handshake + stream libp2pnetwork.Stream + *sync.Mutex +} + +func newHandshakeData(received, validated bool, stream libp2pnetwork.Stream) handshakeData { + return handshakeData{ + received: received, + validated: validated, + stream: stream, + Mutex: new(sync.Mutex), + } } func createDecoder(info *notificationsProtocol, handshakeDecoder HandshakeDecoder, messageDecoder MessageDecoder) messageDecoder { - return func(in []byte, peer peer.ID) (Message, error) { + return func(in []byte, peer peer.ID, inbound bool) (Message, error) { // if we don't have handshake data on this peer, or we haven't received the handshake from them already, // assume we are receiving the handshake 
- info.mapMu.RLock() - defer info.mapMu.RUnlock() - - if hsData, has := info.getHandshakeData(peer); !has || !hsData.received { + if hsData, has := info.getHandshakeData(peer, inbound); !has || !hsData.received { return handshakeDecoder(in) } @@ -90,9 +110,9 @@ func createDecoder(info *notificationsProtocol, handshakeDecoder HandshakeDecode } } -func (s *Service) createNotificationsMessageHandler(info *notificationsProtocol, handshakeValidator HandshakeValidator, messageHandler NotificationsMessageHandler) messageHandler { +func (s *Service) createNotificationsMessageHandler(info *notificationsProtocol, messageHandler NotificationsMessageHandler) messageHandler { return func(stream libp2pnetwork.Stream, m Message) error { - if m == nil || info == nil || handshakeValidator == nil || messageHandler == nil { + if m == nil || info == nil || info.handshakeValidator == nil || messageHandler == nil { return nil } @@ -117,69 +137,38 @@ func (s *Service) createNotificationsMessageHandler(info *notificationsProtocol, return errors.New("failed to convert message to Handshake") } - info.mapMu.Lock() - defer info.mapMu.Unlock() - // if we are the receiver and haven't received the handshake already, validate it - if _, has := info.getHandshakeData(peer); !has { + // note: if this function is being called, it's being called via SetStreamHandler, + // ie it is an inbound stream and we only send the handshake over it. + // we do not send any other data over this stream, we would need to open a new outbound stream. 
+ if _, has := info.getHandshakeData(peer, true); !has { logger.Trace("receiver: validating handshake", "protocol", info.protocolID) - info.handshakeData.Store(peer, &handshakeData{ - validated: false, - received: true, - }) - err := handshakeValidator(peer, hs) + hsData := newHandshakeData(true, false, stream) + info.inboundHandshakeData.Store(peer, hsData) + + err := info.handshakeValidator(peer, hs) if err != nil { logger.Trace("failed to validate handshake", "protocol", info.protocolID, "peer", peer, "error", err) - _ = stream.Conn().Close() return errCannotValidateHandshake } - data, _ := info.getHandshakeData(peer) - data.validated = true + hsData.validated = true + info.inboundHandshakeData.Store(peer, hsData) // once validated, send back a handshake resp, err := info.getHandshake() if err != nil { - logger.Debug("failed to get handshake", "protocol", info.protocolID, "error", err) + logger.Warn("failed to get handshake", "protocol", info.protocolID, "error", err) return err } - err = s.host.send(peer, info.protocolID, resp) + err = s.host.writeToStream(stream, resp) if err != nil { logger.Trace("failed to send handshake", "protocol", info.protocolID, "peer", peer, "error", err) - _ = stream.Conn().Close() return err } logger.Trace("receiver: sent handshake", "protocol", info.protocolID, "peer", peer) - } - - // if we are the initiator and haven't received the handshake already, validate it - if hsData, has := info.getHandshakeData(peer); has && !hsData.validated { - logger.Trace("sender: validating handshake") - err := handshakeValidator(peer, hs) - if err != nil { - logger.Trace("failed to validate handshake", "protocol", info.protocolID, "peer", peer, "error", err) - hsData.validated = false - _ = stream.Conn().Close() - return errCannotValidateHandshake - } - - hsData.validated = true - hsData.received = true - logger.Trace("sender: validated handshake", "protocol", info.protocolID, "peer", peer) - } else if hsData.received { - return nil - } - - // if 
we are the initiator, send the message - if hsData, has := info.getHandshakeData(peer); has && hsData.validated && hsData.received && hsData.outboundMsg != nil { - logger.Trace("sender: sending message", "protocol", info.protocolID) - err := s.host.send(peer, info.protocolID, hsData.outboundMsg) - if err != nil { - logger.Debug("failed to send message", "protocol", info.protocolID, "peer", peer, "error", err) - return err - } return nil } @@ -191,25 +180,99 @@ func (s *Service) createNotificationsMessageHandler(info *notificationsProtocol, "peer", stream.Conn().RemotePeer(), ) - err := messageHandler(peer, msg) + propagate, err := messageHandler(peer, msg) if err != nil { return err } - // TODO: improve this by keeping track of who you've received/sent messages from - if !s.noGossip { - seen := s.gossip.hasSeen(msg) - if !seen { - s.broadcastExcluding(info, peer, msg) - } + if !propagate || s.noGossip { + return nil + } + + seen := s.gossip.hasSeen(msg) + if !seen { + s.broadcastExcluding(info, peer, msg) } return nil } } -// gossipExcluding sends a message to each connected peer except the given peer -// Used for notifications sub-protocols to gossip a message +func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtocol, msg NotificationsMessage) { + hsData, has := info.getHandshakeData(peer, false) + if has && !hsData.validated { + // peer has sent us an invalid handshake in the past, ignore + return + } + + if !has || !hsData.received || hsData.stream == nil { + if !has { + hsData = newHandshakeData(false, false, nil) + } + + hsData.Lock() + defer hsData.Unlock() + + logger.Trace("sending outbound handshake", "protocol", info.protocolID, "peer", peer, "message", hs) + stream, err := s.host.send(peer, info.protocolID, hs) + if err != nil { + logger.Trace("failed to send message to peer", "peer", peer, "error", err) + return + } + + hsData.stream = stream + info.outboundHandshakeData.Store(peer, hsData) + + if info.handshakeValidator == nil 
{ + return + } + + hs, err := readHandshake(stream, decodeBlockAnnounceHandshake) + if err != nil { + logger.Trace("failed to read handshake", "protocol", info.protocolID, "peer", peer, "error", err) + _ = stream.Close() + return + } + + hsData.received = true + + err = info.handshakeValidator(peer, hs) + if err != nil { + logger.Trace("failed to validate handshake", "protocol", info.protocolID, "peer", peer, "error", err) + hsData.validated = false + info.outboundHandshakeData.Store(peer, hsData) + return + } + + hsData.validated = true + info.outboundHandshakeData.Store(peer, hsData) + logger.Trace("sender: validated handshake", "protocol", info.protocolID, "peer", peer) + } + + if s.host.messageCache != nil { + added, err := s.host.messageCache.put(peer, msg) + if err != nil { + logger.Error("failed to add message to cache", "peer", peer, "error", err) + return + } + + if !added { + return + } + } + + // we've completed the handshake with the peer, send message directly + logger.Trace("sending message", "protocol", info.protocolID, "peer", peer, "message", msg) + + err := s.host.writeToStream(hsData.stream, msg) + if err != nil { + logger.Trace("failed to send message to peer", "peer", peer, "error", err) + } +} + +// broadcastExcluding sends a message to each connected peer except the given peer, +// and peers that have previously sent us the message or who we have already sent the message to. 
+// used for notifications sub-protocols to gossip a message func (s *Service) broadcastExcluding(info *notificationsProtocol, excluding peer.ID, msg NotificationsMessage) { logger.Trace( "broadcasting message from notifications sub-protocol", @@ -223,45 +286,26 @@ func (s *Service) broadcastExcluding(info *notificationsProtocol, excluding peer } peers := s.host.peers() - rand.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] }) - - info.mapMu.RLock() - defer info.mapMu.RUnlock() - - for _, peer := range peers { // TODO: check if stream is open, if not, open and send handshake + for _, peer := range peers { if peer == excluding { continue } - if hsData, has := info.getHandshakeData(peer); !has || !hsData.received { - info.handshakeData.Store(peer, &handshakeData{ - validated: false, - outboundMsg: msg, - }) - - logger.Trace("sending handshake", "protocol", info.protocolID, "peer", peer, "message", hs) - err = s.host.send(peer, info.protocolID, hs) - } else { - if s.host.messageCache != nil { - var added bool - added, err = s.host.messageCache.put(peer, msg) - if err != nil { - logger.Error("failed to add message to cache", "peer", peer, "error", err) - continue - } - - if !added { - continue - } - } + go s.sendData(peer, hs, info, msg) + } +} - // we've already completed the handshake with the peer, send message directly - logger.Trace("sending message", "protocol", info.protocolID, "peer", peer, "message", msg) - err = s.host.send(peer, info.protocolID, msg) - } +func readHandshake(stream libp2pnetwork.Stream, decoder HandshakeDecoder) (Handshake, error) { + msgBytes := make([]byte, maxHandshakeSize) + tot, err := readStream(stream, msgBytes) + if err != nil { + return nil, err + } - if err != nil { - logger.Error("failed to send message to peer", "peer", peer, "error", err) - } + hs, err := decoder(msgBytes[:tot]) + if err != nil { + return nil, err } + + return hs, nil } diff --git a/dot/network/notifications_test.go 
b/dot/network/notifications_test.go index f0f65f793e..83926edaef 100644 --- a/dot/network/notifications_test.go +++ b/dot/network/notifications_test.go @@ -30,6 +30,10 @@ import ( "github.com/stretchr/testify/require" ) +func TestHandshake_SizeOf(t *testing.T) { + require.Equal(t, uint32(maxHandshakeSize), uint32(72)) +} + func TestCreateDecoder_BlockAnnounce(t *testing.T) { basePath := utils.NewTestBasePath(t, "nodeA") @@ -45,15 +49,17 @@ func TestCreateDecoder_BlockAnnounce(t *testing.T) { // create info and decoder info := ¬ificationsProtocol{ - protocolID: s.host.protocolID + blockAnnounceID, - getHandshake: s.getBlockAnnounceHandshake, - handshakeData: new(sync.Map), + protocolID: s.host.protocolID + blockAnnounceID, + getHandshake: s.getBlockAnnounceHandshake, + handshakeValidator: s.validateBlockAnnounceHandshake, + inboundHandshakeData: new(sync.Map), + outboundHandshakeData: new(sync.Map), } decoder := createDecoder(info, decodeBlockAnnounceHandshake, decodeBlockAnnounceMessage) // haven't received handshake from peer testPeerID := peer.ID("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ") - info.handshakeData.Store(testPeerID, &handshakeData{ + info.inboundHandshakeData.Store(testPeerID, handshakeData{ received: false, }) @@ -67,7 +73,7 @@ func TestCreateDecoder_BlockAnnounce(t *testing.T) { enc, err := testHandshake.Encode() require.NoError(t, err) - msg, err := decoder(enc, testPeerID) + msg, err := decoder(enc, testPeerID, true) require.NoError(t, err) require.Equal(t, testHandshake, msg) @@ -83,9 +89,10 @@ func TestCreateDecoder_BlockAnnounce(t *testing.T) { require.NoError(t, err) // set handshake data to received - hsData, _ := info.getHandshakeData(testPeerID) + hsData, _ := info.getHandshakeData(testPeerID, true) hsData.received = true - msg, err = decoder(enc, testPeerID) + info.inboundHandshakeData.Store(testPeerID, hsData) + msg, err = decoder(enc, testPeerID, true) require.NoError(t, err) require.Equal(t, testBlockAnnounce, msg) } @@ -132,14 
+139,16 @@ func TestCreateNotificationsMessageHandler_BlockAnnounce(t *testing.T) { // create info and handler info := ¬ificationsProtocol{ - protocolID: s.host.protocolID + blockAnnounceID, - getHandshake: s.getBlockAnnounceHandshake, - handshakeData: new(sync.Map), + protocolID: s.host.protocolID + blockAnnounceID, + getHandshake: s.getBlockAnnounceHandshake, + handshakeValidator: s.validateBlockAnnounceHandshake, + inboundHandshakeData: new(sync.Map), + outboundHandshakeData: new(sync.Map), } - handler := s.createNotificationsMessageHandler(info, s.validateBlockAnnounceHandshake, s.handleBlockAnnounceMessage) + handler := s.createNotificationsMessageHandler(info, s.handleBlockAnnounceMessage) // set handshake data to received - info.handshakeData.Store(testPeerID, &handshakeData{ + info.inboundHandshakeData.Store(testPeerID, handshakeData{ received: true, validated: true, }) @@ -164,11 +173,13 @@ func TestCreateNotificationsMessageHandler_BlockAnnounceHandshake(t *testing.T) // create info and handler info := ¬ificationsProtocol{ - protocolID: s.host.protocolID + blockAnnounceID, - getHandshake: s.getBlockAnnounceHandshake, - handshakeData: new(sync.Map), + protocolID: s.host.protocolID + blockAnnounceID, + getHandshake: s.getBlockAnnounceHandshake, + handshakeValidator: s.validateBlockAnnounceHandshake, + inboundHandshakeData: new(sync.Map), + outboundHandshakeData: new(sync.Map), } - handler := s.createNotificationsMessageHandler(info, s.validateBlockAnnounceHandshake, s.handleBlockAnnounceMessage) + handler := s.createNotificationsMessageHandler(info, s.handleBlockAnnounceMessage) configB := &Config{ BasePath: utils.NewTestBasePath(t, "nodeB"), @@ -207,7 +218,7 @@ func TestCreateNotificationsMessageHandler_BlockAnnounceHandshake(t *testing.T) err = handler(stream, testHandshake) require.Equal(t, errCannotValidateHandshake, err) - data, has := info.getHandshakeData(testPeerID) + data, has := info.getHandshakeData(testPeerID, true) require.True(t, has) 
require.True(t, data.received) require.False(t, data.validated) @@ -220,9 +231,11 @@ func TestCreateNotificationsMessageHandler_BlockAnnounceHandshake(t *testing.T) GenesisHash: s.blockState.GenesisHash(), } + info.inboundHandshakeData.Delete(testPeerID) + err = handler(stream, testHandshake) require.NoError(t, err) - data, has = info.getHandshakeData(testPeerID) + data, has = info.getHandshakeData(testPeerID, true) require.True(t, has) require.True(t, data.received) require.True(t, data.validated) diff --git a/dot/network/service.go b/dot/network/service.go index 6c454d611a..bed9b7203e 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -26,11 +26,11 @@ import ( "time" gssmrmetrics "github.com/ChainSafe/gossamer/dot/metrics" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/services" - "github.com/ethereum/go-ethereum/metrics" - log "github.com/ChainSafe/log15" + "github.com/ethereum/go-ethereum/metrics" libp2pnetwork "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" @@ -57,7 +57,7 @@ var ( type ( // messageDecoder is passed on readStream to decode the data from the stream into a message. // since messages are decoded based on context, this is different for every sub-protocol. - messageDecoder = func([]byte, peer.ID) (Message, error) + messageDecoder = func([]byte, peer.ID, bool) (Message, error) // messageHandler is passed on readStream to handle the resulting message. 
it should return an error only if the stream is to be closed messageHandler = func(stream libp2pnetwork.Stream, msg Message) error ) @@ -89,6 +89,10 @@ type Service struct { noDiscover bool noMDNS bool noGossip bool // internal option + + // telemetry + telemetryInterval time.Duration + closeCh chan interface{} } // NewService creates a new network service from the configuration and message channels @@ -142,10 +146,11 @@ func NewService(cfg *Config) (*Service, error) { syncer: cfg.Syncer, notificationsProtocols: make(map[byte]*notificationsProtocol), lightRequest: make(map[peer.ID]struct{}), + telemetryInterval: cfg.telemetryInterval, + closeCh: make(chan interface{}), } network.syncQueue = newSyncQueue(network) - return network, err } @@ -246,6 +251,9 @@ func (s *Service) Start() error { } go s.logPeerCount() + go s.publishNetworkTelemetry(s.closeCh) + go s.sentBlockIntervalTelemetry() + return nil } @@ -279,44 +287,53 @@ func (s *Service) logPeerCount() { } } -func (s *Service) handleConn(conn libp2pnetwork.Conn) { - // give new peers a slight weight - s.syncQueue.updatePeerScore(conn.RemotePeer(), 1) +func (s *Service) publishNetworkTelemetry(done chan interface{}) { + ticker := time.NewTicker(s.telemetryInterval) + defer ticker.Stop() - s.notificationsMu.Lock() - defer s.notificationsMu.Unlock() +main: + for { + select { + case <-done: + break main - info, has := s.notificationsProtocols[BlockAnnounceMsgType] - if !has { - // this shouldn't happen - logger.Warn("block announce protocol is not yet registered!") - return - } + case <-ticker.C: + o := s.host.bwc.GetBandwidthTotals() + telemetry.GetInstance().SendNetworkData(telemetry.NewNetworkData(s.host.peerCount(), o.RateIn, o.RateOut)) + } - // open block announce substream - hs, err := info.getHandshake() - if err != nil { - logger.Warn("failed to get handshake", "protocol", blockAnnounceID, "error", err) - return } +} - info.mapMu.RLock() - defer info.mapMu.RUnlock() - - peer := conn.RemotePeer() - if 
hsData, has := info.getHandshakeData(peer); !has || !hsData.received { //nolint - info.handshakeData.Store(peer, &handshakeData{ - validated: false, - }) - - logger.Trace("sending handshake", "protocol", info.protocolID, "peer", peer, "message", hs) - err = s.host.send(peer, info.protocolID, hs) +func (s *Service) sentBlockIntervalTelemetry() { + for { + best, err := s.blockState.BestBlockHeader() if err != nil { - logger.Trace("failed to send block announce handshake to peer", "peer", peer, "error", err) + continue } + finalized, err := s.blockState.GetFinalizedHeader(0, 0) //nolint + if err != nil { + continue + } + + telemetry.GetInstance().SendBlockIntervalData(&telemetry.BlockIntervalData{ + BestHash: best.Hash(), + BestHeight: best.Number, + FinalizedHash: finalized.Hash(), + FinalizedHeight: finalized.Number, + TXCount: 0, // todo (ed) determine where to get tx count + UsedStateCacheSize: 0, // todo (ed) determine where to get used_state_cache_size + }) + time.Sleep(s.telemetryInterval) } } +func (s *Service) handleConn(conn libp2pnetwork.Conn) { + // give new peers a slight weight + // TODO: do this once handshake is received + s.syncQueue.updatePeerScore(conn.RemotePeer(), 1) +} + func (s *Service) beginDiscovery() error { rd := discovery.NewRoutingDiscovery(s.host.dht) @@ -376,6 +393,19 @@ func (s *Service) Stop() error { logger.Error("Failed to close host", "error", err) } + // check if closeCh is closed, if not, close it. 
+mainloop: + for { + select { + case _, hasMore := <-s.closeCh: + if !hasMore { + break mainloop + } + default: + close(s.closeCh) + } + } + return nil } @@ -405,31 +435,39 @@ func (s *Service) RegisterNotificationsProtocol(sub protocol.ID, } np := ¬ificationsProtocol{ - protocolID: protocolID, - getHandshake: handshakeGetter, - handshakeData: new(sync.Map), + protocolID: protocolID, + getHandshake: handshakeGetter, + handshakeValidator: handshakeValidator, + inboundHandshakeData: new(sync.Map), + outboundHandshakeData: new(sync.Map), } s.notificationsProtocols[messageID] = np connMgr := s.host.h.ConnManager().(*ConnManager) connMgr.registerCloseHandler(protocolID, func(peerID peer.ID) { - np.mapMu.Lock() - defer np.mapMu.Unlock() + if _, ok := np.getHandshakeData(peerID, true); ok { + logger.Trace( + "Cleaning up inbound handshake data", + "peer", peerID, + "protocol", protocolID, + ) + np.inboundHandshakeData.Delete(peerID) + } - if _, ok := np.getHandshakeData(peerID); ok { + if _, ok := np.getHandshakeData(peerID, false); ok { logger.Trace( - "Cleaning up handshake data", + "Cleaning up outbound handshake data", "peer", peerID, "protocol", protocolID, ) - np.handshakeData.Delete(peerID) + np.outboundHandshakeData.Delete(peerID) } }) info := s.notificationsProtocols[messageID] decoder := createDecoder(info, handshakeDecoder, messageDecoder) - handlerWithValidate := s.createNotificationsMessageHandler(info, handshakeValidator, messageHandler) + handlerWithValidate := s.createNotificationsMessageHandler(info, messageHandler) s.host.registerStreamHandlerWithOverwrite(sub, overwriteProtocol, func(stream libp2pnetwork.Stream) { logger.Trace("received stream", "sub-protocol", sub) @@ -439,8 +477,7 @@ func (s *Service) RegisterNotificationsProtocol(sub protocol.ID, return } - p := conn.RemotePeer() - s.readStream(stream, p, decoder, handlerWithValidate) + s.readStream(stream, decoder, handlerWithValidate) }) logger.Info("registered notifications sub-protocol", 
"protocol", protocolID) @@ -488,18 +525,10 @@ func (s *Service) SendMessage(msg NotificationsMessage) { // handleLightStream handles streams with the /light/2 protocol ID func (s *Service) handleLightStream(stream libp2pnetwork.Stream) { - conn := stream.Conn() - if conn == nil { - logger.Error("Failed to get connection from stream") - _ = stream.Close() - return - } - - peer := conn.RemotePeer() - s.readStream(stream, peer, s.decodeLightMessage, s.handleLightMsg) + s.readStream(stream, s.decodeLightMessage, s.handleLightMsg) } -func (s *Service) decodeLightMessage(in []byte, peer peer.ID) (Message, error) { +func (s *Service) decodeLightMessage(in []byte, peer peer.ID, _ bool) (Message, error) { s.lightRequestMu.RLock() defer s.lightRequestMu.RUnlock() @@ -517,10 +546,15 @@ func (s *Service) decodeLightMessage(in []byte, peer peer.ID) (Message, error) { return msg, err } -func (s *Service) readStream(stream libp2pnetwork.Stream, peer peer.ID, decoder messageDecoder, handler messageHandler) { +func isInbound(stream libp2pnetwork.Stream) bool { + return stream.Stat().Direction == libp2pnetwork.DirInbound +} + +func (s *Service) readStream(stream libp2pnetwork.Stream, decoder messageDecoder, handler messageHandler) { var ( maxMessageSize uint64 = maxBlockResponseSize // TODO: determine actual max message size msgBytes = make([]byte, maxMessageSize) + peer = stream.Conn().RemotePeer() ) for { @@ -528,34 +562,33 @@ func (s *Service) readStream(stream libp2pnetwork.Stream, peer peer.ID, decoder if err == io.EOF { continue } else if err != nil { - logger.Trace("failed to read from stream", "protocol", stream.Protocol(), "error", err) + logger.Trace("failed to read from stream", "peer", stream.Conn().RemotePeer(), "protocol", stream.Protocol(), "error", err) _ = stream.Close() return } // decode message based on message type - msg, err := decoder(msgBytes[:tot], peer) + msg, err := decoder(msgBytes[:tot], peer, isInbound(stream)) if err != nil { logger.Trace("failed to 
decode message from peer", "protocol", stream.Protocol(), "err", err) continue } logger.Trace( - "Received message from peer", + "received message from peer", "host", s.host.id(), "peer", peer, "msg", msg.String(), ) - go func() { - // handle message based on peer status and message type - err = handler(stream, msg) - if err != nil { - logger.Trace("Failed to handle message from stream", "message", msg, "error", err) - _ = stream.Close() - return - } - }() + err = handler(stream, msg) + if err != nil { + logger.Debug("failed to handle message from stream", "message", msg, "error", err) + _ = stream.Close() + return + } + + s.host.bwc.LogRecvMessage(int64(tot)) } } @@ -621,14 +654,14 @@ func (s *Service) NetworkState() common.NetworkState { // Peers returns information about connected peers needed for the rpc server func (s *Service) Peers() []common.PeerInfo { - peers := []common.PeerInfo{} + var peers []common.PeerInfo s.notificationsMu.RLock() np := s.notificationsProtocols[BlockAnnounceMsgType] s.notificationsMu.RUnlock() for _, p := range s.host.peers() { - data, has := np.getHandshakeData(p) + data, has := np.getHandshakeData(p, true) if !has || data.handshake == nil { peers = append(peers, common.PeerInfo{ PeerID: p.String(), diff --git a/dot/network/service_test.go b/dot/network/service_test.go index a63b446398..20a23e1a2a 100644 --- a/dot/network/service_test.go +++ b/dot/network/service_test.go @@ -17,6 +17,7 @@ package network import ( + "context" "fmt" "os" "strings" @@ -196,12 +197,6 @@ func TestBroadcastDuplicateMessage(t *testing.T) { addrInfosB, err := nodeB.host.addrInfos() require.NoError(t, err) - protocol := nodeA.notificationsProtocols[BlockAnnounceMsgType] - protocol.handshakeData.Store(nodeB.host.id(), &handshakeData{ - received: true, - validated: true, - }) - err = nodeA.host.connect(*addrInfosB[0]) // retry connect if "failed to dial" error if failedToDial(err) { @@ -210,9 +205,21 @@ func TestBroadcastDuplicateMessage(t *testing.T) { } 
require.NoError(t, err) + stream, err := nodeA.host.h.NewStream(context.Background(), nodeB.host.id(), nodeB.host.protocolID+blockAnnounceID) + require.NoError(t, err) + require.NotNil(t, stream) + + protocol := nodeA.notificationsProtocols[BlockAnnounceMsgType] + protocol.outboundHandshakeData.Store(nodeB.host.id(), handshakeData{ + received: true, + validated: true, + stream: stream, + }) + // Only one message will be sent. for i := 0; i < 5; i++ { nodeA.SendMessage(testBlockAnnounceMessage) + time.Sleep(time.Millisecond * 10) } time.Sleep(time.Millisecond * 200) @@ -223,10 +230,8 @@ func TestBroadcastDuplicateMessage(t *testing.T) { // All 5 message will be sent since cache is disabled. for i := 0; i < 5; i++ { nodeA.SendMessage(testBlockAnnounceMessage) - require.NoError(t, err) + time.Sleep(time.Millisecond * 10) } - - time.Sleep(time.Millisecond * 200) require.Equal(t, 6, len(handler.messages[nodeA.host.id()])) } @@ -390,7 +395,7 @@ func TestPersistPeerStore(t *testing.T) { require.NotEmpty(t, nodeA.host.h.Peerstore().PeerInfo(nodeB.host.id()).Addrs) - // Stop a node and reinitialize a new node with same base path. + // Stop a node and reinitialise a new node with same base path. 
err = nodeA.Stop() require.NoError(t, err) @@ -438,16 +443,4 @@ func TestHandleConn(t *testing.T) { aScore, ok := nodeB.syncQueue.peerScore.Load(nodeA.host.id()) require.True(t, ok) require.Equal(t, 1, aScore) - - infoA := nodeA.notificationsProtocols[BlockAnnounceMsgType] - hsDataB, has := infoA.getHandshakeData(nodeB.host.id()) - require.True(t, has) - require.True(t, hsDataB.received) - require.True(t, hsDataB.validated) - - infoB := nodeB.notificationsProtocols[BlockAnnounceMsgType] - hsDataA, has := infoB.getHandshakeData(nodeA.host.id()) - require.True(t, has) - require.True(t, hsDataA.received) - require.True(t, hsDataA.validated) } diff --git a/dot/network/state.go b/dot/network/state.go index 72a4831e35..70b948d11f 100644 --- a/dot/network/state.go +++ b/dot/network/state.go @@ -38,6 +38,8 @@ type Syncer interface { // CreateBlockResponse is called upon receipt of a BlockRequestMessage to create the response CreateBlockResponse(*BlockRequestMessage) (*BlockResponseMessage, error) + ProcessJustification(data []*types.BlockData) (int, error) + // ProcessBlockData is called to process BlockData received in a BlockResponseMessage ProcessBlockData(data []*types.BlockData) (int, error) diff --git a/dot/network/sync.go b/dot/network/sync.go index 5eb3b87d7d..350843e061 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -18,6 +18,7 @@ package network import ( "context" + "errors" "fmt" "reflect" "sort" @@ -25,10 +26,12 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/optional" "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/chaindb" libp2pnetwork "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" ) @@ -39,24 +42,16 @@ func (s *Service) handleSyncStream(stream libp2pnetwork.Stream) { return } - conn := stream.Conn() - if conn == nil { - 
logger.Error("Failed to get connection from stream") - _ = stream.Close() - return - } - - peer := conn.RemotePeer() - s.readStream(stream, peer, s.decodeSyncMessage, s.handleSyncMessage) + s.readStream(stream, s.decodeSyncMessage, s.handleSyncMessage) } -func (s *Service) decodeSyncMessage(in []byte, peer peer.ID) (Message, error) { +func (s *Service) decodeSyncMessage(in []byte, peer peer.ID, inbound bool) (Message, error) { msg := new(BlockRequestMessage) err := msg.Decode(in) return msg, err } -// handleSyncMessage handles synchronization message types (BlockRequest and BlockResponse) +// handleSyncMessage handles synchronisation message types (BlockRequest and BlockResponse) func (s *Service) handleSyncMessage(stream libp2pnetwork.Stream, msg Message) error { if msg == nil { _ = stream.Close() @@ -161,7 +156,6 @@ func newSyncQueue(s *Service) *syncQueue { func (q *syncQueue) start() { go q.handleResponseQueue() go q.syncAtHead() - go q.finalizeAtHead() go q.processBlockRequests() go q.processBlockResponses() @@ -178,11 +172,12 @@ func (q *syncQueue) syncAtHead() { } q.s.syncer.SetSyncing(true) + q.s.noGossip = true // don't gossip messages until we're at the head for { select { // sleep for average block time TODO: make this configurable from slot duration - case <-time.After(q.slotDuration): + case <-time.After(q.slotDuration * 2): case <-q.ctx.Done(): return } @@ -195,10 +190,12 @@ func (q *syncQueue) syncAtHead() { // we aren't at the head yet, sleep if curr.Number.Int64() < q.goal && curr.Number.Cmp(prev.Number) > 0 { prev = curr + q.s.noGossip = true continue } q.s.syncer.SetSyncing(false) + q.s.noGossip = false // we have received new blocks since the last check, sleep if prev.Number.Int64() < curr.Number.Int64() { @@ -318,12 +315,12 @@ func (q *syncQueue) benchmark() { } if before.Number.Int64() >= q.goal { - finalized, err := q.s.blockState.GetFinalizedHeader(0, 0) //nolint + finalised, err := q.s.blockState.GetFinalizedHeader(0, 0) //nolint if err != 
nil { continue } - logger.Info("💤 node waiting", "head", before.Number, "finalized", finalized.Number) + logger.Info("💤 node waiting", "head", before.Number, "finalised", finalised.Number) time.Sleep(time.Second * 5) continue } @@ -472,6 +469,7 @@ func (q *syncQueue) pushResponse(resp *BlockResponseMessage, pid peer.ID) error } if numJustifications == 0 { + logger.Debug("got empty justification data", "start hash", startHash) return errEmptyJustificationData } @@ -482,7 +480,7 @@ func (q *syncQueue) pushResponse(resp *BlockResponseMessage, pid peer.ID) error from: pid, }) - logger.Info("pushed justification data to queue", "hash", startHash) + logger.Debug("pushed justification data to queue", "hash", startHash) q.responseCh <- justificationResponses return nil } @@ -570,7 +568,7 @@ func (q *syncQueue) trySync(req *syncRequest) { q.updatePeerScore(req.to, -1) } - logger.Trace("trying peers in prioritized order...") + logger.Trace("trying peers in prioritised order...") syncPeers := q.getSortedPeers() for _, peer := range syncPeers { @@ -666,7 +664,7 @@ func (q *syncQueue) handleBlockJustification(data []*types.BlockData) { startHash, endHash := data[0].Hash, data[len(data)-1].Hash logger.Debug("sending justification data to syncer", "start", startHash, "end", endHash) - _, err := q.s.syncer.ProcessBlockData(data) + _, err := q.s.syncer.ProcessJustification(data) if err != nil { logger.Warn("failed to handle block justifications", "error", err) return @@ -689,14 +687,14 @@ func (q *syncQueue) handleBlockJustification(data []*types.BlockData) { } func (q *syncQueue) handleBlockData(data []*types.BlockData) { - bestNum, err := q.s.blockState.BestBlockNumber() + finalised, err := q.s.blockState.GetFinalizedHeader(0, 0) if err != nil { - panic(err) // TODO: don't panic but try again. 
seems blockState needs better concurrency handling + panic(err) // this should never happen } end := data[len(data)-1].Number().Int64() - if end <= bestNum.Int64() { - logger.Debug("ignoring block data that is below our head", "got", end, "head", bestNum.Int64()) + if end <= finalised.Number.Int64() { + logger.Debug("ignoring block data that is below our head", "got", end, "head", finalised.Number.Int64()) q.pushRequest(uint64(end+1), blockRequestBufferSize, "") return } @@ -736,13 +734,23 @@ func (q *syncQueue) handleBlockData(data []*types.BlockData) { func (q *syncQueue) handleBlockDataFailure(idx int, err error, data []*types.BlockData) { logger.Warn("failed to handle block data", "failed on block", q.currStart+int64(idx), "error", err) - if err.Error() == "failed to get parent hash: Key not found" { // TODO: unwrap err + if errors.Is(err, chaindb.ErrKeyNotFound) || errors.Is(err, blocktree.ErrParentNotFound) { + finalised, err := q.s.blockState.GetFinalizedHeader(0, 0) + if err != nil { + panic(err) + } + header, err := types.NewHeaderFromOptional(data[idx].Header) if err != nil { logger.Debug("failed to get header from BlockData", "idx", idx, "error", err) return } + // don't request a chain that's been dropped + if header.Number.Int64() <= finalised.Number.Int64() { + return + } + parentHash := header.ParentHash req := createBlockRequestWithHash(parentHash, 0) @@ -780,6 +788,7 @@ func (q *syncQueue) handleBlockAnnounceHandshake(blockNum uint32, from peer.ID) func (q *syncQueue) handleBlockAnnounce(msg *BlockAnnounceMessage, from peer.ID) { q.updatePeerScore(from, 1) + logger.Debug("received BlockAnnounce", "number", msg.Number, "from", from) header, err := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) if err != nil { @@ -787,7 +796,6 @@ func (q *syncQueue) handleBlockAnnounce(msg *BlockAnnounceMessage, from peer.ID) return } - logger.Debug("received BlockAnnounce!", "number", msg.Number, "hash", header.Hash(), 
"from", from) has, _ := q.s.blockState.HasBlockBody(header.Hash()) if has { return @@ -797,13 +805,16 @@ func (q *syncQueue) handleBlockAnnounce(msg *BlockAnnounceMessage, from peer.ID) return } + q.goal = header.Number.Int64() + bestNum, err := q.s.blockState.BestBlockNumber() if err != nil { logger.Error("failed to get best block number", "error", err) return } - q.goal = header.Number.Int64() + // TODO: if we're at the head, this should request by hash instead of number, since there will + // certainly be blocks with the same number. q.pushRequest(uint64(bestNum.Int64()+1), blockRequestBufferSize, from) } diff --git a/dot/network/sync_justification.go b/dot/network/sync_justification.go index e0897d0d10..2015afb576 100644 --- a/dot/network/sync_justification.go +++ b/dot/network/sync_justification.go @@ -18,62 +18,19 @@ package network import ( "math/big" - "time" -) - -func (q *syncQueue) finalizeAtHead() { - prev, err := q.s.blockState.GetFinalizedHeader(0, 0) - if err != nil { - logger.Error("failed to get latest finalized block header", "error", err) - return - } - - for { - select { - // sleep for average block time TODO: make this configurable from slot duration - case <-time.After(q.slotDuration * 2): - case <-q.ctx.Done(): - return - } - - head, err := q.s.blockState.BestBlockNumber() - if err != nil { - continue - } - if head.Int64() < q.goal { - continue - } - - curr, err := q.s.blockState.GetFinalizedHeader(0, 0) - if err != nil { - continue - } - - logger.Debug("checking finalized blocks", "curr", curr.Number, "prev", prev.Number) - - if curr.Number.Cmp(prev.Number) > 0 { - prev = curr - continue - } - - prev = curr - - start := head.Uint64() - uint64(blockRequestSize) - if curr.Number.Uint64() > start { - start = curr.Number.Uint64() + 1 - } else if int(start) < int(blockRequestSize) { - start = 1 - } + "github.com/libp2p/go-libp2p-core/peer" +) - q.pushJustificationRequest(start) - } +// SendJustificationRequest pushes a justification request to 
the queue to be sent out to the network +func (s *Service) SendJustificationRequest(to peer.ID, num uint32) { + s.syncQueue.pushJustificationRequest(to, uint64(num)) } -func (q *syncQueue) pushJustificationRequest(start uint64) { +func (q *syncQueue) pushJustificationRequest(to peer.ID, start uint64) { startHash, err := q.s.blockState.GetHashByNumber(big.NewInt(int64(start))) if err != nil { - logger.Error("failed to get hash for block w/ number", "number", start, "error", err) + logger.Debug("failed to get hash for block w/ number", "number", start, "error", err) return } @@ -87,6 +44,6 @@ func (q *syncQueue) pushJustificationRequest(start uint64) { q.requestCh <- &syncRequest{ req: req, - to: "", + to: to, } } diff --git a/dot/network/sync_justification_test.go b/dot/network/sync_justification_test.go index 00d854471a..bcb791d9bc 100644 --- a/dot/network/sync_justification_test.go +++ b/dot/network/sync_justification_test.go @@ -18,7 +18,6 @@ package network import ( "context" - "math/big" "testing" "time" @@ -135,34 +134,3 @@ func TestSyncQueue_processBlockResponses_Justification(t *testing.T) { require.True(t, ok) require.Equal(t, 2, score) } - -func TestSyncQueue_finalizeAtHead(t *testing.T) { - q := newTestSyncQueue(t) - q.stop() - time.Sleep(time.Second) - q.ctx = context.Background() - q.slotDuration = time.Millisecond * 200 - - hash, err := q.s.blockState.GetHashByNumber(big.NewInt(1)) - require.NoError(t, err) - - go q.finalizeAtHead() - time.Sleep(time.Second) - - data, has := q.justificationRequestData.Load(hash) - require.True(t, has) - require.Equal(t, requestData{}, data) - - expected := createBlockRequestWithHash(hash, blockRequestSize) - expected.RequestedData = RequestedDataJustification - - select { - case req := <-q.requestCh: - require.Equal(t, &syncRequest{ - req: expected, - to: "", - }, req) - case <-time.After(time.Second): - t.Fatal("did not receive request") - } -} diff --git a/dot/network/sync_test.go b/dot/network/sync_test.go index 
c65d700952..812ec04ca0 100644 --- a/dot/network/sync_test.go +++ b/dot/network/sync_test.go @@ -28,6 +28,7 @@ import ( "github.com/ChainSafe/gossamer/lib/common/optional" "github.com/ChainSafe/gossamer/lib/utils" + "github.com/ChainSafe/chaindb" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" ) @@ -67,7 +68,7 @@ func TestDecodeSyncMessage(t *testing.T) { reqEnc, err := testBlockRequestMessage.Encode() require.NoError(t, err) - msg, err := s.decodeSyncMessage(reqEnc, testPeer) + msg, err := s.decodeSyncMessage(reqEnc, testPeer, true) require.NoError(t, err) req, ok := msg.(*BlockRequestMessage) @@ -425,9 +426,10 @@ func TestSyncQueue_SyncAtHead(t *testing.T) { q.stop() time.Sleep(time.Second) q.ctx = context.Background() + q.slotDuration = time.Millisecond * 100 go q.syncAtHead() - time.Sleep(time.Millisecond * 6100) + time.Sleep(q.slotDuration * 3) select { case req := <-q.requestCh: require.Equal(t, uint64(2), req.req.StartingBlock.Uint64()) @@ -500,7 +502,7 @@ func TestSyncQueue_handleBlockDataFailure_MissingParent(t *testing.T) { q.ctx = context.Background() data := testBlockResponseMessage().BlockData - q.handleBlockDataFailure(0, fmt.Errorf("failed to get parent hash: Key not found"), data) + q.handleBlockDataFailure(0, fmt.Errorf("some error: %w", chaindb.ErrKeyNotFound), data) select { case req := <-q.requestCh: require.True(t, req.req.StartingBlock.IsHash()) diff --git a/dot/network/test_helpers.go b/dot/network/test_helpers.go index 7659867268..65d1855997 100644 --- a/dot/network/test_helpers.go +++ b/dot/network/test_helpers.go @@ -61,6 +61,10 @@ func (s *mockSyncer) ProcessBlockData(data []*types.BlockData) (int, error) { return 0, nil } +func (s *mockSyncer) ProcessJustification(data []*types.BlockData) (int, error) { + return 0, nil +} + func (s *mockSyncer) IsSynced() bool { return s.synced } @@ -95,7 +99,21 @@ func (s *testStreamHandler) handleStream(stream libp2pnetwork.Stream) { func (s *testStreamHandler) 
handleMessage(stream libp2pnetwork.Stream, msg Message) error { msgs := s.messages[stream.Conn().RemotePeer()] s.messages[stream.Conn().RemotePeer()] = append(msgs, msg) - return nil + return s.writeToStream(stream, testBlockAnnounceHandshake) +} + +func (s *testStreamHandler) writeToStream(stream libp2pnetwork.Stream, msg Message) error { + encMsg, err := msg.Encode() + if err != nil { + return err + } + + msgLen := uint64(len(encMsg)) + lenBytes := uint64ToLEB128(msgLen) + encMsg = append(lenBytes, encMsg...) + + _, err = stream.Write(encMsg) + return err } func (s *testStreamHandler) readStream(stream libp2pnetwork.Stream, peer peer.ID, decoder messageDecoder, handler messageHandler) { @@ -115,7 +133,7 @@ func (s *testStreamHandler) readStream(stream libp2pnetwork.Stream, peer peer.ID } // decode message based on message type - msg, err := decoder(msgBytes[:tot], peer) + msg, err := decoder(msgBytes[:tot], peer, isInbound(stream)) if err != nil { logger.Error("Failed to decode message from peer", "peer", peer, "err", err) continue @@ -141,7 +159,7 @@ var testBlockRequestMessage = &BlockRequestMessage{ Max: optional.NewUint32(true, 1), } -func testBlockRequestMessageDecoder(in []byte, _ peer.ID) (Message, error) { +func testBlockRequestMessageDecoder(in []byte, _ peer.ID, _ bool) (Message, error) { msg := new(BlockRequestMessage) err := msg.Decode(in) return msg, err @@ -151,13 +169,17 @@ var testBlockAnnounceMessage = &BlockAnnounceMessage{ Number: big.NewInt(128 * 7), } -func testBlockAnnounceMessageDecoder(in []byte, _ peer.ID) (Message, error) { +var testBlockAnnounceHandshake = &BlockAnnounceHandshake{ + BestBlockNumber: 0, +} + +func testBlockAnnounceMessageDecoder(in []byte, _ peer.ID, _ bool) (Message, error) { msg := new(BlockAnnounceMessage) err := msg.Decode(in) return msg, err } -func testBlockAnnounceHandshakeDecoder(in []byte, _ peer.ID) (Message, error) { +func testBlockAnnounceHandshakeDecoder(in []byte, _ peer.ID, _ bool) (Message, error) { msg 
:= new(BlockAnnounceHandshake) err := msg.Decode(in) return msg, err diff --git a/dot/network/transaction.go b/dot/network/transaction.go index 5131bc23ef..2a2c13f9a2 100644 --- a/dot/network/transaction.go +++ b/dot/network/transaction.go @@ -156,11 +156,11 @@ func decodeTransactionMessage(in []byte) (NotificationsMessage, error) { return msg, err } -func (s *Service) handleTransactionMessage(_ peer.ID, msg NotificationsMessage) error { +func (s *Service) handleTransactionMessage(_ peer.ID, msg NotificationsMessage) (bool, error) { txMsg, ok := msg.(*TransactionMessage) if !ok { - return errors.New("invalid transaction type") + return false, errors.New("invalid transaction type") } - return s.transactionHandler.HandleTransactionMessage(txMsg) + return true, s.transactionHandler.HandleTransactionMessage(txMsg) } diff --git a/dot/network/utils.go b/dot/network/utils.go index 62e9c53c00..74935e3e47 100644 --- a/dot/network/utils.go +++ b/dot/network/utils.go @@ -136,7 +136,7 @@ func saveKey(priv crypto.PrivKey, fp string) (err error) { } func uint64ToLEB128(in uint64) []byte { - out := []byte{} + var out []byte for { b := uint8(in & 0x7f) in >>= 7 @@ -189,7 +189,7 @@ func readStream(stream libp2pnetwork.Stream, buf []byte) (int, error) { } if length == 0 { - return 0, err // TODO: return bytes read from readLEB128ToUint64 + return 0, nil // msg length of 0 is allowed, for example transactions handshake } // TODO: check if length > len(buf), if so probably log.Crit diff --git a/dot/node.go b/dot/node.go index cc11ef1330..4055c35869 100644 --- a/dot/node.go +++ b/dot/node.go @@ -52,12 +52,12 @@ type Node struct { wg sync.WaitGroup } -// InitNode initializes a new dot node from the provided dot node configuration +// InitNode initialises a new dot node from the provided dot node configuration // and JSON formatted genesis file. 
func InitNode(cfg *Config) error { setupLogger(cfg) logger.Info( - "🕸️ initializing node...", + "🕸️ initialising node...", "name", cfg.Global.Name, "id", cfg.Global.ID, "basepath", cfg.Global.BasePath, @@ -98,14 +98,19 @@ func InitNode(cfg *Config) error { stateSrvc.BabeThresholdDenominator = cfg.Core.BabeThresholdDenominator } - // initialize state service with genesis data, block, and trie - err = stateSrvc.Initialize(gen, header, t) + // initialise state service with genesis data, block, and trie + err = stateSrvc.Initialise(gen, header, t) if err != nil { - return fmt.Errorf("failed to initialize state service: %s", err) + return fmt.Errorf("failed to initialise state service: %s", err) + } + + err = storeGlobalNodeName(cfg.Global.Name, cfg.Global.BasePath) + if err != nil { + return fmt.Errorf("failed to store global node name: %s", err) } logger.Info( - "node initialized", + "node initialised", "name", cfg.Global.Name, "id", cfg.Global.ID, "basepath", cfg.Global.BasePath, @@ -122,11 +127,12 @@ func InitNode(cfg *Config) error { func NodeInitialized(basepath string, expected bool) bool { // check if key registry exists registry := path.Join(basepath, "KEYREGISTRY") + _, err := os.Stat(registry) if os.IsNotExist(err) { if expected { - logger.Warn( - "node has not been initialized", + logger.Debug( + "node has not been initialised", "basepath", basepath, "error", "failed to locate KEYREGISTRY file in data directory", ) @@ -134,7 +140,7 @@ func NodeInitialized(basepath string, expected bool) bool { return false } - // initialize database using data directory + // initialise database using data directory db, err := chaindb.NewBadgerDB(&chaindb.Config{ DataDir: basepath, }) @@ -147,24 +153,56 @@ func NodeInitialized(basepath string, expected bool) bool { return false } - // load genesis data from initialized node database - _, err = state.LoadGenesisData(db) + defer func() { + // close database + err = db.Close() + if err != nil { + logger.Error("failed to close 
database", "error", err) + } + }() + + // load genesis data from initialised node database + _, err = state.NewBaseState(db).LoadGenesisData() if err != nil { logger.Warn( - "node has not been initialized", + "node has not been initialised", "basepath", basepath, "error", err, ) return false } - // close database - err = db.Close() + return true +} + +// LoadGlobalNodeName returns the stored global node name from database +func LoadGlobalNodeName(basepath string) (nodename string, err error) { + // initialise database using data directory + db, err := state.SetupDatabase(basepath) if err != nil { - logger.Error("failed to close database", "error", err) + return "", err } - return true + defer func() { + err = db.Close() + if err != nil { + logger.Error("failed to close database", "error", err) + return + } + }() + + basestate := state.NewBaseState(db) + nodename, err = basestate.LoadNodeGlobalName() + if err != nil { + logger.Warn( + "failed to load global node name", + "basepath", basepath, + "error", err, + ) + return "", err + } + + return nodename, err } // NewNode creates a new dot node from a dot node configuration @@ -186,7 +224,7 @@ func NewNode(cfg *Config, ks *keystore.GlobalKeystore, stopFunc func()) (*Node, // Node Services logger.Info( - "🕸️ initializing node services...", + "🕸️ initialising node services...", "name", cfg.Global.Name, "id", cfg.Global.ID, "basepath", cfg.Global.BasePath, @@ -198,6 +236,7 @@ func NewNode(cfg *Config, ks *keystore.GlobalKeystore, stopFunc func()) (*Node, // create state service and append state service to node services stateSrvc, err := createStateService(cfg) + if err != nil { return nil, fmt.Errorf("failed to create state service: %s", err) } @@ -241,25 +280,25 @@ func NewNode(cfg *Config, ks *keystore.GlobalKeystore, stopFunc func()) (*Node, if err != nil { return nil, err } + nodeSrvcs = append(nodeSrvcs, dh) - // Syncer - syncer, err := createSyncService(cfg, stateSrvc, bp, dh, ver, rt) + // create GRANDPA service + 
fg, err := createGRANDPAService(cfg, rt, stateSrvc, dh, ks.Gran, networkSrvc) if err != nil { return nil, err } + nodeSrvcs = append(nodeSrvcs, fg) - // create GRANDPA service - fg, err := createGRANDPAService(cfg, rt, stateSrvc, dh, ks.Gran, networkSrvc) + // Syncer + syncer, err := createSyncService(cfg, stateSrvc, bp, fg, dh, ver, rt) if err != nil { return nil, err } - nodeSrvcs = append(nodeSrvcs, fg) - dh.SetFinalityGadget(fg) // TODO: this should be cleaned up // Core Service // create core service and append core service to node services - coreSrvc, err := createCoreService(cfg, bp, fg, ver, rt, ks, stateSrvc, networkSrvc) + coreSrvc, err := createCoreService(cfg, bp, ver, rt, ks, stateSrvc, networkSrvc) if err != nil { return nil, fmt.Errorf("failed to create core service: %s", err) } @@ -308,7 +347,7 @@ func NewNode(cfg *Config, ks *keystore.GlobalKeystore, stopFunc func()) (*Node, publishMetrics(cfg) } - gd, err := stateSrvc.Storage.GetGenesisData() + gd, err := stateSrvc.Base.LoadGenesisData() if err != nil { return nil, err } @@ -354,6 +393,35 @@ func setupMetricsServer(address string) { }() } +// stores the global node name to reuse +func storeGlobalNodeName(name, basepath string) (err error) { + db, err := state.SetupDatabase(basepath) + if err != nil { + return err + } + + defer func() { + err = db.Close() + if err != nil { + logger.Error("failed to close database", "error", err) + return + } + }() + + basestate := state.NewBaseState(db) + err = basestate.StoreNodeGlobalName(name) + if err != nil { + logger.Warn( + "failed to store global node name", + "basepath", basepath, + "error", err, + ) + return err + } + + return nil +} + // Start starts all dot node services func (n *Node) Start() error { logger.Info("🕸️ starting node services...") diff --git a/dot/node_test.go b/dot/node_test.go index 6411392378..ce3d78ac5a 100644 --- a/dot/node_test.go +++ b/dot/node_test.go @@ -225,7 +225,7 @@ func TestInitNode_LoadGenesisData(t *testing.T) { 
genesisHeader, err := types.NewHeader(common.NewHash([]byte{0}), genTrie.MustHash(), trie.EmptyHash, big.NewInt(0), types.Digest{}) require.NoError(t, err) - err = stateSrvc.Initialize(gen, genesisHeader, genTrie) + err = stateSrvc.Initialise(gen, genesisHeader, genTrie) require.NoError(t, err) err = stateSrvc.Start() @@ -236,7 +236,7 @@ func TestInitNode_LoadGenesisData(t *testing.T) { require.NoError(t, err) }() - gendata, err := state.LoadGenesisData(stateSrvc.DB()) + gendata, err := stateSrvc.Base.LoadGenesisData() require.NoError(t, err) testGenesis := NewTestGenesis(t) @@ -382,3 +382,30 @@ func TestNode_StopFunc(t *testing.T) { node.Stop() require.Equal(t, testvar, "after") } + +func TestNode_PersistGlobalName_WhenInitialize(t *testing.T) { + globalName := RandomNodeName() + + cfg := NewTestConfig(t) + cfg.Global.Name = globalName + require.NotNil(t, cfg) + + genPath := NewTestGenesisAndRuntime(t) + require.NotNil(t, genPath) + + defer utils.RemoveTestDir(t) + + cfg.Core.Roles = types.FullNodeRole + cfg.Core.BabeAuthority = false + cfg.Core.GrandpaAuthority = false + cfg.Core.BabeThresholdNumerator = 0 + cfg.Core.BabeThresholdDenominator = 0 + cfg.Init.Genesis = genPath + + err := InitNode(cfg) + require.NoError(t, err) + + storedName, err := LoadGlobalNodeName(cfg.Global.BasePath) + require.Nil(t, err) + require.Equal(t, globalName, storedName) +} diff --git a/dot/rpc/dot_up_codec.go b/dot/rpc/dot_up_codec.go index 14ce815762..013698128e 100644 --- a/dot/rpc/dot_up_codec.go +++ b/dot/rpc/dot_up_codec.go @@ -53,7 +53,7 @@ func (c *DotUpCodec) NewRequest(r *http.Request) rpc.CodecRequest { // DotUpCodecRequest decodes and encodes a single request. UpCodecRequest // implements gorilla/rpc.CodecRequest interface primarily by embedding // the CodecRequest from gorilla/rpc/json. 
By selectively adding -// CodecRequest methods to UpCodecRequest, we can modify that behavior +// CodecRequest methods to UpCodecRequest, we can modify that behaviour // while maintaining all the other remaining CodecRequest methods from // gorilla's rpc/json implementation type DotUpCodecRequest struct { diff --git a/dot/rpc/http.go b/dot/rpc/http.go index fd98877331..76038e3847 100644 --- a/dot/rpc/http.go +++ b/dot/rpc/http.go @@ -107,7 +107,7 @@ func (h *HTTPServer) RegisterModules(mods []string) { case "dev": srvc = modules.NewDevModule(h.serverConfig.BlockProducerAPI, h.serverConfig.NetworkAPI) default: - h.logger.Warn("Unrecognized module", "module", mod) + h.logger.Warn("Unrecognised module", "module", mod) continue } @@ -177,9 +177,8 @@ func (h *HTTPServer) Stop() error { for _, conn := range h.wsConns { for _, sub := range conn.Subscriptions { switch v := sub.(type) { - case *subscription.StorageChangeListener: - h.serverConfig.StorageAPI.UnregisterStorageChangeChannel(v.ChanID) - close(v.Channel) + case *subscription.StorageObserver: + h.serverConfig.StorageAPI.UnregisterStorageObserver(v) case *subscription.BlockListener: h.serverConfig.BlockAPI.UnregisterImportedChannel(v.ChanID) close(v.Channel) @@ -234,8 +233,8 @@ func (h *HTTPServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { func NewWSConn(conn *websocket.Conn, cfg *HTTPServerConfig) *subscription.WSConn { c := &subscription.WSConn{ Wsconn: conn, - Subscriptions: make(map[int]subscription.Listener), - BlockSubChannels: make(map[int]byte), + Subscriptions: make(map[uint]subscription.Listener), + BlockSubChannels: make(map[uint]byte), StorageSubChannels: make(map[int]byte), StorageAPI: cfg.StorageAPI, BlockAPI: cfg.BlockAPI, diff --git a/dot/rpc/modules/api.go b/dot/rpc/modules/api.go index 28eea721d5..ce7356d1d6 100644 --- a/dot/rpc/modules/api.go +++ b/dot/rpc/modules/api.go @@ -16,10 +16,10 @@ type StorageAPI interface { GetStorage(root *common.Hash, key []byte) ([]byte, error) 
GetStorageByBlockHash(bhash common.Hash, key []byte) ([]byte, error) Entries(root *common.Hash) (map[string][]byte, error) - RegisterStorageChangeChannel(sub state.StorageSubscription) (byte, error) - UnregisterStorageChangeChannel(id byte) GetStateRootFromBlock(bhash *common.Hash) (*common.Hash, error) GetKeysWithPrefix(root *common.Hash, prefix []byte) ([][]byte, error) + RegisterStorageObserver(observer state.Observer) + UnregisterStorageObserver(observer state.Observer) } // BlockAPI is the interface for the block state @@ -33,7 +33,7 @@ type BlockAPI interface { GetJustification(hash common.Hash) ([]byte, error) RegisterImportedChannel(ch chan<- *types.Block) (byte, error) UnregisterImportedChannel(id byte) - RegisterFinalizedChannel(ch chan<- *types.Header) (byte, error) + RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) UnregisterFinalizedChannel(id byte) SubChain(start, end common.Hash) ([]common.Hash, error) } diff --git a/dot/rpc/modules/author.go b/dot/rpc/modules/author.go index 2066e4d40c..5b9d68c4bd 100644 --- a/dot/rpc/modules/author.go +++ b/dot/rpc/modules/author.go @@ -68,8 +68,8 @@ type KeyRotateResponse []byte type ExtrinsicStatus struct { IsFuture bool IsReady bool - IsFinalized bool - AsFinalized common.Hash + Isfinalised bool + Asfinalised common.Hash IsUsurped bool AsUsurped common.Hash IsBroadcast bool diff --git a/dot/rpc/modules/chain.go b/dot/rpc/modules/chain.go index d4fded3689..bebf96d250 100644 --- a/dot/rpc/modules/chain.go +++ b/dot/rpc/modules/chain.go @@ -133,7 +133,7 @@ func (cm *ChainModule) GetHead(r *http.Request, req *ChainBlockNumberRequest, re return cm.GetBlockHash(r, req, res) } -// GetFinalizedHead returns the most recently finalized block hash +// GetFinalizedHead returns the most recently finalised block hash func (cm *ChainModule) GetFinalizedHead(r *http.Request, req *EmptyRequest, res *ChainHashResponse) error { h, err := cm.blockAPI.GetFinalizedHash(0, 0) if err != nil { @@ -144,7 +144,7 
@@ func (cm *ChainModule) GetFinalizedHead(r *http.Request, req *EmptyRequest, res return nil } -// GetFinalizedHeadByRound returns the hash of the block finalized at the given round and setID +// GetFinalizedHeadByRound returns the hash of the block finalised at the given round and setID func (cm *ChainModule) GetFinalizedHeadByRound(r *http.Request, req *ChainFinalizedHeadRequest, res *ChainHashResponse) error { h, err := cm.blockAPI.GetFinalizedHash(req.Round, req.SetID) if err != nil { diff --git a/dot/rpc/modules/chain_test.go b/dot/rpc/modules/chain_test.go index 8f9f3cd6e4..cb00fcf714 100644 --- a/dot/rpc/modules/chain_test.go +++ b/dot/rpc/modules/chain_test.go @@ -283,7 +283,7 @@ func newTestStateService(t *testing.T) *state.Service { stateSrvc := state.NewService(testDatadirPath, log.LvlInfo) stateSrvc.UseMemDB() - err = stateSrvc.Initialize(gen, genesisHeader, genTrie) + err = stateSrvc.Initialise(gen, genesisHeader, genTrie) if err != nil { t.Fatal(err) } diff --git a/dot/rpc/modules/grandpa.go b/dot/rpc/modules/grandpa.go index dd9d15ffa7..0ceafe7a24 100644 --- a/dot/rpc/modules/grandpa.go +++ b/dot/rpc/modules/grandpa.go @@ -44,7 +44,7 @@ type ProveFinalityRequest struct { // ProveFinalityResponse is an optional SCALE encoded proof array type ProveFinalityResponse [][]byte -// ProveFinality for the provided block range. Returns NULL if there are no known finalized blocks in the range. If no authorities set is provided, the current one will be attempted. +// ProveFinality for the provided block range. Returns NULL if there are no known finalised blocks in the range. If no authorities set is provided, the current one will be attempted. 
func (gm *GrandpaModule) ProveFinality(r *http.Request, req *ProveFinalityRequest, res *ProveFinalityResponse) error { blocksToCheck, err := gm.blockAPI.SubChain(req.blockHashStart, req.blockHashEnd) if err != nil { diff --git a/dot/rpc/modules/system_test.go b/dot/rpc/modules/system_test.go index 98c52935a1..a632029709 100644 --- a/dot/rpc/modules/system_test.go +++ b/dot/rpc/modules/system_test.go @@ -51,6 +51,10 @@ func (s *mockSyncer) ProcessBlockData(_ []*types.BlockData) (int, error) { return 0, nil } +func (s *mockSyncer) ProcessJustification(_ []*types.BlockData) (int, error) { + return 0, nil +} + func (s *mockSyncer) HandleBlockAnnounce(msg *network.BlockAnnounceMessage) error { return nil } diff --git a/dot/rpc/subscription/listeners.go b/dot/rpc/subscription/listeners.go index 27a7b89c28..579cfbba88 100644 --- a/dot/rpc/subscription/listeners.go +++ b/dot/rpc/subscription/listeners.go @@ -30,119 +30,67 @@ type Listener interface { Listen() } -func (c *WSConn) startListener(lid int) { - go c.Subscriptions[lid].Listen() +// WSConnAPI interface defining methors a WSConn should have +type WSConnAPI interface { + safeSend(interface{}) } -func (c *WSConn) initStorageChangeListener(reqID float64, params interface{}) (int, error) { - scl := &StorageChangeListener{ - Channel: make(chan *state.SubscriptionResult), - wsconn: c, - } - sub := &state.StorageSubscription{ - Filter: make(map[string]bool), - Listener: scl.Channel, - } +// StorageObserver struct to hold data for observer (Observer Design Pattern) +type StorageObserver struct { + id uint + filter map[string][]byte + wsconn WSConnAPI +} - pA := params.([]interface{}) - for _, param := range pA { - switch p := param.(type) { - case []interface{}: - for _, pp := range param.([]interface{}) { - sub.Filter[pp.(string)] = true - } - case string: - sub.Filter[p] = true - default: - return 0, fmt.Errorf("unknow parameter type") - } - } +// Change type defining key value pair representing change +type Change 
[2]string - if c.StorageAPI == nil { - c.safeSendError(reqID, nil, "error StorageAPI not set") - return 0, fmt.Errorf("error StorageAPI not set") - } +// ChangeResult struct to hold change result data +type ChangeResult struct { + Changes []Change `json:"changes"` + Block string `json:"block"` +} - chanID, err := c.StorageAPI.RegisterStorageChangeChannel(*sub) - if err != nil { - return 0, err +// Update is called to notify observer of new value +func (s *StorageObserver) Update(change *state.SubscriptionResult) { + if change == nil { + return } - scl.ChanID = chanID - - c.qtyListeners++ - scl.subID = c.qtyListeners - c.Subscriptions[scl.subID] = scl - c.StorageSubChannels[scl.subID] = chanID - initRes := newSubscriptionResponseJSON(scl.subID, reqID) - c.safeSend(initRes) + changeResult := ChangeResult{ + Block: change.Hash.String(), + Changes: make([]Change, len(change.Changes)), + } + for i, v := range change.Changes { + changeResult.Changes[i] = Change{common.BytesToHex(v.Key), common.BytesToHex(v.Value)} + } - return scl.subID, nil + res := newSubcriptionBaseResponseJSON() + res.Method = "state_storage" + res.Params.Result = changeResult + res.Params.SubscriptionID = s.GetID() + s.wsconn.safeSend(res) } -// StorageChangeListener for listening to state change channels -type StorageChangeListener struct { - Channel chan *state.SubscriptionResult - wsconn *WSConn - ChanID byte - subID int +// GetID the id for the Observer +func (s *StorageObserver) GetID() uint { + return s.id } -// Listen implementation of Listen interface to listen for importedChan changes -func (l *StorageChangeListener) Listen() { - for change := range l.Channel { - if change == nil { - continue - } - - result := make(map[string]interface{}) - result["block"] = change.Hash.String() - changes := [][]string{} - for _, v := range change.Changes { - kv := []string{common.BytesToHex(v.Key), common.BytesToHex(v.Value)} - changes = append(changes, kv) - } - result["changes"] = changes - - res := 
newSubcriptionBaseResponseJSON() - res.Method = "state_storage" - res.Params.Result = result - res.Params.SubscriptionID = l.subID - l.wsconn.safeSend(res) - } +// GetFilter returns the filter the Observer is using +func (s *StorageObserver) GetFilter() map[string][]byte { + return s.filter } +// Listen to satisfy Listener interface (but is no longer used by StorageObserver) +func (s *StorageObserver) Listen() {} + // BlockListener to handle listening for blocks importedChan type BlockListener struct { Channel chan *types.Block - wsconn *WSConn + wsconn WSConnAPI ChanID byte - subID int -} - -func (c *WSConn) initBlockListener(reqID float64) (int, error) { - bl := &BlockListener{ - Channel: make(chan *types.Block), - wsconn: c, - } - - if c.BlockAPI == nil { - c.safeSendError(reqID, nil, "error BlockAPI not set") - return 0, fmt.Errorf("error BlockAPI not set") - } - chanID, err := c.BlockAPI.RegisterImportedChannel(bl.Channel) - if err != nil { - return 0, err - } - bl.ChanID = chanID - c.qtyListeners++ - bl.subID = c.qtyListeners - c.Subscriptions[bl.subID] = bl - c.BlockSubChannels[bl.subID] = chanID - initRes := newSubscriptionResponseJSON(bl.subID, reqID) - c.safeSend(initRes) - - return bl.subID, nil + subID uint } // Listen implementation of Listen interface to listen for importedChan changes @@ -164,46 +112,21 @@ func (l *BlockListener) Listen() { } } -// BlockFinalizedListener to handle listening for finalized blocks +// BlockFinalizedListener to handle listening for finalised blocks type BlockFinalizedListener struct { - channel chan *types.Header - wsconn *WSConn + channel chan *types.FinalisationInfo + wsconn WSConnAPI chanID byte - subID int -} - -func (c *WSConn) initBlockFinalizedListener(reqID float64) (int, error) { - bfl := &BlockFinalizedListener{ - channel: make(chan *types.Header), - wsconn: c, - } - - if c.BlockAPI == nil { - c.safeSendError(reqID, nil, "error BlockAPI not set") - return 0, fmt.Errorf("error BlockAPI not set") - } - chanID, 
err := c.BlockAPI.RegisterFinalizedChannel(bfl.channel) - if err != nil { - return 0, err - } - bfl.chanID = chanID - c.qtyListeners++ - bfl.subID = c.qtyListeners - c.Subscriptions[bfl.subID] = bfl - c.BlockSubChannels[bfl.subID] = chanID - initRes := newSubscriptionResponseJSON(bfl.subID, reqID) - c.safeSend(initRes) - - return bfl.subID, nil + subID uint } // Listen implementation of Listen interface to listen for importedChan changes func (l *BlockFinalizedListener) Listen() { - for header := range l.channel { - if header == nil { + for info := range l.channel { + if info == nil || info.Header == nil { continue } - head, err := modules.HeaderToJSON(*header) + head, err := modules.HeaderToJSON(*info.Header) if err != nil { logger.Error("failed to convert header to JSON", "error", err) } @@ -217,69 +140,20 @@ func (l *BlockFinalizedListener) Listen() { // ExtrinsicSubmitListener to handle listening for extrinsic events type ExtrinsicSubmitListener struct { - wsconn *WSConn - subID int + wsconn WSConnAPI + subID uint extrinsic types.Extrinsic importedChan chan *types.Block importedChanID byte importedHash common.Hash - finalizedChan chan *types.Header - finalizedChanID byte + finalisedChan chan *types.FinalisationInfo + finalisedChanID byte } // AuthorExtrinsicUpdates method name const AuthorExtrinsicUpdates = "author_extrinsicUpdate" -func (c *WSConn) initExtrinsicWatch(reqID float64, params interface{}) (int, error) { - pA := params.([]interface{}) - extBytes, err := common.HexToBytes(pA[0].(string)) - if err != nil { - return 0, err - } - - // listen for built blocks - esl := &ExtrinsicSubmitListener{ - importedChan: make(chan *types.Block), - wsconn: c, - extrinsic: types.Extrinsic(extBytes), - finalizedChan: make(chan *types.Header), - } - - if c.BlockAPI == nil { - return 0, fmt.Errorf("error BlockAPI not set") - } - esl.importedChanID, err = c.BlockAPI.RegisterImportedChannel(esl.importedChan) - if err != nil { - return 0, err - } - - esl.finalizedChanID, 
err = c.BlockAPI.RegisterFinalizedChannel(esl.finalizedChan) - if err != nil { - return 0, err - } - - c.qtyListeners++ - esl.subID = c.qtyListeners - c.Subscriptions[esl.subID] = esl - c.BlockSubChannels[esl.subID] = esl.importedChanID - - err = c.CoreAPI.HandleSubmittedExtrinsic(extBytes) - if err != nil { - return 0, err - } - c.safeSend(newSubscriptionResponseJSON(esl.subID, reqID)) - - // TODO (ed) since HandleSubmittedExtrinsic has been called we assume the extrinsic is in the tx queue - // should we add a channel to tx queue so we're notified when it's in the queue - if c.CoreAPI.IsBlockProducer() { - c.safeSend(newSubscriptionResponse(AuthorExtrinsicUpdates, esl.subID, "ready")) - } - - // todo (ed) determine which peer extrinsic has been broadcast to, and set status - return esl.subID, err -} - // Listen implementation of Listen interface to listen for importedChan changes func (l *ExtrinsicSubmitListener) Listen() { // listen for imported blocks with extrinsic @@ -304,12 +178,12 @@ func (l *ExtrinsicSubmitListener) Listen() { } }() - // listen for finalized headers + // listen for finalised headers go func() { - for header := range l.finalizedChan { - if reflect.DeepEqual(l.importedHash, header.Hash()) { + for info := range l.finalisedChan { + if reflect.DeepEqual(l.importedHash, info.Header.Hash()) { resM := make(map[string]interface{}) - resM["finalized"] = header.Hash().String() + resM["finalised"] = info.Header.Hash().String() l.wsconn.safeSend(newSubscriptionResponse(AuthorExtrinsicUpdates, l.subID, resM)) } } @@ -319,28 +193,13 @@ func (l *ExtrinsicSubmitListener) Listen() { // RuntimeVersionListener to handle listening for Runtime Version type RuntimeVersionListener struct { wsconn *WSConn - subID int -} - -func (c *WSConn) initRuntimeVersionListener(reqID float64) (int, error) { - rvl := &RuntimeVersionListener{ - wsconn: c, - } - if c.CoreAPI == nil { - c.safeSendError(reqID, nil, "error CoreAPI not set") - return 0, fmt.Errorf("error CoreAPI not 
set") - } - c.qtyListeners++ - rvl.subID = c.qtyListeners - c.Subscriptions[rvl.subID] = rvl - initRes := newSubscriptionResponseJSON(rvl.subID, reqID) - c.safeSend(initRes) - - return rvl.subID, nil + subID uint } // Listen implementation of Listen interface to listen for runtime version changes func (l *RuntimeVersionListener) Listen() { + // This sends current runtime version once when subscription is created + // TODO (ed) add logic to send updates when runtime version changes rtVersion, err := l.wsconn.CoreAPI.GetRuntimeVersion(nil) if err != nil { return diff --git a/dot/rpc/subscription/listeners_test.go b/dot/rpc/subscription/listeners_test.go new file mode 100644 index 0000000000..13a20914bc --- /dev/null +++ b/dot/rpc/subscription/listeners_test.go @@ -0,0 +1,161 @@ +// Copyright 2020 ChainSafe Systems (ON) Corp. +// This file is part of gossamer. +// +// The gossamer library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The gossamer library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the gossamer library. If not, see . 
+ +package subscription + +import ( + "math/big" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/stretchr/testify/require" +) + +type MockWSConnAPI struct { + lastMessage BaseResponseJSON +} + +func (m *MockWSConnAPI) safeSend(msg interface{}) { + m.lastMessage = msg.(BaseResponseJSON) +} + +func TestStorageObserver_Update(t *testing.T) { + mockConnection := &MockWSConnAPI{} + storageObserver := StorageObserver{ + id: 0, + wsconn: mockConnection, + } + + data := []state.KeyValue{{ + Key: []byte("key"), + Value: []byte("value"), + }} + change := &state.SubscriptionResult{ + Hash: common.Hash{}, + Changes: data, + } + + expected := ChangeResult{ + Block: change.Hash.String(), + Changes: make([]Change, len(change.Changes)), + } + for i, v := range change.Changes { + expected.Changes[i] = Change{common.BytesToHex(v.Key), common.BytesToHex(v.Value)} + } + + expectedRespones := newSubcriptionBaseResponseJSON() + expectedRespones.Method = "state_storage" + expectedRespones.Params.Result = expected + + storageObserver.Update(change) + time.Sleep(time.Millisecond * 10) + require.Equal(t, expectedRespones, mockConnection.lastMessage) +} + +func TestBlockListener_Listen(t *testing.T) { + notifyChan := make(chan *types.Block) + mockConnection := &MockWSConnAPI{} + bl := BlockListener{ + Channel: notifyChan, + wsconn: mockConnection, + } + + block := types.NewEmptyBlock() + block.Header.Number = big.NewInt(1) + + head, err := modules.HeaderToJSON(*block.Header) + require.NoError(t, err) + + expectedResposnse := newSubcriptionBaseResponseJSON() + expectedResposnse.Method = "chain_newHead" + expectedResposnse.Params.Result = head + + go bl.Listen() + + notifyChan <- block + time.Sleep(time.Millisecond * 10) + require.Equal(t, expectedResposnse, mockConnection.lastMessage) +} + +func 
TestBlockFinalizedListener_Listen(t *testing.T) { + notifyChan := make(chan *types.FinalisationInfo) + mockConnection := &MockWSConnAPI{} + bfl := BlockFinalizedListener{ + channel: notifyChan, + wsconn: mockConnection, + } + + header := types.NewEmptyHeader() + head, err := modules.HeaderToJSON(*header) + if err != nil { + logger.Error("failed to convert header to JSON", "error", err) + } + expectedResponse := newSubcriptionBaseResponseJSON() + expectedResponse.Method = "chain_finalizedHead" + expectedResponse.Params.Result = head + + go bfl.Listen() + + notifyChan <- &types.FinalisationInfo{ + Header: header, + } + time.Sleep(time.Millisecond * 10) + require.Equal(t, expectedResponse, mockConnection.lastMessage) +} + +func TestExtrinsicSubmitListener_Listen(t *testing.T) { + notifyImportedChan := make(chan *types.Block) + notifyFinalizedChan := make(chan *types.FinalisationInfo) + + mockConnection := &MockWSConnAPI{} + esl := ExtrinsicSubmitListener{ + importedChan: notifyImportedChan, + finalisedChan: notifyFinalizedChan, + wsconn: mockConnection, + extrinsic: types.Extrinsic{1, 2, 3}, + } + header := types.NewEmptyHeader() + exts := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}} + + body, err := types.NewBodyFromExtrinsics(exts) + require.NoError(t, err) + + block := &types.Block{ + Header: header, + Body: body, + } + + resImported := map[string]interface{}{"inBlock": block.Header.Hash().String()} + expectedImportedRespones := newSubscriptionResponse(AuthorExtrinsicUpdates, esl.subID, resImported) + + go esl.Listen() + + notifyImportedChan <- block + time.Sleep(time.Millisecond * 10) + require.Equal(t, expectedImportedRespones, mockConnection.lastMessage) + + notifyFinalizedChan <- &types.FinalisationInfo{ + Header: header, + } + time.Sleep(time.Millisecond * 10) + resFinalised := map[string]interface{}{"finalised": block.Header.Hash().String()} + expectedFinalizedRespones := newSubscriptionResponse(AuthorExtrinsicUpdates, esl.subID, resFinalised) + 
require.Equal(t, expectedFinalizedRespones, mockConnection.lastMessage) +} diff --git a/dot/rpc/subscription/messages.go b/dot/rpc/subscription/messages.go index 0722483689..d6df1258dd 100644 --- a/dot/rpc/subscription/messages.go +++ b/dot/rpc/subscription/messages.go @@ -15,10 +15,6 @@ // along with the gossamer library. If not, see . package subscription -import ( - "math/big" -) - // BaseResponseJSON for base json response type BaseResponseJSON struct { Jsonrpc string `json:"jsonrpc"` @@ -29,7 +25,7 @@ type BaseResponseJSON struct { // Params for json param response type Params struct { Result interface{} `json:"result"` - SubscriptionID int `json:"subscription"` + SubscriptionID uint `json:"subscription"` } func newSubcriptionBaseResponseJSON() BaseResponseJSON { @@ -38,7 +34,7 @@ func newSubcriptionBaseResponseJSON() BaseResponseJSON { } } -func newSubscriptionResponse(method string, subID int, result interface{}) BaseResponseJSON { +func newSubscriptionResponse(method string, subID uint, result interface{}) BaseResponseJSON { return BaseResponseJSON{ Jsonrpc: "2.0", Method: method, @@ -52,52 +48,14 @@ func newSubscriptionResponse(method string, subID int, result interface{}) BaseR // ResponseJSON for json subscription responses type ResponseJSON struct { Jsonrpc string `json:"jsonrpc"` - Result int `json:"result"` + Result uint `json:"result"` ID float64 `json:"id"` } -func newSubscriptionResponseJSON(subID int, reqID float64) ResponseJSON { +func newSubscriptionResponseJSON(subID uint, reqID float64) ResponseJSON { return ResponseJSON{ Jsonrpc: "2.0", Result: subID, ID: reqID, } } - -// ErrorResponseJSON json for error responses -type ErrorResponseJSON struct { - Jsonrpc string `json:"jsonrpc"` - Error *ErrorMessageJSON `json:"error"` - ID float64 `json:"id"` -} - -// ErrorMessageJSON json for error messages -type ErrorMessageJSON struct { - Code *big.Int `json:"code"` - Message string `json:"message"` -} - -func (c *WSConn) safeSend(msg interface{}) { - 
c.mu.Lock() - defer c.mu.Unlock() - err := c.Wsconn.WriteJSON(msg) - if err != nil { - logger.Debug("error sending websocket message", "error", err) - } -} -func (c *WSConn) safeSendError(reqID float64, errorCode *big.Int, message string) { - res := &ErrorResponseJSON{ - Jsonrpc: "2.0", - Error: &ErrorMessageJSON{ - Code: errorCode, - Message: message, - }, - ID: reqID, - } - c.mu.Lock() - defer c.mu.Unlock() - err := c.Wsconn.WriteJSON(res) - if err != nil { - logger.Debug("error sending websocket message", "error", err) - } -} diff --git a/dot/rpc/subscription/websocket.go b/dot/rpc/subscription/websocket.go index fa4a878638..40fd48bedc 100644 --- a/dot/rpc/subscription/websocket.go +++ b/dot/rpc/subscription/websocket.go @@ -27,20 +27,22 @@ import ( "sync" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" log "github.com/ChainSafe/log15" "github.com/gorilla/websocket" ) -var logger = log.New("pkg", "subscription") +var logger = log.New("pkg", "rpc/subscription") // WSConn struct to hold WebSocket Connection references type WSConn struct { Wsconn *websocket.Conn mu sync.Mutex - BlockSubChannels map[int]byte + BlockSubChannels map[uint]byte StorageSubChannels map[int]byte - qtyListeners int - Subscriptions map[int]Listener + qtyListeners uint + Subscriptions map[uint]Listener StorageAPI modules.StorageAPI BlockAPI modules.BlockAPI RuntimeAPI modules.RuntimeAPI @@ -81,16 +83,16 @@ func (c *WSConn) HandleComm() { } c.startListener(bl) case "state_subscribeStorage": - scl, err2 := c.initStorageChangeListener(reqid, params) + _, err2 := c.initStorageChangeListener(reqid, params) if err2 != nil { logger.Warn("failed to create state change listener", "error", err2) continue } - c.startListener(scl) + case "chain_subscribeFinalizedHeads": bfl, err3 := c.initBlockFinalizedListener(reqid) if err3 != nil { - logger.Warn("failed to create block finalized", "error", err3) + 
logger.Warn("failed to create block finalised", "error", err3) continue } c.startListener(bfl) @@ -161,3 +163,206 @@ func (c *WSConn) HandleComm() { c.safeSend(wsSend) } } + +func (c *WSConn) initStorageChangeListener(reqID float64, params interface{}) (uint, error) { + if c.StorageAPI == nil { + c.safeSendError(reqID, nil, "error StorageAPI not set") + return 0, fmt.Errorf("error StorageAPI not set") + } + + myObs := &StorageObserver{ + filter: make(map[string][]byte), + wsconn: c, + } + + pA, ok := params.([]interface{}) + if !ok { + return 0, fmt.Errorf("unknown parameter type") + } + for _, param := range pA { + switch p := param.(type) { + case []interface{}: + for _, pp := range param.([]interface{}) { + data, ok := pp.(string) + if !ok { + return 0, fmt.Errorf("unknown parameter type") + } + myObs.filter[data] = []byte{} + } + case string: + myObs.filter[p] = []byte{} + default: + return 0, fmt.Errorf("unknown parameter type") + } + } + + c.qtyListeners++ + myObs.id = c.qtyListeners + + c.StorageAPI.RegisterStorageObserver(myObs) + + c.Subscriptions[myObs.id] = myObs + + initRes := newSubscriptionResponseJSON(myObs.id, reqID) + c.safeSend(initRes) + + return myObs.id, nil +} + +func (c *WSConn) initBlockListener(reqID float64) (uint, error) { + bl := &BlockListener{ + Channel: make(chan *types.Block), + wsconn: c, + } + + if c.BlockAPI == nil { + c.safeSendError(reqID, nil, "error BlockAPI not set") + return 0, fmt.Errorf("error BlockAPI not set") + } + chanID, err := c.BlockAPI.RegisterImportedChannel(bl.Channel) + if err != nil { + return 0, err + } + bl.ChanID = chanID + c.qtyListeners++ + bl.subID = c.qtyListeners + c.Subscriptions[bl.subID] = bl + c.BlockSubChannels[bl.subID] = chanID + initRes := newSubscriptionResponseJSON(bl.subID, reqID) + c.safeSend(initRes) + + return bl.subID, nil +} + +func (c *WSConn) initBlockFinalizedListener(reqID float64) (uint, error) { + bfl := &BlockFinalizedListener{ + channel: make(chan *types.FinalisationInfo), + 
wsconn: c, + } + + if c.BlockAPI == nil { + c.safeSendError(reqID, nil, "error BlockAPI not set") + return 0, fmt.Errorf("error BlockAPI not set") + } + chanID, err := c.BlockAPI.RegisterFinalizedChannel(bfl.channel) + if err != nil { + return 0, err + } + bfl.chanID = chanID + c.qtyListeners++ + bfl.subID = c.qtyListeners + c.Subscriptions[bfl.subID] = bfl + c.BlockSubChannels[bfl.subID] = chanID + initRes := newSubscriptionResponseJSON(bfl.subID, reqID) + c.safeSend(initRes) + + return bfl.subID, nil +} + +func (c *WSConn) initExtrinsicWatch(reqID float64, params interface{}) (uint, error) { + pA := params.([]interface{}) + extBytes, err := common.HexToBytes(pA[0].(string)) + if err != nil { + return 0, err + } + + // listen for built blocks + esl := &ExtrinsicSubmitListener{ + importedChan: make(chan *types.Block), + wsconn: c, + extrinsic: types.Extrinsic(extBytes), + finalisedChan: make(chan *types.FinalisationInfo), + } + + if c.BlockAPI == nil { + return 0, fmt.Errorf("error BlockAPI not set") + } + esl.importedChanID, err = c.BlockAPI.RegisterImportedChannel(esl.importedChan) + if err != nil { + return 0, err + } + + esl.finalisedChanID, err = c.BlockAPI.RegisterFinalizedChannel(esl.finalisedChan) + if err != nil { + return 0, err + } + + c.qtyListeners++ + esl.subID = c.qtyListeners + c.Subscriptions[esl.subID] = esl + c.BlockSubChannels[esl.subID] = esl.importedChanID + + err = c.CoreAPI.HandleSubmittedExtrinsic(extBytes) + if err != nil { + return 0, err + } + c.safeSend(newSubscriptionResponseJSON(esl.subID, reqID)) + + // TODO (ed) since HandleSubmittedExtrinsic has been called we assume the extrinsic is in the tx queue + // should we add a channel to tx queue so we're notified when it's in the queue (See issue #1535) + if c.CoreAPI.IsBlockProducer() { + c.safeSend(newSubscriptionResponse(AuthorExtrinsicUpdates, esl.subID, "ready")) + } + + // todo (ed) determine which peer extrinsic has been broadcast to, and set status + return esl.subID, err +} + 
+func (c *WSConn) initRuntimeVersionListener(reqID float64) (uint, error) { + rvl := &RuntimeVersionListener{ + wsconn: c, + } + if c.CoreAPI == nil { + c.safeSendError(reqID, nil, "error CoreAPI not set") + return 0, fmt.Errorf("error CoreAPI not set") + } + c.qtyListeners++ + rvl.subID = c.qtyListeners + c.Subscriptions[rvl.subID] = rvl + initRes := newSubscriptionResponseJSON(rvl.subID, reqID) + c.safeSend(initRes) + + return rvl.subID, nil +} + +func (c *WSConn) safeSend(msg interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + err := c.Wsconn.WriteJSON(msg) + if err != nil { + logger.Debug("error sending websocket message", "error", err) + } +} +func (c *WSConn) safeSendError(reqID float64, errorCode *big.Int, message string) { + res := &ErrorResponseJSON{ + Jsonrpc: "2.0", + Error: &ErrorMessageJSON{ + Code: errorCode, + Message: message, + }, + ID: reqID, + } + c.mu.Lock() + defer c.mu.Unlock() + err := c.Wsconn.WriteJSON(res) + if err != nil { + logger.Debug("error sending websocket message", "error", err) + } +} + +// ErrorResponseJSON json for error responses +type ErrorResponseJSON struct { + Jsonrpc string `json:"jsonrpc"` + Error *ErrorMessageJSON `json:"error"` + ID float64 `json:"id"` +} + +// ErrorMessageJSON json for error messages +type ErrorMessageJSON struct { + Code *big.Int `json:"code"` + Message string `json:"message"` +} + +func (c *WSConn) startListener(lid uint) { + go c.Subscriptions[lid].Listen() +} diff --git a/dot/rpc/subscription/websocket_test.go b/dot/rpc/subscription/websocket_test.go new file mode 100644 index 0000000000..735df61fac --- /dev/null +++ b/dot/rpc/subscription/websocket_test.go @@ -0,0 +1,265 @@ +package subscription + +import ( + "log" + "math/big" + "net/http" + "os" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto" + "github.com/ChainSafe/gossamer/lib/runtime" + 
"github.com/gorilla/websocket" + "github.com/stretchr/testify/require" +) + +var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { return true }, +} +var wsconn = &WSConn{ + Subscriptions: make(map[uint]Listener), + BlockSubChannels: make(map[uint]byte), +} + +func handler(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Print("upgrade:", err) + return + } + defer c.Close() + + wsconn.Wsconn = c + wsconn.HandleComm() +} + +func TestMain(m *testing.M) { + http.HandleFunc("/", handler) + + go func() { + err := http.ListenAndServe("localhost:8546", nil) + if err != nil { + log.Fatal("error", err) + } + }() + time.Sleep(time.Millisecond * 100) + // Start all tests + os.Exit(m.Run()) +} + +func TestWSConn_HandleComm(t *testing.T) { + c, _, err := websocket.DefaultDialer.Dial("ws://localhost:8546", nil) //nolint + if err != nil { + log.Fatal("dial:", err) + } + defer c.Close() + + // test storageChangeListener + res, err := wsconn.initStorageChangeListener(1, nil) + require.EqualError(t, err, "error StorageAPI not set") + require.Equal(t, uint(0), res) + _, msg, err := c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","error":{"code":null,"message":"error StorageAPI not set"},"id":1}`+"\n"), msg) + + wsconn.StorageAPI = new(MockStorageAPI) + + res, err = wsconn.initStorageChangeListener(1, nil) + require.EqualError(t, err, "unknown parameter type") + require.Equal(t, uint(0), res) + + res, err = wsconn.initStorageChangeListener(2, []interface{}{}) + require.NoError(t, err) + require.Equal(t, uint(1), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":1,"id":2}`+"\n"), msg) + + res, err = wsconn.initStorageChangeListener(3, []interface{}{"0x26aa"}) + require.NoError(t, err) + require.Equal(t, uint(2), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, 
[]byte(`{"jsonrpc":"2.0","result":2,"id":3}`+"\n"), msg) + + var testFilters = []interface{}{} + var testFilter1 = []interface{}{"0x26aa", "0x26a1"} + res, err = wsconn.initStorageChangeListener(4, append(testFilters, testFilter1)) + require.NoError(t, err) + require.Equal(t, uint(3), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":3,"id":4}`+"\n"), msg) + + var testFilterWrongType = []interface{}{"0x26aa", 1} + res, err = wsconn.initStorageChangeListener(5, append(testFilters, testFilterWrongType)) + require.EqualError(t, err, "unknown parameter type") + require.Equal(t, uint(0), res) + + res, err = wsconn.initStorageChangeListener(6, []interface{}{1}) + require.EqualError(t, err, "unknown parameter type") + require.Equal(t, uint(0), res) + + c.WriteMessage(websocket.TextMessage, []byte(`{ + "jsonrpc": "2.0", + "method": "state_subscribeStorage", + "params": ["0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9de1e86a9a8c739864cf3cc5ec2bea59fd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"], + "id": 7}`)) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":4,"id":7}`+"\n"), msg) + + // test initBlockListener + res, err = wsconn.initBlockListener(1) + require.EqualError(t, err, "error BlockAPI not set") + require.Equal(t, uint(0), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","error":{"code":null,"message":"error BlockAPI not set"},"id":1}`+"\n"), msg) + + wsconn.BlockAPI = new(MockBlockAPI) + + res, err = wsconn.initBlockListener(1) + require.NoError(t, err) + require.Equal(t, uint(5), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":5,"id":1}`+"\n"), msg) + + c.WriteMessage(websocket.TextMessage, []byte(`{ + "jsonrpc": "2.0", + "method": "chain_subscribeNewHeads", + "params": [], + 
"id": 8 + }`)) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":6,"id":8}`+"\n"), msg) + + // test initBlockFinalizedListener + wsconn.BlockAPI = nil + + res, err = wsconn.initBlockFinalizedListener(1) + require.EqualError(t, err, "error BlockAPI not set") + require.Equal(t, uint(0), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","error":{"code":null,"message":"error BlockAPI not set"},"id":1}`+"\n"), msg) + + wsconn.BlockAPI = new(MockBlockAPI) + + res, err = wsconn.initBlockFinalizedListener(1) + require.NoError(t, err) + require.Equal(t, uint(7), res) + _, msg, err = c.ReadMessage() + require.NoError(t, err) + require.Equal(t, []byte(`{"jsonrpc":"2.0","result":7,"id":1}`+"\n"), msg) + + // test initExtrinsicWatch + wsconn.CoreAPI = new(MockCoreAPI) + wsconn.BlockAPI = nil + res, err = wsconn.initExtrinsicWatch(0, []interface{}{"NotHex"}) + require.EqualError(t, err, "could not byteify non 0x prefixed string") + require.Equal(t, uint(0), res) + + res, err = wsconn.initExtrinsicWatch(0, []interface{}{"0x26aa"}) + require.EqualError(t, err, "error BlockAPI not set") + require.Equal(t, uint(0), res) + + wsconn.BlockAPI = new(MockBlockAPI) + res, err = wsconn.initExtrinsicWatch(0, []interface{}{"0x26aa"}) + require.NoError(t, err) + require.Equal(t, uint(8), res) + +} + +type MockStorageAPI struct{} + +func (m *MockStorageAPI) GetStorage(_ *common.Hash, key []byte) ([]byte, error) { + return nil, nil +} +func (m *MockStorageAPI) Entries(_ *common.Hash) (map[string][]byte, error) { + return nil, nil +} +func (m *MockStorageAPI) GetStorageByBlockHash(_ common.Hash, key []byte) ([]byte, error) { + return nil, nil +} +func (m *MockStorageAPI) RegisterStorageObserver(observer state.Observer) { +} + +func (m *MockStorageAPI) UnregisterStorageObserver(observer state.Observer) { +} +func (m *MockStorageAPI) GetStateRootFromBlock(bhash *common.Hash) 
(*common.Hash, error) { + return nil, nil +} +func (m *MockStorageAPI) GetKeysWithPrefix(root *common.Hash, prefix []byte) ([][]byte, error) { + return nil, nil +} + +type MockBlockAPI struct { +} + +func (m *MockBlockAPI) GetHeader(hash common.Hash) (*types.Header, error) { + return nil, nil +} +func (m *MockBlockAPI) BestBlockHash() common.Hash { + return common.Hash{} +} +func (m *MockBlockAPI) GetBlockByHash(hash common.Hash) (*types.Block, error) { + return nil, nil +} +func (m *MockBlockAPI) GetBlockHash(blockNumber *big.Int) (*common.Hash, error) { + return nil, nil +} +func (m *MockBlockAPI) GetFinalizedHash(uint64, uint64) (common.Hash, error) { + return common.Hash{}, nil +} +func (m *MockBlockAPI) RegisterImportedChannel(ch chan<- *types.Block) (byte, error) { + return 0, nil +} +func (m *MockBlockAPI) UnregisterImportedChannel(id byte) { +} +func (m *MockBlockAPI) RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) { + return 0, nil +} +func (m *MockBlockAPI) UnregisterFinalizedChannel(id byte) {} + +func (m *MockBlockAPI) GetJustification(hash common.Hash) ([]byte, error) { + return make([]byte, 10), nil +} + +func (m *MockBlockAPI) HasJustification(hash common.Hash) (bool, error) { + return true, nil +} + +func (m *MockBlockAPI) SubChain(start, end common.Hash) ([]common.Hash, error) { + return make([]common.Hash, 0), nil +} + +type MockCoreAPI struct{} + +func (m *MockCoreAPI) InsertKey(kp crypto.Keypair) {} + +func (m *MockCoreAPI) HasKey(pubKeyStr string, keyType string) (bool, error) { + return false, nil +} + +func (m *MockCoreAPI) GetRuntimeVersion(bhash *common.Hash) (runtime.Version, error) { + return nil, nil +} + +func (m *MockCoreAPI) IsBlockProducer() bool { + return false +} + +func (m *MockCoreAPI) HandleSubmittedExtrinsic(types.Extrinsic) error { + return nil +} + +func (m *MockCoreAPI) GetMetadata(bhash *common.Hash) ([]byte, error) { + return nil, nil +} diff --git a/dot/rpc/websocket_test.go 
b/dot/rpc/websocket_test.go index 393c8c8348..e03a8d8e7d 100644 --- a/dot/rpc/websocket_test.go +++ b/dot/rpc/websocket_test.go @@ -33,6 +33,7 @@ import ( ) var addr = flag.String("addr", "localhost:8546", "http service address") + var testCalls = []struct { call []byte expected []byte @@ -43,6 +44,8 @@ var testCalls = []struct { {[]byte(`{"jsonrpc":"2.0","method":"chain_subscribeNewHeads","params":[],"id":3}`), []byte(`{"jsonrpc":"2.0","result":1,"id":3}` + "\n")}, {[]byte(`{"jsonrpc":"2.0","method":"state_subscribeStorage","params":[],"id":4}`), []byte(`{"jsonrpc":"2.0","result":2,"id":4}` + "\n")}, {[]byte(`{"jsonrpc":"2.0","method":"chain_subscribeFinalizedHeads","params":[],"id":5}`), []byte(`{"jsonrpc":"2.0","result":3,"id":5}` + "\n")}, + {[]byte(`{"jsonrpc":"2.0","method":"author_submitAndWatchExtrinsic","params":["0x010203"],"id":6}`), []byte("{\"jsonrpc\":\"2.0\",\"error\":{\"code\":null,\"message\":\"Failed to call the `TaggedTransactionQueue_validate_transaction` exported function.\"},\"id\":6}\n")}, + {[]byte(`{"jsonrpc":"2.0","method":"state_subscribeRuntimeVersion","params":[],"id":7}`), []byte("{\"jsonrpc\":\"2.0\",\"result\":5,\"id\":7}\n")}, } func TestHTTPServer_ServeHTTP(t *testing.T) { @@ -117,7 +120,7 @@ func (m *MockBlockAPI) RegisterImportedChannel(ch chan<- *types.Block) (byte, er } func (m *MockBlockAPI) UnregisterImportedChannel(id byte) { } -func (m *MockBlockAPI) RegisterFinalizedChannel(ch chan<- *types.Header) (byte, error) { +func (m *MockBlockAPI) RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) { return 0, nil } func (m *MockBlockAPI) UnregisterFinalizedChannel(id byte) {} @@ -145,11 +148,10 @@ func (m *MockStorageAPI) Entries(_ *common.Hash) (map[string][]byte, error) { func (m *MockStorageAPI) GetStorageByBlockHash(_ common.Hash, key []byte) ([]byte, error) { return nil, nil } -func (m *MockStorageAPI) RegisterStorageChangeChannel(sub state.StorageSubscription) (byte, error) { - return 0, nil +func (m 
*MockStorageAPI) RegisterStorageObserver(observer state.Observer) { } -func (m *MockStorageAPI) UnregisterStorageChangeChannel(id byte) { +func (m *MockStorageAPI) UnregisterStorageObserver(observer state.Observer) { } func (m *MockStorageAPI) GetStateRootFromBlock(bhash *common.Hash) (*common.Hash, error) { return nil, nil diff --git a/dot/services.go b/dot/services.go index 0f5d329dbc..ddfbb28db8 100644 --- a/dot/services.go +++ b/dot/services.go @@ -52,12 +52,12 @@ func newInMemoryDB(path string) (chaindb.Database, error) { // State Service -// createStateService creates the state service and initialize state database +// createStateService creates the state service and initialise state database func createStateService(cfg *Config) (*state.Service, error) { logger.Debug("creating state service...") stateSrvc := state.NewService(cfg.Global.BasePath, cfg.Log.StateLvl) - // start state service (initialize state database) + // start state service (initialise state database) err := stateSrvc.Start() if err != nil { return nil, fmt.Errorf("failed to start state service: %s", err) @@ -71,7 +71,7 @@ func createStateService(cfg *Config) (*state.Service, error) { } // load most recent state from database - latestState, err := state.LoadLatestStorageHash(stateSrvc.DB()) + latestState, err := stateSrvc.Base.LoadLatestStorageHash() if err != nil { return nil, fmt.Errorf("failed to load latest state root hash: %s", err) } @@ -204,7 +204,7 @@ func createBABEService(cfg *Config, rt runtime.Instance, st *state.Service, ks k // create new BABE service bs, err := babe.NewService(bcfg) if err != nil { - logger.Error("failed to initialize BABE service", "error", err) + logger.Error("failed to initialise BABE service", "error", err) return nil, err } @@ -214,7 +214,7 @@ func createBABEService(cfg *Config, rt runtime.Instance, st *state.Service, ks k // Core Service // createCoreService creates the core service from the provided core configuration -func createCoreService(cfg *Config, 
bp core.BlockProducer, fg core.FinalityGadget, verifier *babe.VerificationManager, rt runtime.Instance, ks *keystore.GlobalKeystore, stateSrvc *state.Service, net *network.Service) (*core.Service, error) { +func createCoreService(cfg *Config, bp core.BlockProducer, verifier *babe.VerificationManager, rt runtime.Instance, ks *keystore.GlobalKeystore, stateSrvc *state.Service, net *network.Service) (*core.Service, error) { logger.Debug( "creating core service...", "authority", cfg.Core.Roles == types.AuthorityRole, @@ -222,19 +222,17 @@ func createCoreService(cfg *Config, bp core.BlockProducer, fg core.FinalityGadge // set core configuration coreConfig := &core.Config{ - LogLvl: cfg.Log.CoreLvl, - BlockState: stateSrvc.Block, - EpochState: stateSrvc.Epoch, - StorageState: stateSrvc.Storage, - TransactionState: stateSrvc.Transaction, - BlockProducer: bp, - FinalityGadget: fg, - Keystore: ks, - Runtime: rt, - IsBlockProducer: cfg.Core.BabeAuthority, - IsFinalityAuthority: cfg.Core.GrandpaAuthority, - Verifier: verifier, - Network: net, + LogLvl: cfg.Log.CoreLvl, + BlockState: stateSrvc.Block, + EpochState: stateSrvc.Epoch, + StorageState: stateSrvc.Storage, + TransactionState: stateSrvc.Transaction, + BlockProducer: bp, + Keystore: ks, + Runtime: rt, + IsBlockProducer: cfg.Core.BabeAuthority, + Verifier: verifier, + Network: net, } // create new core service @@ -329,7 +327,7 @@ func createRPCService(cfg *Config, stateSrvc *state.Service, coreSrvc *core.Serv // System service // creates a service for providing system related information func createSystemService(cfg *types.SystemInfo, stateSrvc *state.Service) (*system.Service, error) { - genesisData, err := stateSrvc.Storage.GetGenesisData() + genesisData, err := stateSrvc.Base.LoadGenesisData() if err != nil { return nil, err } @@ -348,7 +346,7 @@ func createGRANDPAService(cfg *Config, rt runtime.Instance, st *state.Service, d return nil, ErrInvalidKeystoreType } - voters := grandpa.NewVotersFromAuthorities(ad) + 
voters := types.NewGrandpaVotersFromAuthorities(ad) keys := ks.Keypairs() if len(keys) == 0 && cfg.Core.GrandpaAuthority { @@ -358,8 +356,8 @@ func createGRANDPAService(cfg *Config, rt runtime.Instance, st *state.Service, d gsCfg := &grandpa.Config{ LogLvl: cfg.Log.FinalityGadgetLvl, BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: dh, - SetID: 1, Voters: voters, Authority: cfg.Core.GrandpaAuthority, Network: net, @@ -381,13 +379,14 @@ func createBlockVerifier(st *state.Service) (*babe.VerificationManager, error) { return ver, nil } -func createSyncService(cfg *Config, st *state.Service, bp sync.BlockProducer, dh *core.DigestHandler, verifier *babe.VerificationManager, rt runtime.Instance) (*sync.Service, error) { +func createSyncService(cfg *Config, st *state.Service, bp sync.BlockProducer, fg sync.FinalityGadget, dh *core.DigestHandler, verifier *babe.VerificationManager, rt runtime.Instance) (*sync.Service, error) { syncCfg := &sync.Config{ LogLvl: cfg.Log.SyncLvl, BlockState: st.Block, StorageState: st.Storage, TransactionState: st.Transaction, BlockProducer: bp, + FinalityGadget: fg, Verifier: verifier, Runtime: rt, DigestHandler: dh, @@ -397,5 +396,5 @@ func createSyncService(cfg *Config, st *state.Service, bp sync.BlockProducer, dh } func createDigestHandler(st *state.Service, bp core.BlockProducer, verifier *babe.VerificationManager) (*core.DigestHandler, error) { - return core.NewDigestHandler(st.Block, st.Epoch, bp, nil, verifier) + return core.NewDigestHandler(st.Block, st.Epoch, st.Grandpa, bp, verifier) } diff --git a/dot/services_test.go b/dot/services_test.go index 91f55d2d09..c29bb577e9 100644 --- a/dot/services_test.go +++ b/dot/services_test.go @@ -85,16 +85,8 @@ func TestCreateCoreService(t *testing.T) { rt, err := createRuntime(cfg, stateSrvc, ks, networkSrvc) require.NoError(t, err) - dh, err := createDigestHandler(stateSrvc, nil, nil) - require.NoError(t, err) - - gs, err := createGRANDPAService(cfg, rt, stateSrvc, dh, ks.Gran, 
networkSrvc) - require.NoError(t, err) - - coreSrvc, err := createCoreService(cfg, nil, gs, nil, rt, ks, stateSrvc, networkSrvc) + coreSrvc, err := createCoreService(cfg, nil, nil, rt, ks, stateSrvc, networkSrvc) require.Nil(t, err) - - // TODO: improve dot tests #687 require.NotNil(t, coreSrvc) } @@ -148,7 +140,7 @@ func TestCreateSyncService(t *testing.T) { ver, err := createBlockVerifier(stateSrvc) require.NoError(t, err) - _, err = createSyncService(cfg, stateSrvc, nil, nil, ver, rt) + _, err = createSyncService(cfg, stateSrvc, nil, nil, nil, ver, rt) require.NoError(t, err) } @@ -208,13 +200,7 @@ func TestCreateRPCService(t *testing.T) { rt, err := createRuntime(cfg, stateSrvc, ks, networkSrvc) require.NoError(t, err) - dh, err := createDigestHandler(stateSrvc, nil, nil) - require.NoError(t, err) - - gs, err := createGRANDPAService(cfg, rt, stateSrvc, dh, ks.Gran, networkSrvc) - require.NoError(t, err) - - coreSrvc, err := createCoreService(cfg, nil, gs, nil, rt, ks, stateSrvc, networkSrvc) + coreSrvc, err := createCoreService(cfg, nil, nil, rt, ks, stateSrvc, networkSrvc) require.Nil(t, err) sysSrvc, err := createSystemService(&cfg.System, stateSrvc) @@ -335,13 +321,7 @@ func TestNewWebSocketServer(t *testing.T) { rt, err := createRuntime(cfg, stateSrvc, ks, networkSrvc) require.NoError(t, err) - dh, err := createDigestHandler(stateSrvc, nil, nil) - require.NoError(t, err) - - gs, err := createGRANDPAService(cfg, rt, stateSrvc, dh, ks.Gran, networkSrvc) - require.NoError(t, err) - - coreSrvc, err := createCoreService(cfg, nil, gs, nil, rt, ks, stateSrvc, networkSrvc) + coreSrvc, err := createCoreService(cfg, nil, nil, rt, ks, stateSrvc, networkSrvc) require.Nil(t, err) sysSrvc, err := createSystemService(&cfg.System, stateSrvc) diff --git a/dot/state/base.go b/dot/state/base.go new file mode 100644 index 0000000000..0d0ff47e81 --- /dev/null +++ b/dot/state/base.go @@ -0,0 +1,148 @@ +// Copyright 2019 ChainSafe Systems (ON) Corp. 
+// This file is part of gossamer. +// +// The gossamer library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The gossamer library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the gossamer library. If not, see . + +package state + +import ( + "encoding/binary" + "encoding/json" + "fmt" + + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/genesis" + + "github.com/ChainSafe/chaindb" +) + +// SetupDatabase will return an instance of database based on basepath +func SetupDatabase(basepath string) (chaindb.Database, error) { + return chaindb.NewBadgerDB(&chaindb.Config{ + DataDir: basepath, + }) +} + +// BaseState is a wrapper for the chaindb.Database, without any prefixes +type BaseState struct { + db chaindb.Database +} + +// NewBaseState returns a new BaseState +func NewBaseState(db chaindb.Database) *BaseState { + return &BaseState{ + db: db, + } +} + +// StoreNodeGlobalName stores the current node name to avoid creating new ones after each initialization +func (s *BaseState) StoreNodeGlobalName(nodeName string) error { + return s.db.Put(common.NodeNameKey, []byte(nodeName)) +} + +// LoadNodeGlobalName loads the latest stored node global name +func (s *BaseState) LoadNodeGlobalName() (string, error) { + nodeName, err := s.db.Get(common.NodeNameKey) + if err != nil { + return "", err + } + + return string(nodeName), nil +} + +// StoreBestBlockHash stores the hash at the BestBlockHashKey +func (s *BaseState) StoreBestBlockHash(hash common.Hash) error 
{ + return s.db.Put(common.BestBlockHashKey, hash[:]) +} + +// LoadBestBlockHash loads the hash stored at BestBlockHashKey +func (s *BaseState) LoadBestBlockHash() (common.Hash, error) { + hash, err := s.db.Get(common.BestBlockHashKey) + if err != nil { + return common.Hash{}, err + } + + return common.NewHash(hash), nil +} + +// StoreGenesisData stores the given genesis data at the known GenesisDataKey. +func (s *BaseState) StoreGenesisData(gen *genesis.Data) error { + enc, err := json.Marshal(gen) + if err != nil { + return fmt.Errorf("cannot scale encode genesis data: %s", err) + } + + return s.db.Put(common.GenesisDataKey, enc) +} + +// LoadGenesisData retrieves the genesis data stored at the known GenesisDataKey. +func (s *BaseState) LoadGenesisData() (*genesis.Data, error) { + enc, err := s.db.Get(common.GenesisDataKey) + if err != nil { + return nil, err + } + + data := &genesis.Data{} + err = json.Unmarshal(enc, data) + if err != nil { + return nil, err + } + + return data, nil +} + +// StoreLatestStorageHash stores the current root hash in the database at LatestStorageHashKey +func (s *BaseState) StoreLatestStorageHash(root common.Hash) error { + return s.db.Put(common.LatestStorageHashKey, root[:]) +} + +// LoadLatestStorageHash retrieves the hash stored at LatestStorageHashKey from the DB +func (s *BaseState) LoadLatestStorageHash() (common.Hash, error) { + hashbytes, err := s.db.Get(common.LatestStorageHashKey) + if err != nil { + return common.Hash{}, err + } + + return common.NewHash(hashbytes), nil +} + +func (s *BaseState) storeSkipToEpoch(epoch uint64) error { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, epoch) + return s.db.Put(skipToKey, buf) +} + +func (s *BaseState) loadSkipToEpoch() (uint64, error) { + data, err := s.db.Get(skipToKey) + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint64(data), nil +} + +func (s *BaseState) storeFirstSlot(slot uint64) error { + buf := make([]byte, 8) + 
binary.LittleEndian.PutUint64(buf, slot) + return s.db.Put(firstSlotKey, buf) +} + +func (s *BaseState) loadFirstSlot() (uint64, error) { + data, err := s.db.Get(firstSlotKey) + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint64(data), nil +} diff --git a/dot/state/db_test.go b/dot/state/base_test.go similarity index 72% rename from dot/state/db_test.go rename to dot/state/base_test.go index 2a238b8139..a6aee30313 100644 --- a/dot/state/db_test.go +++ b/dot/state/base_test.go @@ -2,7 +2,6 @@ package state import ( "bytes" - "reflect" "testing" "github.com/ChainSafe/gossamer/lib/common" @@ -26,7 +25,7 @@ func TestTrie_StoreAndLoadFromDB(t *testing.T) { } } - err := StoreTrie(db, tt) + err := tt.Store(db) require.NoError(t, err) encroot, err := tt.Hash() @@ -35,7 +34,7 @@ func TestTrie_StoreAndLoadFromDB(t *testing.T) { expected := tt.MustHash() tt = trie.NewEmptyTrie() - err = LoadTrie(db, tt, encroot) + err = tt.Load(db, encroot) require.NoError(t, err) require.Equal(t, expected, tt.MustHash()) } @@ -47,6 +46,7 @@ type test struct { func TestStoreAndLoadLatestStorageHash(t *testing.T) { db := NewInMemoryDB(t) + base := NewBaseState(db) tt := trie.NewEmptyTrie() tests := []test{ @@ -65,27 +65,19 @@ func TestStoreAndLoadLatestStorageHash(t *testing.T) { } expected, err := tt.Hash() - if err != nil { - t.Fatal(err) - } - - err = StoreLatestStorageHash(db, expected) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - hash, err := LoadLatestStorageHash(db) - if err != nil { - t.Fatal(err) - } + err = base.StoreLatestStorageHash(expected) + require.NoError(t, err) - if hash != expected { - t.Fatalf("Fail: got %x expected %x", hash, expected) - } + hash, err := base.LoadLatestStorageHash() + require.NoError(t, err) + require.Equal(t, expected, hash) } func TestStoreAndLoadGenesisData(t *testing.T) { db := NewInMemoryDB(t) + base := NewBaseState(db) bootnodes := common.StringArrayToBytes([]string{ 
"/ip4/127.0.0.1/tcp/7001/p2p/12D3KooWHHzSeKaY8xuZVzkLbKFfvNgPPeKhFBGrMbNzbm5akpqu", @@ -99,36 +91,24 @@ func TestStoreAndLoadGenesisData(t *testing.T) { ProtocolID: "/gossamer/test/0", } - err := StoreGenesisData(db, expected) - if err != nil { - t.Fatal(err) - } - - gen, err := LoadGenesisData(db) - if err != nil { - t.Fatal(err) - } + err := base.StoreGenesisData(expected) + require.NoError(t, err) - if !reflect.DeepEqual(gen, expected) { - t.Fatalf("Fail: got %v expected %v", gen, expected) - } + gen, err := base.LoadGenesisData() + require.NoError(t, err) + require.Equal(t, expected, gen) } func TestStoreAndLoadBestBlockHash(t *testing.T) { db := NewInMemoryDB(t) - hash, _ := common.HexToHash("0x3f5a19b9e9507e05276216f3877bb289e47885f8184010c65d0e41580d3663cc") + base := NewBaseState(db) - err := StoreBestBlockHash(db, hash) - if err != nil { - t.Fatal(err) - } + hash, _ := common.HexToHash("0x3f5a19b9e9507e05276216f3877bb289e47885f8184010c65d0e41580d3663cc") - res, err := LoadBestBlockHash(db) - if err != nil { - t.Fatal(err) - } + err := base.StoreBestBlockHash(hash) + require.NoError(t, err) - if !reflect.DeepEqual(res, hash) { - t.Fatalf("Fail: got %x expected %x", res, hash) - } + res, err := base.LoadBestBlockHash() + require.NoError(t, err) + require.Equal(t, hash, res) } diff --git a/dot/state/block.go b/dot/state/block.go index 302d92fe5e..ea5d8c5267 100644 --- a/dot/state/block.go +++ b/dot/state/block.go @@ -38,17 +38,18 @@ const pruneKeyBufferSize = 1000 // BlockState defines fields for manipulating the state of blocks, such as BlockTree, BlockDB and Header type BlockState struct { - bt *blocktree.BlockTree - baseDB chaindb.Database - db chaindb.Database - lock sync.RWMutex + bt *blocktree.BlockTree + //baseDB chaindb.Database + baseState *BaseState + db chaindb.Database + sync.RWMutex genesisHash common.Hash // block notifiers imported map[byte]chan<- *types.Block - finalized map[byte]chan<- *types.Header + finalised map[byte]chan<- 
*types.FinalisationInfo importedLock sync.RWMutex - finalizedLock sync.RWMutex + finalisedLock sync.RWMutex pruneKeyCh chan *types.Header } @@ -61,10 +62,10 @@ func NewBlockState(db chaindb.Database, bt *blocktree.BlockTree) (*BlockState, e bs := &BlockState{ bt: bt, - baseDB: db, + baseState: NewBaseState(db), db: chaindb.NewTable(db, blockPrefix), imported: make(map[byte]chan<- *types.Block), - finalized: make(map[byte]chan<- *types.Header), + finalised: make(map[byte]chan<- *types.FinalisationInfo), pruneKeyCh: make(chan *types.Header, pruneKeyBufferSize), } @@ -77,14 +78,14 @@ func NewBlockState(db chaindb.Database, bt *blocktree.BlockTree) (*BlockState, e return bs, nil } -// NewBlockStateFromGenesis initializes a BlockState from a genesis header, saving it to the database located at basePath +// NewBlockStateFromGenesis initialises a BlockState from a genesis header, saving it to the database located at basePath func NewBlockStateFromGenesis(db chaindb.Database, header *types.Header) (*BlockState, error) { bs := &BlockState{ bt: blocktree.NewBlockTreeFromRoot(header, db), - baseDB: db, + baseState: NewBaseState(db), db: chaindb.NewTable(db, blockPrefix), imported: make(map[byte]chan<- *types.Block), - finalized: make(map[byte]chan<- *types.Header), + finalised: make(map[byte]chan<- *types.FinalisationInfo), pruneKeyCh: make(chan *types.Header, pruneKeyBufferSize), } @@ -110,7 +111,7 @@ func NewBlockStateFromGenesis(db chaindb.Database, header *types.Header) (*Block bs.genesisHash = header.Hash() - // set the latest finalized head to the genesis header + // set the latest finalised head to the genesis header err = bs.SetFinalizedHash(bs.genesisHash, 0, 0) if err != nil { return nil, err @@ -268,7 +269,7 @@ func (bs *BlockState) GetHeader(hash common.Hash) (*types.Header, error) { func (bs *BlockState) GetHashByNumber(num *big.Int) (common.Hash, error) { bh, err := bs.db.Get(headerHashKey(num.Uint64())) if err != nil { - return common.Hash{}, fmt.Errorf("cannot 
get block %d: %s", num, err) + return common.Hash{}, fmt.Errorf("cannot get block %d: %w", num, err) } return common.NewHash(bh), nil @@ -278,7 +279,7 @@ func (bs *BlockState) GetHashByNumber(num *big.Int) (common.Hash, error) { func (bs *BlockState) GetHeaderByNumber(num *big.Int) (*types.Header, error) { bh, err := bs.db.Get(headerHashKey(num.Uint64())) if err != nil { - return nil, fmt.Errorf("cannot get block %d: %s", num, err) + return nil, fmt.Errorf("cannot get block %d: %w", num, err) } hash := common.NewHash(bh) @@ -304,7 +305,7 @@ func (bs *BlockState) GetBlockByNumber(num *big.Int) (*types.Block, error) { // First retrieve the block hash in a byte array based on the block number from the database byteHash, err := bs.db.Get(headerHashKey(num.Uint64())) if err != nil { - return nil, fmt.Errorf("cannot get block %d: %s", num, err) + return nil, fmt.Errorf("cannot get block %d: %w", num, err) } // Then find the block based on the hash @@ -322,7 +323,7 @@ func (bs *BlockState) GetBlockHash(blockNumber *big.Int) (*common.Hash, error) { // First retrieve the block hash in a byte array based on the block number from the database byteHash, err := bs.db.Get(headerHashKey(blockNumber.Uint64())) if err != nil { - return nil, fmt.Errorf("cannot get block %d: %s", blockNumber, err) + return nil, fmt.Errorf("cannot get block %d: %w", blockNumber, err) } hash := common.NewHash(byteHash) return &hash, nil @@ -330,9 +331,6 @@ func (bs *BlockState) GetBlockHash(blockNumber *big.Int) (*common.Hash, error) { // SetHeader will set the header into DB func (bs *BlockState) SetHeader(header *types.Header) error { - bs.lock.Lock() - defer bs.lock.Unlock() - hash := header.Hash() // Write the encoded header @@ -366,14 +364,10 @@ func (bs *BlockState) GetBlockBody(hash common.Hash) (*types.Body, error) { // SetBlockBody will add a block body to the db func (bs *BlockState) SetBlockBody(hash common.Hash, body *types.Body) error { - bs.lock.Lock() - defer bs.lock.Unlock() - - err := 
bs.db.Put(blockBodyKey(hash), body.AsOptional().Value()) - return err + return bs.db.Put(blockBodyKey(hash), body.AsOptional().Value()) } -// HasFinalizedBlock returns true if there is a finalized block for a given round and setID, false otherwise +// HasFinalizedBlock returns true if there is a finalised block for a given round and setID, false otherwise func (bs *BlockState) HasFinalizedBlock(round, setID uint64) (bool, error) { // get current round r, err := bs.GetRound() @@ -381,15 +375,15 @@ func (bs *BlockState) HasFinalizedBlock(round, setID uint64) (bool, error) { return false, err } - // round that is being queried for has not yet finalized + // round that is being queried for has not yet finalised if round > r { - return false, fmt.Errorf("round not yet finalized") + return false, fmt.Errorf("round not yet finalised") } return bs.db.Has(finalizedHashKey(round, setID)) } -// GetFinalizedHeader returns the latest finalized block header +// GetFinalizedHeader returns the latest finalised block header func (bs *BlockState) GetFinalizedHeader(round, setID uint64) (*types.Header, error) { h, err := bs.GetFinalizedHash(round, setID) if err != nil { @@ -404,7 +398,7 @@ func (bs *BlockState) GetFinalizedHeader(round, setID uint64) (*types.Header, er return header, nil } -// GetFinalizedHash gets the latest finalized block header +// GetFinalizedHash gets the latest finalised block header func (bs *BlockState) GetFinalizedHash(round, setID uint64) (common.Hash, error) { // get current round r, err := bs.GetRound() @@ -412,9 +406,9 @@ func (bs *BlockState) GetFinalizedHash(round, setID uint64) (common.Hash, error) return common.Hash{}, err } - // round that is being queried for has not yet finalized + // round that is being queried for has not yet finalised if round > r { - return common.Hash{}, fmt.Errorf("round not yet finalized") + return common.Hash{}, fmt.Errorf("round not yet finalised") } h, err := bs.db.Get(finalizedHashKey(round, setID)) @@ -425,9 +419,12 
@@ func (bs *BlockState) GetFinalizedHash(round, setID uint64) (common.Hash, error) return common.NewHash(h), nil } -// SetFinalizedHash sets the latest finalized block header +// SetFinalizedHash sets the latest finalised block header func (bs *BlockState) SetFinalizedHash(hash common.Hash, round, setID uint64) error { - go bs.notifyFinalized(hash) + bs.Lock() + defer bs.Unlock() + + go bs.notifyFinalized(hash, round, setID) if round > 0 { err := bs.SetRound(round) if err != nil { @@ -454,7 +451,7 @@ func (bs *BlockState) SetFinalizedHash(hash common.Hash, round, setID uint64) er return bs.db.Put(finalizedHashKey(round, setID), hash[:]) } -// SetRound sets the latest finalized GRANDPA round in the db +// SetRound sets the latest finalised GRANDPA round in the db // TODO: this needs to use both setID and round func (bs *BlockState) SetRound(round uint64) error { buf := make([]byte, 8) @@ -462,7 +459,7 @@ func (bs *BlockState) SetRound(round uint64) error { return bs.db.Put(common.LatestFinalizedRoundKey, buf) } -// GetRound gets the latest finalized GRANDPA round from the db +// GetRound gets the latest finalised GRANDPA round from the db func (bs *BlockState) GetRound() (uint64, error) { r, err := bs.db.Get(common.LatestFinalizedRoundKey) if err != nil { @@ -496,6 +493,8 @@ func (bs *BlockState) CompareAndSetBlockData(bd *types.BlockData) error { // AddBlock adds a block to the blocktree and the DB with arrival time as current unix time func (bs *BlockState) AddBlock(block *types.Block) error { + bs.Lock() + defer bs.Unlock() return bs.AddBlockWithArrivalTime(block, time.Now()) } @@ -506,6 +505,8 @@ func (bs *BlockState) AddBlockWithArrivalTime(block *types.Block, arrivalTime ti return err } + prevHead := bs.bt.DeepestBlockHash() + // add block to blocktree err = bs.bt.AddBlock(block.Header, uint64(arrivalTime.UnixNano())) if err != nil { @@ -541,12 +542,58 @@ func (bs *BlockState) AddBlockWithArrivalTime(block *types.Block, arrivalTime ti return err } + // check 
if there was a re-org, if so, re-set the canonical number->hash mapping + err = bs.handleAddedBlock(prevHead, bs.bt.DeepestBlockHash()) + if err != nil { + return err + } + go bs.notifyImported(block) - return bs.baseDB.Flush() + return bs.db.Flush() +} + +// handleAddedBlock re-sets the canonical number->hash mapping if there was a chain re-org. +// prev is the previous best block hash before the new block was added to the blocktree. +// curr is the current best block hash. +func (bs *BlockState) handleAddedBlock(prev, curr common.Hash) error { + ancestor, err := bs.HighestCommonAncestor(prev, curr) + if err != nil { + return err + } + + // if the highest common ancestor of the previous chain head and current chain head is the previous chain head, + // then the current chain head is the descendant of the previous and thus are on the same chain + if ancestor == prev { + return nil + } + + subchain, err := bs.SubChain(ancestor, curr) + if err != nil { + return err + } + + batch := bs.db.NewBatch() + for _, hash := range subchain { + // TODO: set number from ancestor.Number + i ? + header, err := bs.GetHeader(hash) + if err != nil { + return fmt.Errorf("failed to get header in subchain: %w", err) + } + + err = batch.Put(headerHashKey(header.Number.Uint64()), hash.ToBytes()) + if err != nil { + return err + } + } + + return batch.Flush() } // AddBlockToBlockTree adds the given block to the blocktree. It does not write it to the database. func (bs *BlockState) AddBlockToBlockTree(header *types.Header) error { + bs.Lock() + defer bs.Unlock() + arrivalTime, err := bs.GetArrivalTime(header.Hash()) if err != nil { arrivalTime = time.Now() @@ -567,7 +614,7 @@ func (bs *BlockState) isBlockOnCurrentChain(header *types.Header) (bool, error) { } // if the new block is ahead of our best block, then it is on our current chain. 
- if header.Number.Cmp(bestBlock.Number) == 1 { + if header.Number.Cmp(bestBlock.Number) > 0 { return true, nil } @@ -671,7 +718,7 @@ func (bs *BlockState) BlocktreeAsString() string { } func (bs *BlockState) setBestBlockHashKey(hash common.Hash) error { - return StoreBestBlockHash(bs.baseDB, hash) + return bs.baseState.StoreBestBlockHash(hash) } // HasArrivalTime returns true if the db contains the block's arrival time @@ -681,7 +728,7 @@ func (bs *BlockState) HasArrivalTime(hash common.Hash) (bool, error) { // GetArrivalTime returns the arrival time in nanoseconds since the Unix epoch of a block given its hash func (bs *BlockState) GetArrivalTime(hash common.Hash) (time.Time, error) { - arrivalTime, err := bs.baseDB.Get(arrivalTimeKey(hash)) + arrivalTime, err := bs.db.Get(arrivalTimeKey(hash)) if err != nil { return time.Time{}, err } @@ -693,5 +740,5 @@ func (bs *BlockState) GetArrivalTime(hash common.Hash) (time.Time, error) { func (bs *BlockState) setArrivalTime(hash common.Hash, arrivalTime time.Time) error { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(arrivalTime.UnixNano())) - return bs.baseDB.Put(arrivalTimeKey(hash), buf) + return bs.db.Put(arrivalTimeKey(hash), buf) } diff --git a/dot/state/block_data.go b/dot/state/block_data.go index 67cccd8087..90987d07ff 100644 --- a/dot/state/block_data.go +++ b/dot/state/block_data.go @@ -32,8 +32,8 @@ func (bs *BlockState) HasReceipt(hash common.Hash) (bool, error) { // SetReceipt sets a Receipt in the database func (bs *BlockState) SetReceipt(hash common.Hash, data []byte) error { - bs.lock.Lock() - defer bs.lock.Unlock() + bs.Lock() + defer bs.Unlock() err := bs.db.Put(prefixKey(hash, receiptPrefix), data) if err != nil { @@ -60,8 +60,8 @@ func (bs *BlockState) HasMessageQueue(hash common.Hash) (bool, error) { // SetMessageQueue sets a MessageQueue in the database func (bs *BlockState) SetMessageQueue(hash common.Hash, data []byte) error { - bs.lock.Lock() - defer bs.lock.Unlock() + 
bs.Lock() + defer bs.Unlock() err := bs.db.Put(prefixKey(hash, messageQueuePrefix), data) if err != nil { @@ -88,8 +88,8 @@ func (bs *BlockState) HasJustification(hash common.Hash) (bool, error) { // SetJustification sets a Justification in the database func (bs *BlockState) SetJustification(hash common.Hash, data []byte) error { - bs.lock.Lock() - defer bs.lock.Unlock() + bs.Lock() + defer bs.Unlock() err := bs.db.Put(prefixKey(hash, justificationPrefix), data) if err != nil { diff --git a/dot/state/block_notify.go b/dot/state/block_notify.go index 68f415d870..1fbcb214f6 100644 --- a/dot/state/block_notify.go +++ b/dot/state/block_notify.go @@ -49,28 +49,28 @@ func (bs *BlockState) RegisterImportedChannel(ch chan<- *types.Block) (byte, err return id, nil } -// RegisterFinalizedChannel registers a channel for block notification upon block finalization. +// RegisterFinalizedChannel registers a channel for block notification upon block finalisation. // It returns the channel ID (used for unregistering the channel) -func (bs *BlockState) RegisterFinalizedChannel(ch chan<- *types.Header) (byte, error) { - bs.finalizedLock.RLock() +func (bs *BlockState) RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) { + bs.finalisedLock.RLock() - if len(bs.finalized) == 256 { + if len(bs.finalised) == 256 { return 0, errors.New("channel limit reached") } var id byte for { id = generateID() - if bs.finalized[id] == nil { + if bs.finalised[id] == nil { break } } - bs.finalizedLock.RUnlock() + bs.finalisedLock.RUnlock() - bs.finalizedLock.Lock() - bs.finalized[id] = ch - bs.finalizedLock.Unlock() + bs.finalisedLock.Lock() + bs.finalised[id] = ch + bs.finalisedLock.Unlock() return id, nil } @@ -83,13 +83,13 @@ func (bs *BlockState) UnregisterImportedChannel(id byte) { delete(bs.imported, id) } -// UnregisterFinalizedChannel removes the block finalization notification channel with the given ID. 
+// UnregisterFinalizedChannel removes the block finalisation notification channel with the given ID. // A channel must be unregistered before closing it. func (bs *BlockState) UnregisterFinalizedChannel(id byte) { - bs.finalizedLock.Lock() - defer bs.finalizedLock.Unlock() + bs.finalisedLock.Lock() + defer bs.finalisedLock.Unlock() - delete(bs.finalized, id) + delete(bs.finalised, id) } func (bs *BlockState) notifyImported(block *types.Block) { @@ -111,26 +111,31 @@ func (bs *BlockState) notifyImported(block *types.Block) { } } -func (bs *BlockState) notifyFinalized(hash common.Hash) { - bs.finalizedLock.RLock() - defer bs.finalizedLock.RUnlock() +func (bs *BlockState) notifyFinalized(hash common.Hash, round, setID uint64) { + bs.finalisedLock.RLock() + defer bs.finalisedLock.RUnlock() - if len(bs.finalized) == 0 { + if len(bs.finalised) == 0 { return } header, err := bs.GetHeader(hash) if err != nil { - logger.Error("failed to get finalized header", "hash", hash, "error", err) + logger.Error("failed to get finalised header", "hash", hash, "error", err) return } - logger.Trace("notifying finalized block chans...", "chans", bs.finalized) + logger.Debug("notifying finalised block chans...", "chans", bs.finalised) + info := &types.FinalisationInfo{ + Header: header, + Round: round, + SetID: setID, + } - for _, ch := range bs.finalized { - go func(ch chan<- *types.Header) { + for _, ch := range bs.finalised { + go func(ch chan<- *types.FinalisationInfo) { select { - case ch <- header: + case ch <- info: default: } }(ch) @@ -138,6 +143,7 @@ func (bs *BlockState) notifyFinalized(hash common.Hash) { } func generateID() byte { + // skipcq: GSC-G404 id := rand.Intn(256) //nolint return byte(id) } diff --git a/dot/state/block_notify_test.go b/dot/state/block_notify_test.go index a38790eb5a..ace6e1067f 100644 --- a/dot/state/block_notify_test.go +++ b/dot/state/block_notify_test.go @@ -52,7 +52,7 @@ func TestImportChannel(t *testing.T) { func TestFinalizedChannel(t 
*testing.T) { bs := newTestBlockState(t, testGenesisHeader) - ch := make(chan *types.Header, 3) + ch := make(chan *types.FinalisationInfo, 3) id, err := bs.RegisterFinalizedChannel(ch) require.NoError(t, err) @@ -68,7 +68,7 @@ func TestFinalizedChannel(t *testing.T) { select { case <-ch: case <-time.After(testMessageTimeout): - t.Fatal("did not receive finalized block") + t.Fatal("did not receive finalised block") } } } @@ -117,12 +117,12 @@ func TestFinalizedChannel_Multi(t *testing.T) { bs := newTestBlockState(t, testGenesisHeader) num := 5 - chs := make([]chan *types.Header, num) + chs := make([]chan *types.FinalisationInfo, num) ids := make([]byte, num) var err error for i := 0; i < num; i++ { - chs[i] = make(chan *types.Header) + chs[i] = make(chan *types.FinalisationInfo) ids[i], err = bs.RegisterFinalizedChannel(chs[i]) require.NoError(t, err) } @@ -134,11 +134,11 @@ func TestFinalizedChannel_Multi(t *testing.T) { for i, ch := range chs { - go func(i int, ch chan *types.Header) { + go func(i int, ch chan *types.FinalisationInfo) { select { case <-ch: case <-time.After(testMessageTimeout): - t.Error("did not receive finalized block: ch=", i) + t.Error("did not receive finalised block: ch=", i) } wg.Done() }(i, ch) diff --git a/dot/state/block_test.go b/dot/state/block_test.go index 0a54db54a4..fc54bbceea 100644 --- a/dot/state/block_test.go +++ b/dot/state/block_test.go @@ -19,6 +19,7 @@ package state import ( "math/big" "testing" + "time" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" @@ -310,7 +311,7 @@ func TestFinalization_DeleteBlock(t *testing.T) { // require.True(t, has, n) // } - // pick block to finalize + // pick block to finalise fin := leaves[len(leaves)-1] err := bs.SetFinalizedHash(fin, 1, 1) require.NoError(t, err) @@ -327,7 +328,7 @@ func TestFinalization_DeleteBlock(t *testing.T) { return false } - // assert that every block except finalized has been deleted + // assert that every block except finalised 
has been deleted for _, b := range before { if b == fin { continue @@ -337,12 +338,12 @@ func TestFinalization_DeleteBlock(t *testing.T) { continue } - isFinalized, err := btBefore.IsDescendantOf(b, fin) + isFinalised, err := btBefore.IsDescendantOf(b, fin) require.NoError(t, err) has, err := bs.HasHeader(b) require.NoError(t, err) - if isFinalized { + if isFinalised { require.True(t, has) } else { require.False(t, has) @@ -350,7 +351,7 @@ func TestFinalization_DeleteBlock(t *testing.T) { has, err = bs.HasBlockBody(b) require.NoError(t, err) - if isFinalized { + if isFinalised { require.True(t, has) } else { require.False(t, has) @@ -358,7 +359,7 @@ func TestFinalization_DeleteBlock(t *testing.T) { // has, err = bs.HasArrivalTime(b) // require.NoError(t, err) - // if isFinalized && b != bs.genesisHash { + // if isFinalised && b != bs.genesisHash { // require.True(t, has, b) // } else { // require.False(t, has) @@ -391,3 +392,128 @@ func TestGetHashByNumber(t *testing.T) { require.NoError(t, err) require.Equal(t, header.Hash(), res) } + +func TestAddBlock_WithReOrg(t *testing.T) { + bs := newTestBlockState(t, testGenesisHeader) + + header1a := &types.Header{ + Number: big.NewInt(1), + Digest: types.Digest{}, + ParentHash: testGenesisHeader.Hash(), + } + + block1a := &types.Block{ + Header: header1a, + Body: &types.Body{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + } + + err := bs.AddBlock(block1a) + require.NoError(t, err) + + block1hash, err := bs.GetHashByNumber(big.NewInt(1)) + require.NoError(t, err) + require.Equal(t, header1a.Hash(), block1hash) + + header1b := &types.Header{ + Number: big.NewInt(1), + Digest: types.Digest{}, + ParentHash: testGenesisHeader.Hash(), + ExtrinsicsRoot: common.Hash{99}, + } + + block1b := &types.Block{ + Header: header1b, + Body: &types.Body{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + } + + err = bs.AddBlock(block1b) + require.NoError(t, err) + + // should still be hash 1a since it arrived first + block1hash, err = bs.GetHashByNumber(big.NewInt(1)) + 
require.NoError(t, err) + require.Equal(t, header1a.Hash(), block1hash) + + header2b := &types.Header{ + Number: big.NewInt(2), + Digest: types.Digest{}, + ParentHash: header1b.Hash(), + ExtrinsicsRoot: common.Hash{99}, + } + + block2b := &types.Block{ + Header: header2b, + Body: &types.Body{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + } + + err = bs.AddBlock(block2b) + require.NoError(t, err) + + // should now be hash 1b since it's on the longer chain + block1hash, err = bs.GetHashByNumber(big.NewInt(1)) + require.NoError(t, err) + require.Equal(t, header1b.Hash(), block1hash) + + block2hash, err := bs.GetHashByNumber(big.NewInt(2)) + require.NoError(t, err) + require.Equal(t, header2b.Hash(), block2hash) + + header2a := &types.Header{ + Number: big.NewInt(2), + Digest: types.Digest{}, + ParentHash: header1a.Hash(), + } + + block2a := &types.Block{ + Header: header2a, + Body: &types.Body{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + } + + err = bs.AddBlock(block2a) + require.NoError(t, err) + + header3a := &types.Header{ + Number: big.NewInt(3), + Digest: types.Digest{}, + ParentHash: header2a.Hash(), + } + + block3a := &types.Block{ + Header: header3a, + Body: &types.Body{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + } + + err = bs.AddBlock(block3a) + require.NoError(t, err) + + // should now be hash 1a since it's on the longer chain + block1hash, err = bs.GetHashByNumber(big.NewInt(1)) + require.NoError(t, err) + require.Equal(t, header1a.Hash(), block1hash) + + // should now be hash 2a since it's on the longer chain + block2hash, err = bs.GetHashByNumber(big.NewInt(2)) + require.NoError(t, err) + require.Equal(t, header2a.Hash(), block2hash) + + block3hash, err := bs.GetHashByNumber(big.NewInt(3)) + require.NoError(t, err) + require.Equal(t, header3a.Hash(), block3hash) +} + +func TestAddBlockToBlockTree(t *testing.T) { + bs := newTestBlockState(t, testGenesisHeader) + + header := &types.Header{ + Number: big.NewInt(1), + Digest: types.Digest{}, + ParentHash: testGenesisHeader.Hash(), + } + + err 
:= bs.setArrivalTime(header.Hash(), time.Now()) + require.NoError(t, err) + + err = bs.AddBlockToBlockTree(header) + require.NoError(t, err) + require.Equal(t, bs.BestBlockHash(), header.Hash()) +} diff --git a/dot/state/db.go b/dot/state/db.go deleted file mode 100644 index 8b88c64deb..0000000000 --- a/dot/state/db.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019 ChainSafe Systems (ON) Corp. -// This file is part of gossamer. -// -// The gossamer library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The gossamer library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the gossamer library. If not, see . - -package state - -import ( - "encoding/json" - "fmt" - - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/genesis" - "github.com/ChainSafe/gossamer/lib/trie" - - database "github.com/ChainSafe/chaindb" -) - -// StoreBestBlockHash stores the hash at the BestBlockHashKey -func StoreBestBlockHash(db database.Database, hash common.Hash) error { - return db.Put(common.BestBlockHashKey, hash[:]) -} - -// LoadBestBlockHash loads the hash stored at BestBlockHashKey -func LoadBestBlockHash(db database.Database) (common.Hash, error) { - hash, err := db.Get(common.BestBlockHashKey) - if err != nil { - return common.Hash{}, err - } - - return common.NewHash(hash), nil -} - -// StoreGenesisData stores the given genesis data at the known GenesisDataKey. 
-func StoreGenesisData(db database.Database, gen *genesis.Data) error { - enc, err := json.Marshal(gen) - if err != nil { - return fmt.Errorf("cannot scale encode genesis data: %s", err) - } - - return db.Put(common.GenesisDataKey, enc) -} - -// LoadGenesisData retrieves the genesis data stored at the known GenesisDataKey. -func LoadGenesisData(db database.Database) (*genesis.Data, error) { - enc, err := db.Get(common.GenesisDataKey) - if err != nil { - return nil, err - } - - data := &genesis.Data{} - err = json.Unmarshal(enc, data) - if err != nil { - return nil, err - } - - return data, nil -} - -// StoreLatestStorageHash stores the current root hash in the database at LatestStorageHashKey -func StoreLatestStorageHash(db database.Database, root common.Hash) error { - return db.Put(common.LatestStorageHashKey, root[:]) -} - -// LoadLatestStorageHash retrieves the hash stored at LatestStorageHashKey from the DB -func LoadLatestStorageHash(db database.Database) (common.Hash, error) { - hashbytes, err := db.Get(common.LatestStorageHashKey) - if err != nil { - return common.Hash{}, err - } - - return common.NewHash(hashbytes), nil -} - -// StoreTrie encodes the entire trie and writes it to the DB -// The key to the DB entry is the root hash of the trie -func StoreTrie(db database.Database, t *trie.Trie) error { - return t.Store(db) -} - -// LoadTrie loads an encoded trie from the DB where the key is `root` -func LoadTrie(db database.Database, t *trie.Trie, root common.Hash) error { - return t.Load(db, root) -} diff --git a/dot/state/epoch.go b/dot/state/epoch.go index 9afe2cb996..fc45af6d9a 100644 --- a/dot/state/epoch.go +++ b/dot/state/epoch.go @@ -51,8 +51,8 @@ func configDataKey(epoch uint64) []byte { // EpochState tracks information related to each epoch type EpochState struct { - baseDB chaindb.Database db chaindb.Database + baseState *BaseState epochLength uint64 // measured in slots firstSlot uint64 skipToEpoch uint64 @@ -60,7 +60,9 @@ type EpochState struct 
{ // NewEpochStateFromGenesis returns a new EpochState given information for the first epoch, fetched from the runtime func NewEpochStateFromGenesis(db chaindb.Database, genesisConfig *types.BabeConfiguration) (*EpochState, error) { - err := storeFirstSlot(db, 1) // this may change once the first block is imported + baseState := NewBaseState(db) + + err := baseState.storeFirstSlot(1) // this may change once the first block is imported if err != nil { return nil, err } @@ -76,7 +78,7 @@ func NewEpochStateFromGenesis(db chaindb.Database, genesisConfig *types.BabeConf } s := &EpochState{ - baseDB: db, + baseState: NewBaseState(db), db: epochDB, epochLength: genesisConfig.EpochLength, firstSlot: 1, @@ -109,7 +111,7 @@ func NewEpochStateFromGenesis(db chaindb.Database, genesisConfig *types.BabeConf return nil, err } - if err := storeSkipToEpoch(db, 0); err != nil { + if err := s.baseState.storeSkipToEpoch(0); err != nil { return nil, err } @@ -118,23 +120,25 @@ func NewEpochStateFromGenesis(db chaindb.Database, genesisConfig *types.BabeConf // NewEpochState returns a new EpochState func NewEpochState(db chaindb.Database) (*EpochState, error) { + baseState := NewBaseState(db) + epochLength, err := loadEpochLength(db) if err != nil { return nil, err } - firstSlot, err := loadFirstSlot(db) + firstSlot, err := baseState.loadFirstSlot() if err != nil { return nil, err } - skipToEpoch, err := loadSkipToEpoch(db) + skipToEpoch, err := baseState.loadSkipToEpoch() if err != nil { return nil, err } return &EpochState{ - baseDB: db, + baseState: baseState, db: chaindb.NewTable(db, epochPrefix), epochLength: epochLength, firstSlot: firstSlot, @@ -157,21 +161,6 @@ func loadEpochLength(db chaindb.Database) (uint64, error) { return binary.LittleEndian.Uint64(data), nil } -func storeFirstSlot(db chaindb.Database, slot uint64) error { - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, slot) - return db.Put(firstSlotKey, buf) -} - -func loadFirstSlot(db chaindb.Database) 
(uint64, error) { - data, err := db.Get(firstSlotKey) - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint64(data), nil -} - // SetCurrentEpoch sets the current epoch func (s *EpochState) SetCurrentEpoch(epoch uint64) error { buf := make([]byte, 8) @@ -301,22 +290,7 @@ func (s *EpochState) GetStartSlotForEpoch(epoch uint64) (uint64, error) { // SetFirstSlot sets the first slot number of the network func (s *EpochState) SetFirstSlot(slot uint64) error { s.firstSlot = slot - return storeFirstSlot(s.baseDB, slot) -} - -func storeSkipToEpoch(db chaindb.Database, epoch uint64) error { - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, epoch) - return db.Put(skipToKey, buf) -} - -func loadSkipToEpoch(db chaindb.Database) (uint64, error) { - data, err := db.Get(skipToKey) - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint64(data), nil + return s.baseState.storeFirstSlot(slot) } // SkipVerify returns whether verification for the given header should be skipped or not. diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go new file mode 100644 index 0000000000..20ff4488b9 --- /dev/null +++ b/dot/state/grandpa.go @@ -0,0 +1,266 @@ +// Copyright 2019 ChainSafe Systems (ON) Corp. +// This file is part of gossamer. +// +// The gossamer library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The gossamer library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the gossamer library. If not, see . 
+ +package state + +import ( + "bytes" + "encoding/binary" + "errors" + "math/big" + + "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/scale" +) + +var ( + genesisSetID = uint64(0) + grandpaPrefix = "grandpa" + authoritiesPrefix = []byte("auth") + setIDChangePrefix = []byte("change") + pauseKey = []byte("pause") + resumeKey = []byte("resume") + currentSetIDKey = []byte("setID") +) + +// GrandpaState tracks information related to grandpa +type GrandpaState struct { + db chaindb.Database +} + +// NewGrandpaStateFromGenesis returns a new GrandpaState given the grandpa genesis authorities +func NewGrandpaStateFromGenesis(db chaindb.Database, genesisAuthorities []*types.GrandpaVoter) (*GrandpaState, error) { + grandpaDB := chaindb.NewTable(db, grandpaPrefix) + s := &GrandpaState{ + db: grandpaDB, + } + + err := s.setCurrentSetID(genesisSetID) + if err != nil { + return nil, err + } + + err = s.setAuthorities(genesisSetID, genesisAuthorities) + if err != nil { + return nil, err + } + + err = s.setSetIDChangeAtBlock(genesisSetID, big.NewInt(0)) + if err != nil { + return nil, err + } + + return s, nil +} + +// NewGrandpaState returns a new GrandpaState +func NewGrandpaState(db chaindb.Database) (*GrandpaState, error) { + return &GrandpaState{ + db: chaindb.NewTable(db, grandpaPrefix), + }, nil +} + +func authoritiesKey(setID uint64) []byte { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, setID) + return append(authoritiesPrefix, buf...) +} + +func setIDChangeKey(setID uint64) []byte { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, setID) + return append(setIDChangePrefix, buf...) 
+} + +// setAuthorities sets the authorities for a given setID +func (s *GrandpaState) setAuthorities(setID uint64, authorities []*types.GrandpaVoter) error { + enc, err := scale.Encode(authorities) + if err != nil { + return err + } + + return s.db.Put(authoritiesKey(setID), enc) +} + +// GetAuthorities returns the authorities for the given setID +func (s *GrandpaState) GetAuthorities(setID uint64) ([]*types.GrandpaVoter, error) { + enc, err := s.db.Get(authoritiesKey(setID)) + if err != nil { + return nil, err + } + + r := &bytes.Buffer{} + _, err = r.Write(enc) + if err != nil { + return nil, err + } + + v, err := types.DecodeGrandpaVoters(r) + if err != nil { + return nil, err + } + + return v, nil +} + +// setCurrentSetID sets the current set ID +func (s *GrandpaState) setCurrentSetID(setID uint64) error { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, setID) + return s.db.Put(currentSetIDKey, buf) +} + +// GetCurrentSetID retrieves the current set ID +func (s *GrandpaState) GetCurrentSetID() (uint64, error) { + id, err := s.db.Get(currentSetIDKey) + if err != nil { + return 0, err + } + + if len(id) < 8 { + return 0, errors.New("invalid setID") + } + + return binary.LittleEndian.Uint64(id), nil +} + +// SetNextChange sets the next authority change +func (s *GrandpaState) SetNextChange(authorities []*types.GrandpaVoter, number *big.Int) error { + currSetID, err := s.GetCurrentSetID() + if err != nil { + return err + } + + nextSetID := currSetID + 1 + err = s.setAuthorities(nextSetID, authorities) + if err != nil { + return err + } + + err = s.setSetIDChangeAtBlock(nextSetID, number) + if err != nil { + return err + } + + return nil +} + +// IncrementSetID increments the set ID +func (s *GrandpaState) IncrementSetID() error { + currSetID, err := s.GetCurrentSetID() + if err != nil { + return err + } + + nextSetID := currSetID + 1 + return s.setCurrentSetID(nextSetID) +} + +// setSetIDChangeAtBlock sets a set ID change at a certain block +func (s 
*GrandpaState) setSetIDChangeAtBlock(setID uint64, number *big.Int) error { + return s.db.Put(setIDChangeKey(setID), number.Bytes()) +} + +// GetSetIDChange returns the block number where the set ID was updated +func (s *GrandpaState) GetSetIDChange(setID uint64) (*big.Int, error) { + num, err := s.db.Get(setIDChangeKey(setID)) + if err != nil { + return nil, err + } + + return big.NewInt(0).SetBytes(num), nil +} + +// GetSetIDByBlockNumber returns the set ID for a given block number +func (s *GrandpaState) GetSetIDByBlockNumber(num *big.Int) (uint64, error) { + curr, err := s.GetCurrentSetID() + if err != nil { + return 0, err + } + + for { + changeUpper, err := s.GetSetIDChange(curr + 1) + if err == chaindb.ErrKeyNotFound { + if curr == 0 { + return 0, nil + } + curr = curr - 1 + continue + } + if err != nil { + return 0, err + } + + changeLower, err := s.GetSetIDChange(curr) + if err != nil { + return 0, err + } + + // if the given block number is greater or equal to the block number of the set ID change, + // return the current set ID + if num.Cmp(changeUpper) < 1 && num.Cmp(changeLower) == 1 { + return curr, nil + } + + if num.Cmp(changeUpper) == 1 { + return curr + 1, nil + } + + curr = curr - 1 + + if int(curr) < 0 { + return 0, nil + } + } +} + +// SetNextPause sets the next grandpa pause at the given block number +func (s *GrandpaState) SetNextPause(number *big.Int) error { + return s.db.Put(pauseKey, number.Bytes()) +} + +// GetNextPause returns the block number of the next grandpa pause, nil if there is no upcoming pause +func (s *GrandpaState) GetNextPause() (*big.Int, error) { + num, err := s.db.Get(pauseKey) + if err == chaindb.ErrKeyNotFound { + return nil, nil + } + + if err != nil { + return nil, err + } + + return big.NewInt(0).SetBytes(num), nil +} + +// SetNextResume sets the next grandpa resume at the given block number +func (s *GrandpaState) SetNextResume(number *big.Int) error { + return s.db.Put(resumeKey, number.Bytes()) +} + +// 
GetNextResume returns the block number of the next grandpa resume, nil if there is no upcoming resume +func (s *GrandpaState) GetNextResume() (*big.Int, error) { + num, err := s.db.Get(resumeKey) + if err == chaindb.ErrKeyNotFound { + return nil, nil + } + if err != nil { + return nil, err + } + + return big.NewInt(0).SetBytes(num), nil +} diff --git a/dot/state/grandpa_test.go b/dot/state/grandpa_test.go new file mode 100644 index 0000000000..cc1a2ee55a --- /dev/null +++ b/dot/state/grandpa_test.go @@ -0,0 +1,124 @@ +// Copyright 2019 ChainSafe Systems (ON) Corp. +// This file is part of gossamer. +// +// The gossamer library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The gossamer library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the gossamer library. If not, see . 
+ +package state + +import ( + "math/big" + "testing" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + "github.com/ChainSafe/gossamer/lib/keystore" + + "github.com/stretchr/testify/require" +) + +var ( + kr, _ = keystore.NewEd25519Keyring() + testAuths = []*types.GrandpaVoter{ + {Key: kr.Alice().Public().(*ed25519.PublicKey), ID: 0}, + } +) + +func TestNewGrandpaStateFromGenesis(t *testing.T) { + db := NewInMemoryDB(t) + gs, err := NewGrandpaStateFromGenesis(db, testAuths) + require.NoError(t, err) + + currSetID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, genesisSetID, currSetID) + + auths, err := gs.GetAuthorities(currSetID) + require.NoError(t, err) + require.Equal(t, testAuths, auths) + + num, err := gs.GetSetIDChange(0) + require.NoError(t, err) + require.Equal(t, big.NewInt(0), num) +} + +func TestGrandpaState_SetNextChange(t *testing.T) { + db := NewInMemoryDB(t) + gs, err := NewGrandpaStateFromGenesis(db, testAuths) + require.NoError(t, err) + + testAuths2 := []*types.GrandpaVoter{ + {Key: kr.Bob().Public().(*ed25519.PublicKey), ID: 0}, + } + + err = gs.SetNextChange(testAuths2, big.NewInt(1)) + require.NoError(t, err) + + auths, err := gs.GetAuthorities(genesisSetID + 1) + require.NoError(t, err) + require.Equal(t, testAuths2, auths) + + atBlock, err := gs.GetSetIDChange(genesisSetID + 1) + require.NoError(t, err) + require.Equal(t, big.NewInt(1), atBlock) +} + +func TestGrandpaState_IncrementSetID(t *testing.T) { + db := NewInMemoryDB(t) + gs, err := NewGrandpaStateFromGenesis(db, testAuths) + require.NoError(t, err) + + err = gs.IncrementSetID() + require.NoError(t, err) + + setID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, genesisSetID+1, setID) +} + +func TestGrandpaState_GetSetIDByBlockNumber(t *testing.T) { + db := NewInMemoryDB(t) + gs, err := NewGrandpaStateFromGenesis(db, testAuths) + require.NoError(t, err) + + testAuths2 := 
[]*types.GrandpaVoter{ + {Key: kr.Bob().Public().(*ed25519.PublicKey), ID: 0}, + } + + err = gs.SetNextChange(testAuths2, big.NewInt(100)) + require.NoError(t, err) + + setID, err := gs.GetSetIDByBlockNumber(big.NewInt(50)) + require.NoError(t, err) + require.Equal(t, genesisSetID, setID) + + setID, err = gs.GetSetIDByBlockNumber(big.NewInt(100)) + require.NoError(t, err) + require.Equal(t, genesisSetID, setID) + + setID, err = gs.GetSetIDByBlockNumber(big.NewInt(101)) + require.NoError(t, err) + require.Equal(t, genesisSetID+1, setID) + + err = gs.IncrementSetID() + require.NoError(t, err) + + setID, err = gs.GetSetIDByBlockNumber(big.NewInt(100)) + require.NoError(t, err) + require.Equal(t, genesisSetID, setID) + + setID, err = gs.GetSetIDByBlockNumber(big.NewInt(101)) + require.NoError(t, err) + require.Equal(t, genesisSetID+1, setID) + +} diff --git a/dot/state/initialize.go b/dot/state/initialize.go new file mode 100644 index 0000000000..c1e73cedc1 --- /dev/null +++ b/dot/state/initialize.go @@ -0,0 +1,209 @@ +// Copyright 2019 ChainSafe Systems (ON) Corp. +// This file is part of gossamer. +// +// The gossamer library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The gossamer library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the gossamer library. If not, see . 
+ +package state + +import ( + "bytes" + "fmt" + "path/filepath" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/blocktree" + "github.com/ChainSafe/gossamer/lib/genesis" + "github.com/ChainSafe/gossamer/lib/runtime" + rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" + "github.com/ChainSafe/gossamer/lib/runtime/wasmer" + "github.com/ChainSafe/gossamer/lib/trie" + + "github.com/ChainSafe/chaindb" +) + +// Initialise initialises the genesis state of the DB using the given storage trie. The trie should be loaded with the genesis storage state. +// This only needs to be called during genesis initialisation of the node; it is not called during normal startup. +func (s *Service) Initialise(gen *genesis.Genesis, header *types.Header, t *trie.Trie) error { + var db chaindb.Database + cfg := &chaindb.Config{} + + // check database type + if s.isMemDB { + cfg.InMemory = true + } + + // get data directory from service + basepath, err := filepath.Abs(s.dbPath) + if err != nil { + return fmt.Errorf("failed to read basepath: %s", err) + } + + cfg.DataDir = basepath + + // initialise database using data directory + db, err = chaindb.NewBadgerDB(cfg) + if err != nil { + return fmt.Errorf("failed to create database: %s", err) + } + s.db = db + + if err = db.ClearAll(); err != nil { + return fmt.Errorf("failed to clear database: %s", err) + } + + if err = t.Store(chaindb.NewTable(db, storagePrefix)); err != nil { + return fmt.Errorf("failed to write genesis trie to database: %w", err) + } + + s.Base = NewBaseState(db) + + rt, err := s.createGenesisRuntime(t, gen) + if err != nil { + return err + } + + babeCfg, err := s.loadBabeConfigurationFromRuntime(rt) + if err != nil { + return err + } + + // write initial genesis values to database + if err = s.storeInitialValues(gen.GenesisData(), header, t); err != nil { + return fmt.Errorf("failed to write genesis values to database: %s", err) + } + + // create and store blocktree from genesis 
block + bt := blocktree.NewBlockTreeFromRoot(header, db) + err = bt.Store() + if err != nil { + return fmt.Errorf("failed to write blocktree to database: %s", err) + } + + // create block state from genesis block + blockState, err := NewBlockStateFromGenesis(db, header) + if err != nil { + return fmt.Errorf("failed to create block state from genesis: %s", err) + } + + // create storage state from genesis trie + storageState, err := NewStorageState(db, blockState, t) + if err != nil { + return fmt.Errorf("failed to create storage state from trie: %s", err) + } + + epochState, err := NewEpochStateFromGenesis(db, babeCfg) + if err != nil { + return fmt.Errorf("failed to create epoch state: %s", err) + } + + grandpaAuths, err := loadGrandpaAuthorities(t) + if err != nil { + return fmt.Errorf("failed to load grandpa authorities: %w", err) + } + + grandpaState, err := NewGrandpaStateFromGenesis(db, grandpaAuths) + if err != nil { + return fmt.Errorf("failed to create grandpa state: %s", err) + } + + // check database type + if s.isMemDB { + // append memory database to state service + s.db = db + + // append storage state and block state to state service + s.Storage = storageState + s.Block = blockState + s.Epoch = epochState + s.Grandpa = grandpaState + } else if err = db.Close(); err != nil { + return fmt.Errorf("failed to close database: %s", err) + } + + logger.Info("state", "genesis hash", blockState.genesisHash) + return nil +} + +func (s *Service) loadBabeConfigurationFromRuntime(r runtime.Instance) (*types.BabeConfiguration, error) { + // load and store initial BABE epoch configuration + babeCfg, err := r.BabeConfiguration() + if err != nil { + return nil, fmt.Errorf("failed to fetch genesis babe configuration: %w", err) + } + + r.Stop() + + if s.BabeThresholdDenominator != 0 { + babeCfg.C1 = s.BabeThresholdNumerator + babeCfg.C2 = s.BabeThresholdDenominator + } + + return babeCfg, nil +} + +func loadGrandpaAuthorities(t *trie.Trie) ([]*types.GrandpaVoter, error) 
{ + authsRaw := t.Get(runtime.GrandpaAuthoritiesKey) + if authsRaw == nil { + return []*types.GrandpaVoter{}, nil + } + + r := &bytes.Buffer{} + _, _ = r.Write(authsRaw[1:]) + return types.DecodeGrandpaVoters(r) +} + +// storeInitialValues writes initial genesis values to the state database +func (s *Service) storeInitialValues(data *genesis.Data, header *types.Header, t *trie.Trie) error { + // write genesis trie to database + if err := t.Store(chaindb.NewTable(s.db, storagePrefix)); err != nil { + return fmt.Errorf("failed to write trie to database: %s", err) + } + + // write storage hash to database + if err := s.Base.StoreLatestStorageHash(t.MustHash()); err != nil { + return fmt.Errorf("failed to write storage hash to database: %s", err) + } + + // write best block hash to state database + if err := s.Base.StoreBestBlockHash(header.Hash()); err != nil { + return fmt.Errorf("failed to write best block hash to database: %s", err) + } + + // write genesis data to state database + if err := s.Base.StoreGenesisData(data); err != nil { + return fmt.Errorf("failed to write genesis data to database: %s", err) + } + + return nil +} + +func (s *Service) createGenesisRuntime(t *trie.Trie, gen *genesis.Genesis) (runtime.Instance, error) { + // load genesis state into database + genTrie, err := rtstorage.NewTrieState(t) + if err != nil { + return nil, fmt.Errorf("failed to instantiate TrieState: %w", err) + } + + // create genesis runtime + rtCfg := &wasmer.Config{} + rtCfg.Storage = genTrie + rtCfg.LogLvl = s.logLvl + + r, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg) + if err != nil { + return nil, fmt.Errorf("failed to create genesis runtime: %w", err) + } + + return r, nil +} diff --git a/dot/state/service.go b/dot/state/service.go index 6fb9ebdc2c..1c378c879e 100644 --- a/dot/state/service.go +++ b/dot/state/service.go @@ -25,9 +25,6 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" - 
"github.com/ChainSafe/gossamer/lib/genesis" - rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/chaindb" @@ -42,10 +39,12 @@ type Service struct { logLvl log.Lvl db chaindb.Database isMemDB bool // set to true if using an in-memory database; only used for testing. + Base *BaseState Storage *StorageState Block *BlockState Transaction *TransactionState Epoch *EpochState + Grandpa *GrandpaState closeCh chan interface{} // Below are for testing only. @@ -71,7 +70,7 @@ func NewService(path string, lvl log.Lvl) *Service { } // UseMemDB tells the service to use an in-memory key-value store instead of a persistent database. -// This should be called after NewService, and before Initialize. +// This should be called after NewService, and before Initialise. // This should only be used for testing. func (s *Service) UseMemDB() { s.isMemDB = true @@ -82,151 +81,9 @@ func (s *Service) DB() chaindb.Database { return s.db } -// Initialize initializes the genesis state of the DB using the given storage trie. The trie should be loaded with the genesis storage state. -// This only needs to be called during genesis initialization of the node; it doesn't need to be called during normal startup. 
-func (s *Service) Initialize(gen *genesis.Genesis, header *types.Header, t *trie.Trie) error { - var db chaindb.Database - cfg := &chaindb.Config{} - - // check database type - if s.isMemDB { - cfg.InMemory = true - } - - // get data directory from service - basepath, err := filepath.Abs(s.dbPath) - if err != nil { - return fmt.Errorf("failed to read basepath: %s", err) - } - - cfg.DataDir = basepath - - // initialize database using data directory - db, err = chaindb.NewBadgerDB(cfg) - if err != nil { - return fmt.Errorf("failed to create database: %s", err) - } - - if err = db.ClearAll(); err != nil { - return fmt.Errorf("failed to clear database: %s", err) - } - - if err = t.Store(chaindb.NewTable(db, storagePrefix)); err != nil { - return fmt.Errorf("failed to write genesis trie to database: %w", err) - } - - babeCfg, err := s.loadBabeConfigurationFromRuntime(t, gen) - if err != nil { - return err - } - - // write initial genesis values to database - if err = s.storeInitialValues(db, gen.GenesisData(), header, t); err != nil { - return fmt.Errorf("failed to write genesis values to database: %s", err) - } - - // create and store blockree from genesis block - bt := blocktree.NewBlockTreeFromRoot(header, db) - err = bt.Store() - if err != nil { - return fmt.Errorf("failed to write blocktree to database: %s", err) - } - - // create block state from genesis block - blockState, err := NewBlockStateFromGenesis(db, header) - if err != nil { - return fmt.Errorf("failed to create block state from genesis: %s", err) - } - - // create storage state from genesis trie - storageState, err := NewStorageState(db, blockState, t) - if err != nil { - return fmt.Errorf("failed to create storage state from trie: %s", err) - } - - epochState, err := NewEpochStateFromGenesis(db, babeCfg) - if err != nil { - return fmt.Errorf("failed to create epoch state: %s", err) - } - - // check database type - if s.isMemDB { - // append memory database to state service - s.db = db - - // append 
storage state and block state to state service - s.Storage = storageState - s.Block = blockState - s.Epoch = epochState - } else if err = db.Close(); err != nil { - return fmt.Errorf("failed to close database: %s", err) - } - - logger.Info("state", "genesis hash", blockState.genesisHash) - return nil -} - -func (s *Service) loadBabeConfigurationFromRuntime(t *trie.Trie, gen *genesis.Genesis) (*types.BabeConfiguration, error) { - // load genesis state into database - genTrie, err := rtstorage.NewTrieState(t) - if err != nil { - return nil, fmt.Errorf("failed to instantiate TrieState: %w", err) - } - - // create genesis runtime - rtCfg := &wasmer.Config{} - rtCfg.Storage = genTrie - rtCfg.LogLvl = s.logLvl - - r, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg) - if err != nil { - return nil, fmt.Errorf("failed to create genesis runtime: %w", err) - } - - // load and store initial BABE epoch configuration - babeCfg, err := r.BabeConfiguration() - if err != nil { - return nil, fmt.Errorf("failed to fetch genesis babe configuration: %w", err) - } - - r.Stop() - - if s.BabeThresholdDenominator != 0 { - babeCfg.C1 = s.BabeThresholdNumerator - babeCfg.C2 = s.BabeThresholdDenominator - } - - return babeCfg, nil -} - -// storeInitialValues writes initial genesis values to the state database -func (s *Service) storeInitialValues(db chaindb.Database, data *genesis.Data, header *types.Header, t *trie.Trie) error { - // write genesis trie to database - if err := StoreTrie(chaindb.NewTable(db, storagePrefix), t); err != nil { - return fmt.Errorf("failed to write trie to database: %s", err) - } - - // write storage hash to database - if err := StoreLatestStorageHash(db, t.MustHash()); err != nil { - return fmt.Errorf("failed to write storage hash to database: %s", err) - } - - // write best block hash to state database - if err := StoreBestBlockHash(db, header.Hash()); err != nil { - return fmt.Errorf("failed to write best block hash to database: %s", err) - } - - // write genesis 
data to state database - if err := StoreGenesisData(db, data); err != nil { - return fmt.Errorf("failed to write genesis data to database: %s", err) - } - - return nil -} - -// Start initializes the Storage database and the Block database. +// Start initialises the Storage database and the Block database. func (s *Service) Start() error { - if !s.isMemDB && (s.Storage != nil || s.Block != nil || s.Epoch != nil) { + if !s.isMemDB && (s.Storage != nil || s.Block != nil || s.Epoch != nil || s.Grandpa != nil) { return nil } @@ -241,17 +98,18 @@ func (s *Service) Start() error { DataDir: basepath, } - // initialize database + // initialise database db, err = chaindb.NewBadgerDB(cfg) if err != nil { return err } s.db = db + s.Base = NewBaseState(db) } // retrieve latest header - bestHash, err := LoadBestBlockHash(db) + bestHash, err := s.Base.LoadBestBlockHash() if err != nil { return fmt.Errorf("failed to get best block hash: %w", err) } @@ -271,17 +129,17 @@ func (s *Service) Start() error { } // if blocktree head isn't "best hash", then the node shutdown abnormally. - // restore state from last finalized hash. + // restore state from last finalised hash. 
btHead := bt.DeepestBlockHash() if !bytes.Equal(btHead[:], bestHash[:]) { - logger.Info("detected abnormal node shutdown, restoring from last finalized block") + logger.Info("detected abnormal node shutdown, restoring from last finalised block") - lastFinalized, err := s.Block.GetFinalizedHeader(0, 0) //nolint + lastFinalised, err := s.Block.GetFinalizedHeader(0, 0) //nolint if err != nil { - return fmt.Errorf("failed to get latest finalized block: %w", err) + return fmt.Errorf("failed to get latest finalised block: %w", err) } - s.Block.bt = blocktree.NewBlockTreeFromRoot(lastFinalized, db) + s.Block.bt = blocktree.NewBlockTreeFromRoot(lastFinalised, db) } // create storage state @@ -290,7 +148,7 @@ func (s *Service) Start() error { return fmt.Errorf("failed to create storage state: %w", err) } - stateRoot, err := LoadLatestStorageHash(s.db) + stateRoot, err := s.Base.LoadLatestStorageHash() if err != nil { return fmt.Errorf("cannot load latest storage root: %w", err) } @@ -312,6 +170,11 @@ func (s *Service) Start() error { return fmt.Errorf("failed to create epoch state: %w", err) } + s.Grandpa, err = NewGrandpaState(db) + if err != nil { + return fmt.Errorf("failed to create grandpa state: %w", err) + } + num, _ := s.Block.BestBlockNumber() logger.Info("created state service", "head", s.Block.BestBlockHash(), "highest number", num) // Start background goroutine to GC pruned keys. 
@@ -350,7 +213,36 @@ func (s *Service) Rewind(toBlock int64) error { return err } - return StoreBestBlockHash(s.db, newHead) + err = s.Block.SetFinalizedHash(header.Hash(), 0, 0) + if err != nil { + return err + } + + // update the current grandpa set ID + prevSetID, err := s.Grandpa.GetCurrentSetID() + if err != nil { + return err + } + + newSetID, err := s.Grandpa.GetSetIDByBlockNumber(header.Number) + if err != nil { + return err + } + + err = s.Grandpa.setCurrentSetID(newSetID) + if err != nil { + return err + } + + // remove previously set grandpa changes, need to go up to prevSetID+1 in case of a scheduled change + for i := newSetID + 1; i <= prevSetID+1; i++ { + err = s.Grandpa.db.Del(setIDChangeKey(i)) + if err != nil { + return err + } + } + + return s.Base.StoreBestBlockHash(newHead) } // Stop closes each state database @@ -368,13 +260,13 @@ func (s *Service) Stop() error { return errTrieDoesNotExist(head) } - if err = StoreLatestStorageHash(s.db, head); err != nil { + if err = s.Base.StoreLatestStorageHash(head); err != nil { return err } logger.Debug("storing latest storage trie", "root", head) - if err = StoreTrie(s.Storage.db, t); err != nil { + if err = t.Store(s.Storage.db); err != nil { return err } @@ -383,7 +275,7 @@ func (s *Service) Stop() error { } hash := s.Block.BestBlockHash() - if err = StoreBestBlockHash(s.db, hash); err != nil { + if err = s.Base.StoreBestBlockHash(hash); err != nil { return err } @@ -411,14 +303,13 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e if s.isMemDB { cfg.InMemory = true - } else { - var err error + } - // initialize database using data directory - s.db, err = chaindb.NewBadgerDB(cfg) - if err != nil { - return fmt.Errorf("failed to create database: %s", err) - } + var err error + // initialise database using data directory + s.db, err = chaindb.NewBadgerDB(cfg) + if err != nil { + return fmt.Errorf("failed to create database: %s", err) } block := &BlockState{ @@ -434,8 
+325,9 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e return err } - logger.Info("storing first slot...", "slot", firstSlot) - if err = storeFirstSlot(s.db, firstSlot); err != nil { + s.Base = NewBaseState(s.db) + + if err = s.Base.storeFirstSlot(firstSlot); err != nil { return err } @@ -447,7 +339,7 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e skipTo := blockEpoch + 1 - if err := storeSkipToEpoch(s.db, skipTo); err != nil { + if err := s.Base.storeSkipToEpoch(skipTo); err != nil { return err } logger.Debug("skip BABE verification up to epoch", "epoch", skipTo) @@ -461,13 +353,13 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e return fmt.Errorf("trie state root does not equal header state root") } - if err := StoreLatestStorageHash(s.db, root); err != nil { + if err := s.Base.StoreLatestStorageHash(root); err != nil { return err } logger.Info("importing storage trie...", "basepath", s.dbPath, "root", root) - if err := StoreTrie(storage.db, t); err != nil { + if err := t.Store(storage.db); err != nil { return err } @@ -476,7 +368,7 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e return err } - if err := StoreBestBlockHash(s.db, header.Hash()); err != nil { + if err := s.Base.StoreBestBlockHash(header.Hash()); err != nil { return err } diff --git a/dot/state/service_test.go b/dot/state/service_test.go index f603c5a065..c16df057da 100644 --- a/dot/state/service_test.go +++ b/dot/state/service_test.go @@ -28,6 +28,7 @@ import ( "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/lib/utils" + "github.com/ChainSafe/chaindb" log "github.com/ChainSafe/log15" "github.com/stretchr/testify/require" ) @@ -63,7 +64,7 @@ func TestService_Start(t *testing.T) { defer utils.RemoveTestDir(t) genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := state.Initialize(genData, genesisHeader, genTrie) 
+ err := state.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = state.Start() @@ -73,18 +74,18 @@ func TestService_Start(t *testing.T) { require.NoError(t, err) } -func TestService_Initialize(t *testing.T) { +func TestService_Initialise(t *testing.T) { state := newTestService(t) defer utils.RemoveTestDir(t) genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := state.Initialize(genData, genesisHeader, genTrie) + err := state.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) genesisHeader, err = types.NewHeader(common.NewHash([]byte{77}), genTrie.MustHash(), trie.EmptyHash, big.NewInt(0), types.Digest{}) require.NoError(t, err) - err = state.Initialize(genData, genesisHeader, genTrie) + err = state.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = state.Start() @@ -99,7 +100,7 @@ func TestMemDB_Start(t *testing.T) { state := newTestMemDBService() genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := state.Initialize(genData, genesisHeader, genTrie) + err := state.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = state.Start() @@ -118,7 +119,7 @@ func TestService_BlockTree(t *testing.T) { stateA := NewService(testDir, log.LvlTrace) genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := stateA.Initialize(genData, genesisHeader, genTrie) + err := stateA.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = stateA.Start() @@ -148,7 +149,7 @@ func TestService_PruneStorage(t *testing.T) { serv.UseMemDB() genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := serv.Initialize(genData, genesisHeader, genTrie) + err := serv.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = serv.Start() @@ -171,13 +172,13 @@ func TestService_PruneStorage(t *testing.T) { err = serv.Storage.StoreTrie(trieState) require.NoError(t, err) - // Only finalize a block at 
height 3 + // Only finalise a block at height 3 if i == 2 { toFinalize = block.Header.Hash() } } - // add some blocks to prune, on a different chain from the finalized block + // add some blocks to prune, on a different chain from the finalised block prunedArr := []prunedBlock{} parentHash := serv.Block.GenesisHash() for i := 0; i < 3; i++ { @@ -202,7 +203,7 @@ func TestService_PruneStorage(t *testing.T) { parentHash = block.Header.Hash() } - // finalize a block + // finalise a block serv.Block.SetFinalizedHash(toFinalize, 0, 0) time.Sleep(1 * time.Second) @@ -223,12 +224,24 @@ func TestService_Rewind(t *testing.T) { serv.UseMemDB() genData, genTrie, genesisHeader := newTestGenesisWithTrieAndHeader(t) - err := serv.Initialize(genData, genesisHeader, genTrie) + err := serv.Initialise(genData, genesisHeader, genTrie) require.NoError(t, err) err = serv.Start() require.NoError(t, err) + err = serv.Grandpa.setCurrentSetID(3) + require.NoError(t, err) + + err = serv.Grandpa.setSetIDChangeAtBlock(1, big.NewInt(5)) + require.NoError(t, err) + + err = serv.Grandpa.setSetIDChangeAtBlock(2, big.NewInt(8)) + require.NoError(t, err) + + err = serv.Grandpa.setSetIDChangeAtBlock(3, big.NewInt(10)) + require.NoError(t, err) + AddBlocksToState(t, serv.Block, 12) err = serv.Rewind(6) require.NoError(t, err) @@ -236,6 +249,19 @@ func TestService_Rewind(t *testing.T) { num, err := serv.Block.BestBlockNumber() require.NoError(t, err) require.Equal(t, big.NewInt(6), num) + + setID, err := serv.Grandpa.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, uint64(1), setID) + + _, err = serv.Grandpa.GetSetIDChange(1) + require.NoError(t, err) + + _, err = serv.Grandpa.GetSetIDChange(2) + require.Equal(t, chaindb.ErrKeyNotFound, err) + + _, err = serv.Grandpa.GetSetIDChange(3) + require.Equal(t, chaindb.ErrKeyNotFound, err) } func TestService_Import(t *testing.T) { @@ -246,7 +272,9 @@ func TestService_Import(t *testing.T) { serv.UseMemDB() genData, genTrie, genesisHeader := 
newTestGenesisWithTrieAndHeader(t) - err := serv.Initialize(genData, genesisHeader, genTrie) + err := serv.Initialise(genData, genesisHeader, genTrie) + require.NoError(t, err) + err = serv.db.Close() require.NoError(t, err) tr := trie.NewEmptyTrie() diff --git a/dot/state/storage.go b/dot/state/storage.go index 73bea66666..1aefb4f8ac 100644 --- a/dot/state/storage.go +++ b/dot/state/storage.go @@ -25,7 +25,6 @@ import ( "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/genesis" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" ) @@ -45,13 +44,12 @@ type StorageState struct { blockState *BlockState tries map[common.Hash]*trie.Trie // map of root -> trie - baseDB chaindb.Database - db chaindb.Database - lock sync.RWMutex + db chaindb.Database + lock sync.RWMutex // change notifiers - changedLock sync.RWMutex - subscriptions map[byte]*StorageSubscription + changedLock sync.RWMutex + observerList []Observer syncing bool } @@ -70,11 +68,10 @@ func NewStorageState(db chaindb.Database, blockState *BlockState, t *trie.Trie) tries[t.MustHash()] = t return &StorageState{ - blockState: blockState, - tries: tries, - baseDB: db, - db: chaindb.NewTable(db, storagePrefix), - subscriptions: make(map[byte]*StorageSubscription), + blockState: blockState, + tries: tries, + db: chaindb.NewTable(db, storagePrefix), + observerList: []Observer{}, }, nil } @@ -116,11 +113,7 @@ func (s *StorageState) StoreTrie(ts *rtstorage.TrieState) error { return err } - go func() { - if err := s.notifyStorageSubscriptions(root); err != nil { - logger.Warn("failed to notify storage subscriptions", "error", err) - } - }() + go s.notifyAll(root) return nil } @@ -163,59 +156,10 @@ func (s *StorageState) TrieState(root *common.Hash) (*rtstorage.TrieState, error return curr, nil } -// StoreInDB encodes the entire trie and writes it to the DB -// The key 
to the DB entry is the root hash of the trie -func (s *StorageState) notifyStorageSubscriptions(root common.Hash) error { - s.lock.RLock() - t := s.tries[root] - s.lock.RUnlock() - - if t == nil { - return errTrieDoesNotExist(root) - } - - // notify subscribers of database changes - s.changedLock.Lock() - defer s.changedLock.Unlock() - - for _, sub := range s.subscriptions { - subRes := &SubscriptionResult{ - Hash: root, - } - if len(sub.Filter) == 0 { - // no filter, so send all changes - ent := t.Entries() - for k, v := range ent { - if k != ":code" { - // todo, currently we're ignoring :code since this is a lot of data - kv := &KeyValue{ - Key: common.MustHexToBytes(fmt.Sprintf("0x%x", k)), - Value: v, - } - subRes.Changes = append(subRes.Changes, *kv) - } - } - } else { - // filter result to include only interested keys - for k := range sub.Filter { - value := t.Get(common.MustHexToBytes(k)) - kv := &KeyValue{ - Key: common.MustHexToBytes(k), - Value: value, - } - subRes.Changes = append(subRes.Changes, *kv) - } - } - s.notifyChanged(subRes) - } - - return nil -} - // LoadFromDB loads an encoded trie from the DB where the key is `root` func (s *StorageState) LoadFromDB(root common.Hash) (*trie.Trie, error) { t := trie.NewEmptyTrie() - err := LoadTrie(s.db, t, root) + err := t.Load(s.db, root) if err != nil { return nil, err } @@ -438,8 +382,3 @@ func (s *StorageState) pruneStorage(closeCh chan interface{}) { } } } - -// GetGenesisData retrieves current genesis data from database -func (s *StorageState) GetGenesisData() (*genesis.Data, error) { - return LoadGenesisData(s.baseDB) -} diff --git a/dot/state/storage_notify.go b/dot/state/storage_notify.go index 6e9fbac8e7..0d632c09ec 100644 --- a/dot/state/storage_notify.go +++ b/dot/state/storage_notify.go @@ -16,7 +16,8 @@ package state import ( - "errors" + "fmt" + "reflect" "github.com/ChainSafe/gossamer/lib/common" ) @@ -33,55 +34,107 @@ type SubscriptionResult struct { Changes []KeyValue } 
-//StorageSubscription holds data for Subscription to Storage -type StorageSubscription struct { - Filter map[string]bool - Listener chan<- *SubscriptionResult +// Observer interface defines functions needed for observers, Observer Design Pattern +type Observer interface { + Update(result *SubscriptionResult) + GetID() uint + GetFilter() map[string][]byte } -// RegisterStorageChangeChannel function to register storage change channels -func (s *StorageState) RegisterStorageChangeChannel(sub StorageSubscription) (byte, error) { - s.changedLock.RLock() +// RegisterStorageObserver to add abserver to notification list +func (s *StorageState) RegisterStorageObserver(o Observer) { + s.observerList = append(s.observerList, o) - if len(s.subscriptions) == 256 { - return 0, errors.New("storage subscriptions limit reached") + // notifyObserver here to send storage value of current state + sr, err := s.blockState.BestBlockStateRoot() + if err != nil { + logger.Debug("error registering storage change channel", "error", err) + return } - - var id byte - for { - id = generateID() - if s.subscriptions[id] == nil { - break + go func() { + if err := s.notifyObserver(sr, o); err != nil { + logger.Warn("failed to notify storage subscriptions", "error", err) } - } + }() - s.changedLock.RUnlock() - - s.changedLock.Lock() - s.subscriptions[id] = &sub - s.changedLock.Unlock() - return id, nil } -// UnregisterStorageChangeChannel removes the storage change notification channel with the given ID. -// A channel must be unregistered before closing it. 
-func (s *StorageState) UnregisterStorageChangeChannel(id byte) { - s.changedLock.Lock() - defer s.changedLock.Unlock() +// UnregisterStorageObserver removes observer from notification list +func (s *StorageState) UnregisterStorageObserver(o Observer) { + s.observerList = s.removeFromSlice(s.observerList, o) +} - delete(s.subscriptions, id) +func (s *StorageState) notifyAll(root common.Hash) { + s.changedLock.RLock() + defer s.changedLock.RUnlock() + for _, observer := range s.observerList { + err := s.notifyObserver(root, observer) + if err != nil { + logger.Warn("failed to notify storage subscriptions", "error", err) + } + } } -func (s *StorageState) notifyChanged(change *SubscriptionResult) { - if len(s.subscriptions) == 0 { - return +func (s *StorageState) notifyObserver(root common.Hash, o Observer) error { + t, err := s.TrieState(&root) + if err != nil { + return err } - logger.Trace("notifying changed storage chans...", "chans", s.subscriptions) + if t == nil { + return errTrieDoesNotExist(root) + } - for _, ch := range s.subscriptions { - go func(ch chan<- *SubscriptionResult) { - ch <- change - }(ch.Listener) + subRes := &SubscriptionResult{ + Hash: root, + } + if len(o.GetFilter()) == 0 { + // no filter, so send all changes + ent := t.TrieEntries() + for k, v := range ent { + if k != ":code" { + // todo, currently we're ignoring :code since this is a lot of data + kv := &KeyValue{ + Key: common.MustHexToBytes(fmt.Sprintf("0x%x", k)), + Value: v, + } + subRes.Changes = append(subRes.Changes, *kv) + } + } + } else { + // filter result to include only interested keys + for k, cachedValue := range o.GetFilter() { + value := t.Get(common.MustHexToBytes(k)) + if !reflect.DeepEqual(cachedValue, value) { + kv := &KeyValue{ + Key: common.MustHexToBytes(k), + Value: value, + } + subRes.Changes = append(subRes.Changes, *kv) + o.GetFilter()[k] = value + } + } + } + + if len(subRes.Changes) > 0 { + logger.Trace("update observer", "changes", subRes.Changes) + go func() 
{ + o.Update(subRes) + }() + } + + return nil +} + +func (s *StorageState) removeFromSlice(observerList []Observer, observerToRemove Observer) []Observer { + s.changedLock.Lock() + defer s.changedLock.Unlock() + observerListLength := len(observerList) + for i, observer := range observerList { + if observerToRemove.GetID() == observer.GetID() { + observerList[i] = observerList[observerListLength-1] + return observerList[:observerListLength-1] + } } + return observerList } diff --git a/dot/state/storage_notify_test.go b/dot/state/storage_notify_test.go index 4ed90dc073..1a832d1e5b 100644 --- a/dot/state/storage_notify_test.go +++ b/dot/state/storage_notify_test.go @@ -30,51 +30,69 @@ import ( "github.com/stretchr/testify/require" ) -func TestStorageState_RegisterStorageChangeChannel(t *testing.T) { +type MockStorageObserver struct { + id uint + filter map[string][]byte + lastUpdate *SubscriptionResult + m sync.RWMutex +} + +func (m *MockStorageObserver) Update(change *SubscriptionResult) { + m.m.Lock() + m.lastUpdate = change + m.m.Unlock() + +} +func (m *MockStorageObserver) GetID() uint { + return m.id +} +func (m *MockStorageObserver) GetFilter() map[string][]byte { + return m.filter +} + +func TestStorageState_RegisterStorageObserver(t *testing.T) { ss := newTestStorageState(t) ts, err := ss.TrieState(nil) require.NoError(t, err) - ch := make(chan *SubscriptionResult) - sub := StorageSubscription{ - Filter: make(map[string]bool), - Listener: ch, - } - id, err := ss.RegisterStorageChangeChannel(sub) - require.NoError(t, err) + observer := &MockStorageObserver{} + ss.RegisterStorageObserver(observer) - defer ss.UnregisterStorageChangeChannel(id) + defer ss.UnregisterStorageObserver(observer) ts.Set([]byte("mackcom"), []byte("wuz here")) err = ss.StoreTrie(ts) require.NoError(t, err) - for i := 0; i < 1; i++ { - select { - case <-ch: - case <-time.After(testMessageTimeout): - t.Fatal("did not receive storage change message") - } + expectedResult := 
&SubscriptionResult{ + Hash: ts.MustRoot(), + Changes: []KeyValue{{ + Key: []byte("mackcom"), + Value: []byte("wuz here"), + }}, } + time.Sleep(time.Millisecond) + observer.m.RLock() + defer observer.m.RUnlock() + require.Equal(t, expectedResult, observer.lastUpdate) } -func TestStorageState_RegisterStorageChangeChannel_Multi(t *testing.T) { - //t.Skip() +func TestStorageState_RegisterStorageObserver_Multi(t *testing.T) { ss := newTestStorageState(t) ts, err := ss.TrieState(nil) require.NoError(t, err) num := 5 - chs := make([]chan *SubscriptionResult, num) - ids := make([]byte, num) + + var observers []*MockStorageObserver for i := 0; i < num; i++ { - chs[i] = make(chan *SubscriptionResult) - sub := StorageSubscription{ - Listener: chs[i], + observer := &MockStorageObserver{ + id: uint(i), } - ids[i], err = ss.RegisterStorageChangeChannel(sub) + observers = append(observers, observer) + ss.RegisterStorageObserver(observer) require.NoError(t, err) } @@ -86,33 +104,22 @@ func TestStorageState_RegisterStorageChangeChannel_Multi(t *testing.T) { err = ss.StoreTrie(ts) require.NoError(t, err) - var wg sync.WaitGroup - wg.Add(num) - - for i, ch := range chs { - - go func(i int, ch chan *SubscriptionResult) { - select { - case c := <-ch: - require.NotNil(t, c.Hash) - require.Equal(t, key1, c.Changes[0].Key) - require.Equal(t, value1, c.Changes[0].Value) - case <-time.After(testMessageTimeout): - t.Error("did not receive storage change: ch=", i) - } - wg.Done() - }(i, ch) + time.Sleep(time.Millisecond * 10) + for _, observer := range observers { + observer.m.RLock() + require.NotNil(t, observer.lastUpdate.Hash) + require.Equal(t, key1, observer.lastUpdate.Changes[0].Key) + require.Equal(t, value1, observer.lastUpdate.Changes[0].Value) + observer.m.RUnlock() } - wg.Wait() - - for _, id := range ids { - ss.UnregisterStorageChangeChannel(id) + for _, observer := range observers { + ss.UnregisterStorageObserver(observer) } } -func 
TestStorageState_RegisterStorageChangeChannel_Multi_Filter(t *testing.T) { +func TestStorageState_RegisterStorageObserver_Multi_Filter(t *testing.T) { ss := newTestStorageState(t) ts, err := ss.TrieState(nil) require.NoError(t, err) @@ -121,51 +128,35 @@ func TestStorageState_RegisterStorageChangeChannel_Multi_Filter(t *testing.T) { value1 := []byte("value1") num := 5 - chs := make([]chan *SubscriptionResult, num) - ids := make([]byte, num) - subFilter := make(map[string]bool) - subFilter[common.BytesToHex(key1)] = true + var observers []*MockStorageObserver for i := 0; i < num; i++ { - chs[i] = make(chan *SubscriptionResult) - sub := StorageSubscription{ - Filter: subFilter, - Listener: chs[i], + observer := &MockStorageObserver{ + id: uint(i), + filter: map[string][]byte{ + common.BytesToHex(key1): {}, + }, } - ids[i], err = ss.RegisterStorageChangeChannel(sub) - require.NoError(t, err) + observers = append(observers, observer) + ss.RegisterStorageObserver(observer) } ts.Set(key1, value1) - err = ss.StoreTrie(ts) require.NoError(t, err) - time.Sleep(time.Millisecond * 500) - - var wg sync.WaitGroup - wg.Add(num) - - for i, ch := range chs { - - go func(i int, ch chan *SubscriptionResult) { - select { - case c := <-ch: - require.NotNil(t, c.Hash) - require.Equal(t, key1, c.Changes[0].Key) - require.Equal(t, value1, c.Changes[0].Value) - wg.Done() - case <-time.After(testMessageTimeout): - t.Error("did not receive storage change: ch=", i) - } - }(i, ch) + time.Sleep(time.Millisecond * 10) + for _, observer := range observers { + observer.m.RLock() + require.NotNil(t, observer.lastUpdate.Hash) + require.Equal(t, key1, observer.lastUpdate.Changes[0].Key) + require.Equal(t, value1, observer.lastUpdate.Changes[0].Value) + observer.m.RUnlock() } - wg.Wait() - - for _, id := range ids { - ss.UnregisterStorageChangeChannel(id) + for _, observer := range observers { + ss.UnregisterStorageObserver(observer) } } diff --git a/dot/sync/interface.go b/dot/sync/interface.go 
index 1a457091e2..4f284d19e6 100644 --- a/dot/sync/interface.go +++ b/dot/sync/interface.go @@ -69,8 +69,6 @@ type BlockProducer interface { // DigestHandler is the interface for the consensus digest handler type DigestHandler interface { - Start() - Stop() HandleConsensusDigest(*types.ConsensusDigest, *types.Header) error } @@ -78,3 +76,8 @@ type DigestHandler interface { type Verifier interface { VerifyBlock(header *types.Header) error } + +// FinalityGadget implements justification verification functionality +type FinalityGadget interface { + VerifyBlockJustification([]byte) error +} diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 2348aeeaeb..88497b49a4 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -44,8 +44,9 @@ type Service struct { storageState StorageState transactionState TransactionState blockProducer BlockProducer + finalityGadget FinalityGadget - // Synchronization variables + // Synchronisation variables synced bool highestSeenBlock *big.Int // highest block number we have seen runtime runtime.Instance @@ -63,6 +64,7 @@ type Config struct { BlockState BlockState StorageState StorageState BlockProducer BlockProducer + FinalityGadget FinalityGadget TransactionState TransactionState Runtime runtime.Instance Verifier Verifier @@ -105,6 +107,7 @@ func NewService(cfg *Config) (*Service, error) { blockState: cfg.BlockState, storageState: cfg.StorageState, blockProducer: cfg.BlockProducer, + finalityGadget: cfg.FinalityGadget, synced: true, highestSeenBlock: big.NewInt(0), transactionState: cfg.TransactionState, @@ -133,19 +136,41 @@ func (s *Service) HandleBlockAnnounce(msg *network.BlockAnnounceMessage) error { } // save block header if we don't have it already - if !has { - err = s.blockState.SetHeader(header) + if has { + return nil + } + + err = s.blockState.SetHeader(header) + if err != nil { + return err + } + logger.Debug( + "saved block header to block state", + "number", header.Number, + "hash", header.Hash(), + ) + return nil 
+} + +// ProcessJustification processes block data containing justifications +func (s *Service) ProcessJustification(data []*types.BlockData) (int, error) { + if len(data) == 0 { + return 0, ErrNilBlockData + } + + for i, bd := range data { + header, err := s.blockState.GetHeader(bd.Hash) if err != nil { - return err + return i, err + } + + if bd.Justification != nil && bd.Justification.Exists() { + logger.Debug("handling Justification...", "number", header.Number, "hash", bd.Hash) + s.handleJustification(header, bd.Justification.Value()) } - logger.Debug( - "saved block header to block state", - "number", header.Number, - "hash", header.Hash(), - ) } - return nil + return 0, nil } // ProcessBlockData processes the BlockData from a BlockResponse and returns the index of the last BlockData it handled on success, @@ -177,8 +202,14 @@ func (s *Service) ProcessBlockData(data []*types.BlockData) (int, error) { } err = s.blockState.AddBlockToBlockTree(header) - if err != nil { - logger.Debug("failed to add block to blocktree", "hash", bd.Hash, "error", err) + if err != nil && !errors.Is(err, blocktree.ErrBlockExists) { + logger.Warn("failed to add block to blocktree", "hash", bd.Hash, "error", err) + return i, err + } + + // handle consensus digests for authority changes + if s.digestHandler != nil { + s.handleDigests(header) } if bd.Justification != nil && bd.Justification.Exists() { @@ -319,7 +350,7 @@ func (s *Service) handleBlock(block *types.Block) error { if err != nil { return err } - logger.Trace("stored resulting state", "state root", ts.MustRoot()) + logger.Trace("executed block and stored resulting state", "state root", ts.MustRoot()) // TODO: batch writes in AddBlock err = s.blockState.AddBlock(block) @@ -349,9 +380,15 @@ func (s *Service) handleJustification(header *types.Header, justification []byte return } - err := s.blockState.SetFinalizedHash(header.Hash(), 0, 0) + err := s.finalityGadget.VerifyBlockJustification(justification) + if err != nil { + 
logger.Warn("failed to verify block justification", "hash", header.Hash(), "number", header.Number, "error", err) + return + } + + err = s.blockState.SetFinalizedHash(header.Hash(), 0, 0) if err != nil { - logger.Error("failed to set finalized hash", "error", err) + logger.Error("failed to set finalised hash", "error", err) return } @@ -361,7 +398,7 @@ func (s *Service) handleJustification(header *types.Header, justification []byte return } - logger.Info("🔨 finalized block", "number", header.Number, "hash", header.Hash()) + logger.Info("🔨 finalised block", "number", header.Number, "hash", header.Hash()) } func (s *Service) handleRuntimeChanges(newState *rtstorage.TrieState) error { @@ -395,13 +432,13 @@ func (s *Service) handleDigests(header *types.Header) { if d.Type() == types.ConsensusDigestType { cd, ok := d.(*types.ConsensusDigest) if !ok { - logger.Error("handleDigests", "index", i, "error", "cannot cast invalid consensus digest item") + logger.Error("handleDigests", "block number", header.Number, "index", i, "error", "cannot cast invalid consensus digest item") continue } err := s.digestHandler.HandleConsensusDigest(cd, header) if err != nil { - logger.Error("handleDigests", "index", i, "digest", cd, "error", err) + logger.Error("handleDigests", "block number", header.Number, "index", i, "digest", cd, "error", err) } } } diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 6872732975..7fae639cc8 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -27,6 +27,7 @@ import ( "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/optional" "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/ChainSafe/gossamer/lib/runtime" @@ -41,6 +42,12 @@ import ( "github.com/stretchr/testify/require" ) +type mockFinalityGadget struct{} + +func (m mockFinalityGadget) 
VerifyBlockJustification(_ []byte) error { + return nil +} + func newTestGenesisWithTrieAndHeader(t *testing.T) (*genesis.Genesis, *trie.Trie, *types.Header) { gen, err := genesis.NewGenesisFromJSONRaw("../../chain/gssmr/genesis.json") require.NoError(t, err) @@ -62,7 +69,7 @@ func newTestSyncer(t *testing.T) *Service { stateSrvc.UseMemDB() gen, genTrie, genHeader := newTestGenesisWithTrieAndHeader(t) - err := stateSrvc.Initialize(gen, genHeader, genTrie) + err := stateSrvc.Initialise(gen, genHeader, genTrie) require.NoError(t, err) err = stateSrvc.Start() @@ -102,6 +109,10 @@ func newTestSyncer(t *testing.T) *Service { cfg.LogLvl = log.LvlDebug } + if cfg.FinalityGadget == nil { + cfg.FinalityGadget = &mockFinalityGadget{} + } + syncer, err := NewService(cfg) require.NoError(t, err) return syncer @@ -352,3 +363,29 @@ func TestSyncer_HandleJustification(t *testing.T) { require.NoError(t, err) require.Equal(t, just, res) } + +func TestSyncer_ProcessJustification(t *testing.T) { + syncer := newTestSyncer(t) + + parent, err := syncer.blockState.(*state.BlockState).BestBlockHeader() + require.NoError(t, err) + block := buildBlock(t, syncer.runtime, parent) + err = syncer.blockState.(*state.BlockState).AddBlock(block) + require.NoError(t, err) + + just := []byte("testjustification") + + data := []*types.BlockData{ + { + Hash: syncer.blockState.BestBlockHash(), + Justification: optional.NewBytes(true, just), + }, + } + + _, err = syncer.ProcessJustification(data) + require.NoError(t, err) + + res, err := syncer.blockState.GetJustification(syncer.blockState.BestBlockHash()) + require.NoError(t, err) + require.Equal(t, just, res) +} diff --git a/dot/telemetry/telemetry.go b/dot/telemetry/telemetry.go index 4577a4a1f3..8e94411cac 100644 --- a/dot/telemetry/telemetry.go +++ b/dot/telemetry/telemetry.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the gossamer library. If not, see . 
+ package telemetry import ( @@ -23,6 +24,7 @@ import ( "sync" "time" + "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" @@ -30,9 +32,9 @@ import ( // Handler struct for holding telemetry related things type Handler struct { - buf bytes.Buffer - wsConn []*websocket.Conn - telemetryLogger *log.Entry + buf bytes.Buffer + wsConn []*websocket.Conn + sync.RWMutex } // MyJSONFormatter struct for defining JSON Formatter @@ -42,11 +44,11 @@ type MyJSONFormatter struct { // Format function for handling JSON formatting, this overrides default logging formatter to remove // log level, line number and timestamp func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) { - serialized, err := json.Marshal(entry.Data) + serialised, err := json.Marshal(entry.Data) if err != nil { return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) } - return append(serialized, '\n'), nil + return append(serialised, '\n'), nil } var ( @@ -64,6 +66,7 @@ func GetInstance() *Handler { } log.SetOutput(&handlerInstance.buf) log.SetFormatter(new(MyJSONFormatter)) + go handlerInstance.sender() }) } return handlerInstance @@ -75,7 +78,7 @@ func (h *Handler) AddConnections(conns []*genesis.TelemetryEndpoint) { c, _, err := websocket.DefaultDialer.Dial(v.Endpoint, nil) if err != nil { fmt.Printf("Error %v\n", err) - return + continue } h.wsConn = append(h.wsConn, c) } @@ -95,29 +98,85 @@ type ConnectionData struct { // SendConnection sends connection request message to telemetry connection func (h *Handler) SendConnection(data *ConnectionData) { + h.Lock() + defer h.Unlock() payload := log.Fields{"authority": data.Authority, "chain": data.Chain, "config": "", "genesis_hash": data.GenesisHash, "implementation": data.SystemName, "msg": "system.connected", "name": data.NodeName, "network_id": data.NetworkID, "startup_time": data.StartTime, "version": data.SystemVersion} - 
h.telemetryLogger = log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) - h.telemetryLogger.Print() - h.sendTelemtry() + telemetryLogger := log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) + telemetryLogger.Print() } // SendBlockImport sends block imported message to telemetry connection func (h *Handler) SendBlockImport(bestHash string, height *big.Int) { + h.Lock() + defer h.Unlock() payload := log.Fields{"best": bestHash, "height": height.Int64(), "msg": "block.import", "origin": "NetworkInitialSync"} - h.telemetryLogger = log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) - h.telemetryLogger.Print() - h.sendTelemtry() + telemetryLogger := log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) + telemetryLogger.Print() +} + +// NetworkData struct to hold network data telemetry information +type NetworkData struct { + peers int + rateIn float64 + rateOut float64 +} + +// NewNetworkData creates networkData struct +func NewNetworkData(peers int, rateIn, rateOut float64) *NetworkData { + return &NetworkData{ + peers: peers, + rateIn: rateIn, + rateOut: rateOut, + } +} + +// SendNetworkData send network data system.interval message to telemetry connection +func (h *Handler) SendNetworkData(data *NetworkData) { + h.Lock() + defer h.Unlock() + payload := log.Fields{"bandwidth_download": data.rateIn, "bandwidth_upload": data.rateOut, "msg": "system.interval", "peers": data.peers} + telemetryLogger := log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) + telemetryLogger.Print() +} + +// BlockIntervalData struct to hold data for block system.interval message +type BlockIntervalData struct { + BestHash common.Hash + BestHeight *big.Int + FinalizedHash common.Hash + FinalizedHeight *big.Int + TXCount int + UsedStateCacheSize int } -func (h *Handler) sendTelemtry() { - for _, c := range h.wsConn { - err := c.WriteMessage(websocket.TextMessage, h.buf.Bytes()) +// 
SendBlockIntervalData send block data system interval information to telemetry connection +func (h *Handler) SendBlockIntervalData(data *BlockIntervalData) { + h.Lock() + defer h.Unlock() + payload := log.Fields{"best": data.BestHash.String(), "finalized_hash": data.FinalizedHash.String(), // nolint + "finalized_height": data.FinalizedHeight, "height": data.BestHeight, "msg": "system.interval", "txcount": data.TXCount, // nolint + "used_state_cache_size": data.UsedStateCacheSize} + telemetryLogger := log.WithFields(log.Fields{"id": 1, "payload": payload, "ts": time.Now()}) + telemetryLogger.Print() +} + +func (h *Handler) sender() { + for { + h.RLock() + line, err := h.buf.ReadBytes(byte(10)) // byte 10 is newline character, used as delimiter + h.RUnlock() if err != nil { - // TODO (ed) determine how to handle this error - fmt.Printf("ERROR connecting to telemetry %v\n", err) + continue + } + + for _, c := range h.wsConn { + err := c.WriteMessage(websocket.TextMessage, line) + if err != nil { + // TODO (ed) determine how to handle this error + fmt.Printf("ERROR connecting to telemetry %v\n", err) + } } } - h.buf.Reset() } diff --git a/dot/telemetry/telemetry_test.go b/dot/telemetry/telemetry_test.go index d8a505f6cc..3cdbed43e0 100644 --- a/dot/telemetry/telemetry_test.go +++ b/dot/telemetry/telemetry_test.go @@ -1,20 +1,23 @@ package telemetry import ( + "bytes" "log" "math/big" "net/http" "os" + "sort" + "sync" "testing" "time" + "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/gorilla/websocket" "github.com/stretchr/testify/require" ) var upgrader = websocket.Upgrader{} -var lastMessage []byte func TestMain(m *testing.M) { // start server to listen for websocket connections @@ -35,30 +38,74 @@ func TestMain(m *testing.M) { code := m.Run() os.Exit(code) } -func TestHandler_SendConnection(t *testing.T) { - expected := 
[]byte(`{"id":1,"payload":{"authority":false,"chain":"chain","config":"","genesis_hash":"hash","implementation":"systemName","msg":"system.connected","name":"nodeName","network_id":"netID","startup_time":"startTime","version":"version"},"ts":`) - data := &ConnectionData{ - Authority: false, - Chain: "chain", - GenesisHash: "hash", - SystemName: "systemName", - NodeName: "nodeName", - SystemVersion: "version", - NetworkID: "netID", - StartTime: "startTime", + +var resultCh chan []byte + +func TestHandler_SendMulti(t *testing.T) { + var wg sync.WaitGroup + wg.Add(4) + + resultCh = make(chan []byte) + + go func() { + GetInstance().SendConnection(&ConnectionData{ + Authority: false, + Chain: "chain", + GenesisHash: "hash", + SystemName: "systemName", + NodeName: "nodeName", + SystemVersion: "version", + NetworkID: "netID", + StartTime: "startTime", + }) + wg.Done() + }() + + go func() { + GetInstance().SendBlockImport("hash", big.NewInt(2)) + wg.Done() + }() + + go func() { + GetInstance().SendNetworkData(NewNetworkData(1, 2, 3)) + wg.Done() + }() + + go func() { + GetInstance().SendBlockIntervalData(&BlockIntervalData{ + BestHash: common.MustHexToHash("0x07b749b6e20fd5f1159153a2e790235018621dd06072a62bcd25e8576f6ff5e6"), + BestHeight: big.NewInt(32375), + FinalizedHash: common.MustHexToHash("0x687197c11b4cf95374159843e7f46fbcd63558db981aaef01a8bac2a44a1d6b2"), + FinalizedHeight: big.NewInt(32256), + TXCount: 2, + UsedStateCacheSize: 1886357, + }) + wg.Done() + }() + wg.Wait() + + expected1 := []byte(`{"id":1,"payload":{"bandwidth_download":2,"bandwidth_upload":3,"msg":"system.interval","peers":1},"ts":`) + expected2 := []byte(`{"id":1,"payload":{"best":"hash","height":2,"msg":"block.import","origin":"NetworkInitialSync"},"ts":`) + expected3 := 
[]byte(`{"id":1,"payload":{"authority":false,"chain":"chain","config":"","genesis_hash":"hash","implementation":"systemName","msg":"system.connected","name":"nodeName","network_id":"netID","startup_time":"startTime","version":"version"},"ts":`) + expected4 := []byte(`{"id":1,"payload":{"best":"0x07b749b6e20fd5f1159153a2e790235018621dd06072a62bcd25e8576f6ff5e6","finalized_hash":"0x687197c11b4cf95374159843e7f46fbcd63558db981aaef01a8bac2a44a1d6b2","finalized_height":32256,"height":32375,"msg":"system.interval","txcount":2,"used_state_cache_size":1886357},"ts":`) // nolint + + expected := [][]byte{expected3, expected1, expected4, expected2} + + var actual [][]byte + for data := range resultCh { + actual = append(actual, data) + if len(actual) == 4 { + break + } } - GetInstance().SendConnection(data) - time.Sleep(time.Millisecond) - // note, we only check the first 234 bytes because the remaining bytes are the timestamp, which we can't estimate - require.Equal(t, expected, lastMessage[:234]) -} -func TestHandler_SendBlockImport(t *testing.T) { - expected := []byte(`{"id":1,"payload":{"best":"hash","height":2,"msg":"block.import","origin":"NetworkInitialSync"},"ts":`) - GetInstance().SendBlockImport("hash", big.NewInt(2)) - time.Sleep(time.Millisecond) - // note, we only check the first 101 bytes because the remaining bytes are the timestamp, which we can't estimate - require.Equal(t, expected, lastMessage[:101]) + sort.Slice(actual, func(i, j int) bool { + return bytes.Compare(actual[i], actual[j]) < 0 + }) + require.Contains(t, string(actual[0]), string(expected[0])) + require.Contains(t, string(actual[1]), string(expected[1])) + require.Contains(t, string(actual[2]), string(expected[2])) + require.Contains(t, string(actual[3]), string(expected[3])) } func listen(w http.ResponseWriter, r *http.Request) { @@ -73,6 +120,7 @@ func listen(w http.ResponseWriter, r *http.Request) { log.Printf("read err %v", err) break } - lastMessage = msg + + resultCh <- msg } } diff --git 
a/dot/types/block.go b/dot/types/block.go index 30c6759474..7f77c50438 100644 --- a/dot/types/block.go +++ b/dot/types/block.go @@ -36,7 +36,7 @@ func NewBlock(header *Header, body *Body) *Block { } } -// NewEmptyBlock returns a new Block with an initialized but empty Header and Body +// NewEmptyBlock returns a new Block with an initialised but empty Header and Body func NewEmptyBlock() *Block { return &Block{ Header: new(Header), diff --git a/dot/types/grandpa.go b/dot/types/grandpa.go index fa6dd627ac..dd3c6ee97f 100644 --- a/dot/types/grandpa.go +++ b/dot/types/grandpa.go @@ -1,10 +1,12 @@ package types import ( + "fmt" "io" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + "github.com/ChainSafe/gossamer/lib/scale" ) // GrandpaAuthoritiesRaw represents a GRANDPA authority where their key is a byte array @@ -58,3 +60,115 @@ func GrandpaAuthoritiesRawToAuthorities(adr []*GrandpaAuthoritiesRaw) ([]*Author return ad, nil } + +// GrandpaVoter represents a GRANDPA voter +type GrandpaVoter struct { + Key *ed25519.PublicKey + ID uint64 +} + +// PublicKeyBytes returns the voter key as PublicKeyBytes +func (v *GrandpaVoter) PublicKeyBytes() ed25519.PublicKeyBytes { + return v.Key.AsBytes() +} + +// String returns a formatted GrandpaVoter string +func (v *GrandpaVoter) String() string { + return fmt.Sprintf("[key=0x%s id=%d]", v.PublicKeyBytes(), v.ID) +} + +// Decode will decode the Reader into a GrandpaVoter +func (v *GrandpaVoter) Decode(r io.Reader) error { + keyBytes, err := common.Read32Bytes(r) + if err != nil { + return err + } + + key, err := ed25519.NewPublicKey(keyBytes[:]) + if err != nil { + return err + } + + id, err := common.ReadUint64(r) + if err != nil { + return err + } + + v.Key = key + v.ID = id + return nil +} + +// NewGrandpaVotersFromAuthorities returns an array of GrandpaVoters given an array of GrandpaAuthorities +func NewGrandpaVotersFromAuthorities(ad []*Authority) []*GrandpaVoter { + v := 
make([]*GrandpaVoter, len(ad)) + + for i, d := range ad { + if pk, ok := d.Key.(*ed25519.PublicKey); ok { + v[i] = &GrandpaVoter{ + Key: pk, + ID: d.Weight, + } + } + } + + return v +} + +// NewGrandpaVotersFromAuthoritiesRaw returns an array of GrandpaVoters given an array of GrandpaAuthoritiesRaw +func NewGrandpaVotersFromAuthoritiesRaw(ad []*GrandpaAuthoritiesRaw) ([]*GrandpaVoter, error) { + v := make([]*GrandpaVoter, len(ad)) + + for i, d := range ad { + key, err := ed25519.NewPublicKey(d.Key[:]) + if err != nil { + return nil, err + } + + v[i] = &GrandpaVoter{ + Key: key, + ID: d.ID, + } + } + + return v, nil +} + +// GrandpaVoters represents []*GrandpaVoter +type GrandpaVoters []*GrandpaVoter + +// String returns a formatted Voters string +func (v GrandpaVoters) String() string { + str := "" + for _, w := range v { + str = str + w.String() + " " + } + return str +} + +// DecodeGrandpaVoters returns a SCALE decoded GrandpaVoters +func DecodeGrandpaVoters(r io.Reader) (GrandpaVoters, error) { + sd := &scale.Decoder{Reader: r} + length, err := sd.DecodeInteger() + if err != nil { + return nil, err + } + + voters := make([]*GrandpaVoter, length) + for i := range voters { + voters[i] = new(GrandpaVoter) + err = voters[i].Decode(r) + if err != nil { + return nil, err + } + } + + return voters, nil +} + +// FinalisationInfo represents information about what block was finalised in what round and setID +type FinalisationInfo struct { + Header *Header + Round uint64 + SetID uint64 +} diff --git a/dot/types/roles.go b/dot/types/roles.go index 551a35d1db..3d7275c588 100644 --- a/dot/types/roles.go +++ b/dot/types/roles.go @@ -23,6 +23,6 @@ const ( FullNodeRole = byte(1) // LightClientRole runs a light client LightClientRole = byte(2) - // AuthorityRole runs the node as a block-producing and finalizing node + // AuthorityRole runs the node as a block-producing and finalising node AuthorityRole = byte(4) ) diff --git a/dot/utils.go b/dot/utils.go index 
03c9640468..4f67021322 100644 --- a/dot/utils.go +++ b/dot/utils.go @@ -17,11 +17,14 @@ package dot import ( + "encoding/binary" "encoding/hex" "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" + "strings" "testing" ctoml "github.com/ChainSafe/gossamer/dot/config/toml" @@ -30,6 +33,7 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/utils" log "github.com/ChainSafe/log15" + "github.com/cosmos/go-bip39" "github.com/naoina/toml" "github.com/stretchr/testify/require" ) @@ -235,3 +239,13 @@ func CreateJSONRawFile(bs *BuildSpec, fp string) *os.File { } return WriteConfig(data, fp) } + +// RandomNodeName generate a new random name +// if there is no name configured to the node +func RandomNodeName() string { + entropy, _ := bip39.NewEntropy(128) + randomNamesString, _ := bip39.NewMnemonic(entropy) + randomNames := strings.Split(randomNamesString, " ") + number := binary.BigEndian.Uint16(entropy) + return randomNames[0] + "-" + randomNames[1] + "-" + fmt.Sprint(number) +} diff --git a/go.mod b/go.mod index e16612d2e1..e5a2dcda3f 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/ChainSafe/gossamer require ( github.com/ChainSafe/chaindb v0.1.5-0.20210117220933-15e75f27268f - github.com/ChainSafe/go-schnorrkel v0.0.0-20210127175223-0f934d64ecac + github.com/ChainSafe/go-schnorrkel v0.0.0-20210222182958-bd440c890782 github.com/ChainSafe/log15 v1.0.0 github.com/OneOfOne/xxhash v1.2.5 github.com/btcsuite/btcutil v1.0.2 diff --git a/go.sum b/go.sum index 8775b1cb06..bc840ce49e 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ChainSafe/chaindb v0.1.5-0.20210117220933-15e75f27268f h1:qDmWdUIE1cgG19K/eVB9nkQMkldaGwcjU9U5OyUV11k= github.com/ChainSafe/chaindb v0.1.5-0.20210117220933-15e75f27268f/go.mod 
h1:WBsCSLGM7+DvSYU6cFVUltahwU7Sw4cN3e8kiLdNFJM= -github.com/ChainSafe/go-schnorrkel v0.0.0-20210127175223-0f934d64ecac h1:mZyOXXu+q/op10XOYRzdpIxWWreS/zCYhl+UNpRv2O0= -github.com/ChainSafe/go-schnorrkel v0.0.0-20210127175223-0f934d64ecac/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v0.0.0-20210222182958-bd440c890782 h1:lwmjzta2Xu+3rPVY/VeNQj2xfNkyih4CwyRxYg3cpRQ= +github.com/ChainSafe/go-schnorrkel v0.0.0-20210222182958-bd440c890782/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/ChainSafe/log15 v1.0.0 h1:vRDVtWtVwIH5uSCBvgTTZh6FA58UBJ6+QiiypaZfBf8= github.com/ChainSafe/log15 v1.0.0/go.mod h1:5v1+ALHtdW0NfAeeoYyKmzCAMcAeqkdhIg4uxXWIgOg= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= diff --git a/lib/babe/babe.go b/lib/babe/babe.go index fb937b88cd..1c211a05e5 100644 --- a/lib/babe/babe.go +++ b/lib/babe/babe.go @@ -28,6 +28,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/runtime" + rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" log "github.com/ChainSafe/log15" ) @@ -471,8 +472,15 @@ func (b *Service) handleSlot(slotNum uint64) error { return nil } + old := ts.Snapshot() + // block built successfully, store resulting trie in storage state - err = b.storageState.StoreTrie(ts) + oldTs, err := rtstorage.NewTrieState(old) + if err != nil { + return err + } + + err = b.storageState.StoreTrie(oldTs) if err != nil { logger.Error("failed to store trie in storage state", "error", err) } diff --git a/lib/babe/babe_test.go b/lib/babe/babe_test.go index 295b50ff70..a1a7058f3e 100644 --- a/lib/babe/babe_test.go +++ b/lib/babe/babe_test.go @@ -116,7 +116,7 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service { genesisBABEConfig.EpochLength = cfg.EpochLength } - err = dbSrv.Initialize(gen, genHeader, genTrie) + err = dbSrv.Initialise(gen, genHeader, genTrie) 
require.NoError(t, err) err = dbSrv.Start() diff --git a/lib/babe/build.go b/lib/babe/build.go index f47d416d39..b59488ce14 100644 --- a/lib/babe/build.go +++ b/lib/babe/build.go @@ -18,10 +18,8 @@ package babe import ( "bytes" - "errors" "fmt" "math/big" - "strings" "time" "github.com/ChainSafe/gossamer/dot/types" @@ -56,13 +54,13 @@ func (b *Service) buildBlock(parent *types.Header, slot Slot) (*types.Block, err return nil, err } - // initialize block header + // initialise block header err = b.rt.InitializeBlock(header) if err != nil { return nil, err } - logger.Trace("initialized block") + logger.Trace("initialised block") // add block inherents inherents, err := b.buildBlockInherents(slot) @@ -77,14 +75,14 @@ func (b *Service) buildBlock(parent *types.Header, slot Slot) (*types.Block, err logger.Trace("built block extrinsics") - // finalize block + // finalise block header, err = b.rt.FinalizeBlock() if err != nil { b.addToQueue(included) - return nil, fmt.Errorf("cannot finalize block: %s", err) + return nil, fmt.Errorf("cannot finalise block: %s", err) } - logger.Trace("finalized block") + logger.Trace("finalised block") header.ParentHash = parent.Hash() header.Number.Add(parent.Number, big.NewInt(1)) @@ -175,38 +173,42 @@ func (b *Service) buildBlockBABEPrimaryPreDigest(slot Slot) (*types.BabePrimaryP // for each extrinsic in queue, add it to the block, until the slot ends or the block is full. // if any extrinsic fails, it returns an empty array and an error. func (b *Service) buildBlockExtrinsics(slot Slot) []*transaction.ValidTransaction { - next := b.nextReadyExtrinsic() - included := []*transaction.ValidTransaction{} + var included []*transaction.ValidTransaction - for !hasSlotEnded(slot) && next != nil { - logger.Trace("build block", "applying extrinsic", next) + for !hasSlotEnded(slot) { + txn := b.transactionState.Pop() + // Transaction queue is empty. 
+ if txn == nil { + return included + } - t := b.transactionState.Pop() - ret, err := b.rt.ApplyExtrinsic(next) - if err != nil { - logger.Warn("failed to apply extrinsic", "error", err, "extrinsic", next) - next = b.nextReadyExtrinsic() + // Move to next extrinsic. + if txn.Extrinsic == nil { continue } - // if ret == 0x0001, there is a dispatch error; if ret == 0x01, there is an apply error - if ret[0] == 1 || bytes.Equal(ret[:2], []byte{0, 1}) { - errTxt, err := determineError(ret) - // remove invalid extrinsic from queue - if err == nil { - logger.Warn("failed to interpret extrinsic error", "error", ret, "extrinsic", next) - } else { - logger.Warn("failed to apply extrinsic", "error", errTxt, "extrinsic", next) - } + extrinsic := txn.Extrinsic + logger.Trace("build block", "applying extrinsic", extrinsic) - next = b.nextReadyExtrinsic() + ret, err := b.rt.ApplyExtrinsic(extrinsic) + if err != nil { + logger.Warn("failed to apply extrinsic", "error", err, "extrinsic", extrinsic) continue } - logger.Debug("build block applied extrinsic", "extrinsic", next) + err = determineErr(ret) + if err != nil { + logger.Warn("failed to apply extrinsic", "error", err, "extrinsic", extrinsic) - included = append(included, t) - next = b.nextReadyExtrinsic() + // Failure of the module call dispatching doesn't invalidate the extrinsic. + // It is included in the block. 
+ if _, ok := err.(*DispatchOutcomeError); !ok { + continue + } + } + + logger.Debug("build block applied extrinsic", "extrinsic", extrinsic) + included = append(included, txn) } return included @@ -268,12 +270,8 @@ func (b *Service) buildBlockInherents(slot Slot) ([][]byte, error) { } if !bytes.Equal(ret, []byte{0, 0}) { - errTxt, err := determineError(ret) - if err != nil { - return nil, err - } - - return nil, errors.New("error applying extrinsic: " + errTxt) + errTxt := determineErr(ret) + return nil, fmt.Errorf("error applying inherent: %s", errTxt) } } @@ -291,15 +289,6 @@ func (b *Service) addToQueue(txs []*transaction.ValidTransaction) { } } -// nextReadyExtrinsic peeks from the transaction queue. it does not remove any transactions from the queue -func (b *Service) nextReadyExtrinsic() types.Extrinsic { - transaction := b.transactionState.Peek() - if transaction == nil { - return nil - } - return transaction.Extrinsic -} - func hasSlotEnded(slot Slot) bool { slotEnd := slot.start.Add(slot.duration) return time.Since(slotEnd) >= 0 @@ -309,68 +298,12 @@ func extrinsicsToBody(inherents [][]byte, txs []*transaction.ValidTransaction) ( extrinsics := types.BytesArrayToExtrinsics(inherents) for _, tx := range txs { - extrinsics = append(extrinsics, tx.Extrinsic) - } - - return types.NewBodyFromExtrinsics(extrinsics) -} - -func determineError(res []byte) (string, error) { - var errTxt strings.Builder - var err error - - // when res[0] == 0x01 it is an apply error - if res[0] == 1 { - _, err = errTxt.WriteString("Apply error, type: ") - if bytes.Equal(res[1:], []byte{0}) { - _, err = errTxt.WriteString("NoPermission") - } - if bytes.Equal(res[1:], []byte{1}) { - _, err = errTxt.WriteString("BadState") - } - if bytes.Equal(res[1:], []byte{2}) { - _, err = errTxt.WriteString("Validity") - } - if bytes.Equal(res[1:], []byte{2, 0, 0}) { - _, err = errTxt.WriteString("Call") - } - if bytes.Equal(res[1:], []byte{2, 0, 1}) { - _, err = errTxt.WriteString("Payment") - } - 
if bytes.Equal(res[1:], []byte{2, 0, 2}) { - _, err = errTxt.WriteString("Future") - } - if bytes.Equal(res[1:], []byte{2, 0, 3}) { - _, err = errTxt.WriteString("Stale") - } - if bytes.Equal(res[1:], []byte{2, 0, 4}) { - _, err = errTxt.WriteString("BadProof") - } - if bytes.Equal(res[1:], []byte{2, 0, 5}) { - _, err = errTxt.WriteString("AncientBirthBlock") - } - if bytes.Equal(res[1:], []byte{2, 0, 6}) { - _, err = errTxt.WriteString("ExhaustsResources") - } - if bytes.Equal(res[1:], []byte{2, 0, 7}) { - _, err = errTxt.WriteString("Custom") - } - if bytes.Equal(res[1:], []byte{2, 1, 0}) { - _, err = errTxt.WriteString("CannotLookup") - } - if bytes.Equal(res[1:], []byte{2, 1, 1}) { - _, err = errTxt.WriteString("NoUnsignedValidator") - } - if bytes.Equal(res[1:], []byte{2, 1, 2}) { - _, err = errTxt.WriteString("Custom") + decExt, err := scale.Decode(tx.Extrinsic, []byte{}) + if err != nil { + return nil, err } + extrinsics = append(extrinsics, decExt.([]byte)) } - // when res[:2] == 0x0001 it's a dispatch error - if bytes.Equal(res[:2], []byte{0, 1}) { - mod := res[2:3] - errID := res[3:4] - _, err = errTxt.WriteString("Dispatch Error, module: " + string(mod) + " error: " + string(errID)) - } - return errTxt.String(), err + return types.NewBodyFromExtrinsics(extrinsics) } diff --git a/lib/babe/build_test.go b/lib/babe/build_test.go index 758a523ab6..839a9f432d 100644 --- a/lib/babe/build_test.go +++ b/lib/babe/build_test.go @@ -159,7 +159,7 @@ func TestApplyExtrinsic(t *testing.T) { header, err := types.NewHeader(parentHash, common.Hash{}, common.Hash{}, big.NewInt(1), types.NewEmptyDigest()) require.NoError(t, err) - //initialize block header + //initialise block header err = babeService.rt.InitializeBlock(header) require.NoError(t, err) @@ -194,7 +194,7 @@ func TestBuildAndApplyExtrinsic(t *testing.T) { header, err := types.NewHeader(parentHash, common.Hash{}, common.Hash{}, big.NewInt(1), types.NewEmptyDigest()) require.NoError(t, err) - //initialize block 
header + //initialise block header err = babeService.rt.InitializeBlock(header) require.NoError(t, err) diff --git a/lib/babe/errors.go b/lib/babe/errors.go index 96021e28b0..a676b21a5e 100644 --- a/lib/babe/errors.go +++ b/lib/babe/errors.go @@ -13,7 +13,13 @@ package babe -import "errors" +import ( + "errors" + "fmt" + + "github.com/ChainSafe/gossamer/lib/common/optional" + "github.com/ChainSafe/gossamer/lib/scale" +) // ErrBadSlotClaim is returned when a slot claim is invalid var ErrBadSlotClaim = errors.New("could not verify slot claim VRF proof") @@ -53,3 +59,109 @@ var ErrAuthorityDisabled = errors.New("authority has been disabled for the remai // ErrNotAuthority is returned when trying to perform authority functions when not an authority var ErrNotAuthority = errors.New("node is not an authority") + +var errInvalidResult = errors.New("invalid error value") + +// A DispatchOutcomeError is outcome of dispatching the extrinsic +type DispatchOutcomeError struct { + msg string // description of error +} + +func (e DispatchOutcomeError) Error() string { + return fmt.Sprintf("dispatch outcome error: %s", e.msg) +} + +// A TransactionValidityError is possible errors while checking the validity of a transaction +type TransactionValidityError struct { + msg string // description of error +} + +func (e TransactionValidityError) Error() string { + return fmt.Sprintf("transaction validity error: %s", e.msg) +} + +func determineCustomModuleErr(res []byte) error { + if len(res) < 3 { + return errInvalidResult + } + errMsg, err := optional.NewBytes(false, nil).DecodeBytes(res[2:]) + if err != nil { + return err + } + return fmt.Errorf("index: %d code: %d message: %s", res[0], res[1], errMsg.String()) +} + +func determineDispatchErr(res []byte) error { + switch res[0] { + case 0: + unKnownError, _ := scale.Decode(res[1:], []byte{}) + return &DispatchOutcomeError{fmt.Sprintf("unknown error: %s", string(unKnownError.([]byte)))} + case 1: + return &DispatchOutcomeError{"failed 
lookup"} + case 2: + return &DispatchOutcomeError{"bad origin"} + case 3: + return &DispatchOutcomeError{fmt.Sprintf("custom module error: %s", determineCustomModuleErr(res[1:]))} + } + return errInvalidResult +} + +func determineInvalidTxnErr(res []byte) error { + switch res[0] { + case 0: + return &TransactionValidityError{"call of the transaction is not expected"} + case 1: + return &TransactionValidityError{"invalid payment"} + case 2: + return &TransactionValidityError{"invalid transaction"} + case 3: + return &TransactionValidityError{"outdated transaction"} + case 4: + return &TransactionValidityError{"bad proof"} + case 5: + return &TransactionValidityError{"ancient birth block"} + case 6: + return &TransactionValidityError{"exhausts resources"} + case 7: + return &TransactionValidityError{fmt.Sprintf("unknown error: %d", res[1])} + case 8: + return &TransactionValidityError{"mandatory dispatch error"} + case 9: + return &TransactionValidityError{"invalid mandatory dispatch"} + } + return errInvalidResult +} + +func determineUnknownTxnErr(res []byte) error { + switch res[0] { + case 0: + return &TransactionValidityError{"lookup failed"} + case 1: + return &TransactionValidityError{"validator not found"} + case 2: + return &TransactionValidityError{fmt.Sprintf("unknown error: %d", res[1])} + } + return errInvalidResult +} + +func determineErr(res []byte) error { + switch res[0] { + case 0: // DispatchOutcome + switch res[1] { + case 0: + return nil + case 1: + return determineDispatchErr(res[2:]) + default: + return errInvalidResult + } + case 1: // TransactionValidityError + switch res[1] { + case 0: + return determineInvalidTxnErr(res[2:]) + case 1: + return determineUnknownTxnErr(res[2:]) + } + } + return errInvalidResult +} diff --git a/lib/babe/errors_test.go b/lib/babe/errors_test.go new file mode 100644 index 0000000000..8ec2923c6d --- /dev/null +++ b/lib/babe/errors_test.go @@ -0,0 +1,75 @@ +package babe + +import ( + "testing" + + 
"github.com/stretchr/testify/require" +) + +func TestApplyExtrinsicErrors(t *testing.T) { + testCases := []struct { + name string + test []byte + expected string + }{ + { + name: "Valid extrinsic", + test: []byte{0, 0}, + expected: "", + }, + { + name: "Dispatch custom module error empty", + test: []byte{0, 1, 3, 4, 5, 1, 0}, + expected: "dispatch outcome error: custom module error: index: 4 code: 5 message: ", + }, + { + name: "Dispatch custom module error", + test: []byte{0, 1, 3, 4, 5, 1, 0x04, 0x65}, + expected: "dispatch outcome error: custom module error: index: 4 code: 5 message: 65", + }, + { + name: "Dispatch unknown error", + test: []byte{0, 1, 0, 0x04, 65}, + expected: "dispatch outcome error: unknown error: A", + }, + { + name: "Invalid txn payment error", + test: []byte{1, 0, 1}, + expected: "transaction validity error: invalid payment", + }, + { + name: "Invalid txn payment error", + test: []byte{1, 0, 7, 65}, + expected: "transaction validity error: unknown error: 65", + }, + { + name: "Unknown txn lookup failed error", + test: []byte{1, 1, 0}, + expected: "transaction validity error: lookup failed", + }, + { + name: "Invalid txn unknown error", + test: []byte{1, 1, 2, 75}, + expected: "transaction validity error: unknown error: 75", + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + err := determineErr(c.test) + if c.expected == "" { + require.NoError(t, err) + return + } + + if c.test[0] == 0 { + _, ok := err.(*DispatchOutcomeError) + require.True(t, ok) + } else { + _, ok := err.(*TransactionValidityError) + require.True(t, ok) + } + require.Equal(t, err.Error(), c.expected) + }) + } +} diff --git a/lib/babe/verify_test.go b/lib/babe/verify_test.go index 64c772f4db..ca24319f43 100644 --- a/lib/babe/verify_test.go +++ b/lib/babe/verify_test.go @@ -44,7 +44,7 @@ func newTestVerificationManager(t *testing.T, genCfg *types.BabeConfiguration) * } gen, genTrie, genHeader := newTestGenesisWithTrieAndHeader(t) - err = 
dbSrv.Initialize(gen, genHeader, genTrie) + err = dbSrv.Initialise(gen, genHeader, genTrie) require.NoError(t, err) err = dbSrv.Start() diff --git a/lib/blocktree/blocktree.go b/lib/blocktree/blocktree.go index 6e9547da35..274fe58a7e 100644 --- a/lib/blocktree/blocktree.go +++ b/lib/blocktree/blocktree.go @@ -49,8 +49,8 @@ func NewEmptyBlockTree(db database.Database) *BlockTree { } } -// NewBlockTreeFromRoot initializes a blocktree with a root block. The root block is always the most recently -// finalized block (ie the genesis block if the node is just starting.) +// NewBlockTreeFromRoot initialises a blocktree with a root block. The root block is always the most recently +// finalised block (ie the genesis block if the node is just starting.) func NewBlockTreeFromRoot(root *types.Header, db database.Database) *BlockTree { head := &node{ hash: root.Hash(), @@ -172,15 +172,15 @@ func (bt *BlockTree) getNode(h Hash) *node { // Prune sets the given hash as the new blocktree root, removing all nodes that are not the new root node or its descendant // It returns an array of hashes that have been pruned -func (bt *BlockTree) Prune(finalized Hash) (pruned []Hash) { +func (bt *BlockTree) Prune(finalised Hash) (pruned []Hash) { bt.Lock() defer bt.Unlock() - if finalized == bt.head.hash { + if finalised == bt.head.hash { return pruned } - n := bt.getNode(finalized) + n := bt.getNode(finalised) if n == nil { return pruned } @@ -192,7 +192,7 @@ func (bt *BlockTree) Prune(finalized Hash) (pruned []Hash) { return pruned } -// String utilizes github.com/disiqueira/gotree to create a printable tree +// String utilises github.com/disiqueira/gotree to create a printable tree func (bt *BlockTree) String() string { bt.RLock() defer bt.RUnlock() diff --git a/lib/blocktree/blocktree_test.go b/lib/blocktree/blocktree_test.go index 6db2df87e1..8a8032d5ff 100644 --- a/lib/blocktree/blocktree_test.go +++ b/lib/blocktree/blocktree_test.go @@ -364,18 +364,18 @@ func TestBlockTree_Prune(t 
*testing.T) { copy := bt.DeepCopy() - // pick some block to finalize - finalized := bt.head.children[0].children[0].children[0] - pruned := bt.Prune(finalized.hash) + // pick some block to finalise + finalised := bt.head.children[0].children[0].children[0] + pruned := bt.Prune(finalised.hash) for _, prunedHash := range pruned { prunedNode := copy.getNode(prunedHash) - if prunedNode.isDescendantOf(finalized) { - t.Fatal("pruned node that's descendant of finalized node!!") + if prunedNode.isDescendantOf(finalised) { + t.Fatal("pruned node that's descendant of finalised node!!") } - if finalized.isDescendantOf(prunedNode) { - t.Fatal("pruned an ancestor of the finalized node!!") + if finalised.isDescendantOf(prunedNode) { + t.Fatal("pruned an ancestor of the finalised node!!") } } } diff --git a/lib/blocktree/node.go b/lib/blocktree/node.go index 788da60309..4ec9c89192 100644 --- a/lib/blocktree/node.go +++ b/lib/blocktree/node.go @@ -206,8 +206,8 @@ func (n *node) deepCopy(parent *node) *node { return nCopy } -func (n *node) prune(finalized *node, pruned []Hash) []Hash { - if finalized == nil { +func (n *node) prune(finalised *node, pruned []Hash) []Hash { + if finalised == nil { return pruned } @@ -215,23 +215,23 @@ func (n *node) prune(finalized *node, pruned []Hash) []Hash { pruned = []Hash{} } - // if this is a descedent of the finalized block, keep it - // all descendents of this block will also be descendents of the finalized block, + // if this is a descedent of the finalised block, keep it + // all descendents of this block will also be descendents of the finalised block, // so don't need to check any of those - if n.isDescendantOf(finalized) { + if n.isDescendantOf(finalised) { return pruned } - // if it's not an ancestor the finalized block, prune it - if !finalized.isDescendantOf(n) { + // if it's not an ancestor the finalised block, prune it + if !finalised.isDescendantOf(n) { pruned = append(pruned, n.hash) n.parent.deleteChild(n) } - // if this is an 
ancestor of the finalized block, keep it, + // if this is an ancestor of the finalised block, keep it, // and check its children for _, child := range n.children { - pruned = child.prune(finalized, pruned) + pruned = child.prune(finalised, pruned) } return pruned diff --git a/lib/blocktree/node_test.go b/lib/blocktree/node_test.go index f630d8df8c..b2c37c3d3f 100644 --- a/lib/blocktree/node_test.go +++ b/lib/blocktree/node_test.go @@ -59,18 +59,18 @@ func TestNode_Prune(t *testing.T) { copy := bt.DeepCopy() - // pick some block to finalize - finalized := bt.head.children[0].children[0].children[0] - pruned := bt.head.prune(finalized, nil) + // pick some block to finalise + finalised := bt.head.children[0].children[0].children[0] + pruned := bt.head.prune(finalised, nil) for _, prunedHash := range pruned { prunedNode := copy.getNode(prunedHash) - if prunedNode.isDescendantOf(finalized) { - t.Fatal("pruned node that's descendant of finalized node!!") + if prunedNode.isDescendantOf(finalised) { + t.Fatal("pruned node that's descendant of finalised node!!") } - if finalized.isDescendantOf(prunedNode) { - t.Fatal("pruned an ancestor of the finalized node!!") + if finalised.isDescendantOf(prunedNode) { + t.Fatal("pruned an ancestor of the finalised node!!") } } } diff --git a/lib/common/common.go b/lib/common/common.go index 2b6d5a9f80..fea9e961f6 100644 --- a/lib/common/common.go +++ b/lib/common/common.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "errors" "io" + "math/big" "strconv" "strings" ) @@ -103,6 +104,32 @@ func MustHexToBytes(in string) []byte { return out } +// MustHexToBigInt turns a 0x prefixed hex string into a big.Int +// it panic if it cannot decode the string +func MustHexToBigInt(in string) *big.Int { + if len(in) < 2 { + panic("invalid string") + } + + if strings.Compare(in[:2], "0x") != 0 { + panic(ErrNoPrefix) + } + + in = in[2:] + + // Ensure we have an even length + if len(in)%2 != 0 { + in = "0" + in + } + + out, err := hex.DecodeString(in) + if 
err != nil { + panic(err) + } + + return big.NewInt(0).SetBytes(out) +} + // BytesToHex turns a byte slice into a 0x prefixed hex string func BytesToHex(in []byte) string { s := hex.EncodeToString(in) diff --git a/lib/common/common_test.go b/lib/common/common_test.go index 0b574f09a5..b88c5aabcc 100644 --- a/lib/common/common_test.go +++ b/lib/common/common_test.go @@ -18,8 +18,12 @@ package common import ( "bytes" + "math/big" "reflect" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStringToInts(t *testing.T) { @@ -182,3 +186,32 @@ func TestSwapNibbles(t *testing.T) { } } } + +func TestMustHexToBigInt(t *testing.T) { + tests := []struct { + in string + out *big.Int + }{ + {"0x0", big.NewInt(0).SetBytes([]byte{0})}, + {"0x00", big.NewInt(0).SetBytes([]byte{0})}, + {"0x1", big.NewInt(1)}, + {"0x01", big.NewInt(1)}, + {"0xf", big.NewInt(15)}, + {"0x0f", big.NewInt(15)}, + {"0x10", big.NewInt(16)}, + {"0xff", big.NewInt(255)}, + {"0x50429", big.NewInt(328745)}, + {"0x050429", big.NewInt(328745)}, + } + + for _, test := range tests { + res := MustHexToBigInt(test.in) + require.Equal(t, test.out, res) + } +} + +func TestMustHexToBigIntPanic(t *testing.T) { + assert.Panics(t, func() { MustHexToBigInt("1") }, "should panic for string len < 2") + assert.Panics(t, func() { MustHexToBigInt("12") }, "should panic for string not starting with 0x") + assert.Panics(t, func() { MustHexToBigInt("0xzz") }, "should panic for string not containing hex characters") +} diff --git a/lib/common/db_keys.go b/lib/common/db_keys.go index 05e431c09d..2de8019943 100644 --- a/lib/common/db_keys.go +++ b/lib/common/db_keys.go @@ -17,18 +17,20 @@ package common var ( - // BestBlockHashKey is the db location the hash of the best (unfinalized) block header. + // BestBlockHashKey is the db location the hash of the best (unfinalised) block header. 
BestBlockHashKey = []byte("best_hash") // LatestStorageHashKey is the db location of the hash of the latest storage trie. LatestStorageHashKey = []byte("latest_storage_hash") - // FinalizedBlockHashKey is the db location of the hash of the latest finalized block header. - FinalizedBlockHashKey = []byte("finalized_head") + // FinalizedBlockHashKey is the db location of the hash of the latest finalised block header. + FinalizedBlockHashKey = []byte("finalised_head") // GenesisDataKey is the db location of the genesis data. GenesisDataKey = []byte("genesis_data") // BlockTreeKey is the db location of the encoded block tree structure. BlockTreeKey = []byte("block_tree") - // LatestFinalizedRoundKey is the key where the last finalized grandpa round is stored - LatestFinalizedRoundKey = []byte("latest_finalized_round") + // LatestFinalizedRoundKey is the key where the last finalised grandpa round is stored + LatestFinalizedRoundKey = []byte("latest_finalised_round") // WorkingStorageHashKey is the storage key that the runtime uses to store the latest working state root. 
WorkingStorageHashKey = []byte("working_storage_hash") + //NodeNameKey is the storage key to store de current node name and avoid create a new name every initialization + NodeNameKey = []byte("node_name") ) diff --git a/lib/common/optional/types.go b/lib/common/optional/types.go index 4cf9d7eed0..4d373eb6e0 100644 --- a/lib/common/optional/types.go +++ b/lib/common/optional/types.go @@ -166,6 +166,27 @@ func (x *Bytes) Decode(r io.Reader) (*Bytes, error) { return x, nil } +// DecodeBytes return an optional Bytes from scale encoded data +func (x *Bytes) DecodeBytes(data []byte) (*Bytes, error) { + if len(data) == 0 || data[0] > 1 { + return nil, ErrInvalidOptional + } + + x.exists = data[0] != 0 + + if x.exists { + decData, err := scale.Decode(data[1:], []byte{}) + if err != nil { + return nil, err + } + x.value = decData.([]byte) + } else { + x.value = nil + } + + return x, nil +} + // FixedSizeBytes represents an optional FixedSizeBytes type. It does not length-encode the value when encoding. 
type FixedSizeBytes struct { exists bool @@ -224,7 +245,7 @@ func (x *FixedSizeBytes) Decode(r io.Reader) (*FixedSizeBytes, error) { return nil, ErrInvalidOptional } - x.exists = (exists != 0) + x.exists = exists != 0 if x.exists { value, err := ioutil.ReadAll(r) diff --git a/lib/common/optional/types_test.go b/lib/common/optional/types_test.go index 40642aeb3d..0f0f373a32 100644 --- a/lib/common/optional/types_test.go +++ b/lib/common/optional/types_test.go @@ -19,6 +19,8 @@ package optional import ( "bytes" "testing" + + "github.com/stretchr/testify/require" ) func TestNewBoolean(t *testing.T) { @@ -172,3 +174,36 @@ func TestBooleanDecode(t *testing.T) { t.Fatal("decoded value should be false") } } + +func TestDecodeBytes(t *testing.T) { + testByteData := []byte("testData") + + testBytes := NewBytes(false, nil) + + require.False(t, testBytes.Exists(), "exist should be false") + require.Equal(t, []byte(nil), testBytes.Value(), "value should be empty") + + testBytes.Set(true, testByteData) + require.True(t, testBytes.Exists(), "exist should be true") + require.Equal(t, testByteData, testBytes.Value(), "value should be Equal") + + encData, err := testBytes.Encode() + require.NoError(t, err) + require.NotNil(t, encData) + + newBytes, err := testBytes.DecodeBytes(encData) + require.NoError(t, err) + + require.True(t, newBytes.Exists(), "exist should be true") + require.Equal(t, testBytes.Value(), newBytes.Value(), "value should be Equal") + + // Invalid data + _, err = newBytes.DecodeBytes(nil) + require.Equal(t, err, ErrInvalidOptional) + + newBytes, err = newBytes.DecodeBytes([]byte{0}) + require.NoError(t, err) + + require.False(t, newBytes.Exists(), "exist should be false") + require.Equal(t, []byte(nil), newBytes.Value(), "value should be empty") +} diff --git a/lib/grandpa/errors.go b/lib/grandpa/errors.go index 34902002ac..1d51970d0e 100644 --- a/lib/grandpa/errors.go +++ b/lib/grandpa/errors.go @@ -22,73 +22,82 @@ import ( 
"github.com/ChainSafe/gossamer/lib/blocktree" ) -// ErrNilBlockState is returned when BlockState is nil -var ErrNilBlockState = errors.New("cannot have nil BlockState") +//nolint +var ( + ErrNilBlockState = errors.New("cannot have nil BlockState") + ErrNilGrandpaState = errors.New("cannot have nil GrandpaState") + ErrNilDigestHandler = errors.New("cannot have nil DigestHandler") + ErrNilKeypair = errors.New("cannot have nil keypair") + ErrNilNetwork = errors.New("cannot have nil Network") -// ErrNilDigestHandler is returned when DigestHandler is nil -var ErrNilDigestHandler = errors.New("cannot have nil DigestHandler") + // ErrBlockDoesNotExist is returned when trying to validate a vote for a block that doesn't exist + ErrBlockDoesNotExist = errors.New("block does not exist") -// ErrNilKeypair is returned when the keypair is nil -var ErrNilKeypair = errors.New("cannot have nil keypair") + // ErrInvalidSignature is returned when trying to validate a vote message with an invalid signature + ErrInvalidSignature = errors.New("signature is not valid") -// ErrNilNetwork is returned when the Network is nil -var ErrNilNetwork = errors.New("cannot have nil Network") + // ErrSetIDMismatch is returned when trying to validate a vote message with an invalid voter set ID, or when receiving a catch up message with a different set ID + ErrSetIDMismatch = errors.New("set IDs do not match") -// ErrBlockDoesNotExist is returned when trying to validate a vote for a block that doesn't exist -var ErrBlockDoesNotExist = errors.New("block does not exist") + // ErrRoundMismatch is returned when trying to validate a vote message that isn't for the current round + ErrRoundMismatch = errors.New("rounds do not match") -// ErrInvalidSignature is returned when trying to validate a vote message with an invalid signature -var ErrInvalidSignature = errors.New("signature is not valid") + // ErrEquivocation is returned when trying to validate a vote for that is equivocatory + ErrEquivocation = 
errors.New("vote is equivocatory") -// ErrSetIDMismatch is returned when trying to validate a vote message with an invalid voter set ID, or when receiving a catch up message with a different set ID -var ErrSetIDMismatch = errors.New("set IDs do not match") + // ErrVoterNotFound is returned when trying to validate a vote for a voter that isn't in the voter set + ErrVoterNotFound = errors.New("voter is not in voter set") -// ErrRoundMismatch is returned when trying to validate a vote message that isn't for the current round -var ErrRoundMismatch = errors.New("rounds do not match") + // ErrDescendantNotFound is returned when trying to validate a vote for a block that isn't a descendant of the last finalised block + ErrDescendantNotFound = blocktree.ErrDescendantNotFound -// ErrEquivocation is returned when trying to validate a vote for that is equivocatory -var ErrEquivocation = errors.New("vote is equivocatory") + // ErrNoPreVotedBlock is returned when there is no pre-voted block for a round. + // this can only happen in the case of > 1/3 byzantine nodes (ie > 1/3 nodes equivocate or don't submit valid votes) + ErrNoPreVotedBlock = errors.New("cannot get pre-voted block") -// ErrVoterNotFound is returned when trying to validate a vote for a voter that isn't in the voter set -var ErrVoterNotFound = errors.New("voter is not in voter set") + // ErrNoGHOST is returned when there is no GHOST. the only case where this could happen is if there are no votes + // at all, so it shouldn't ever happen. 
+ ErrNoGHOST = errors.New("cannot determine grandpa-GHOST") -// ErrDescendantNotFound is returned when trying to validate a vote for a block that isn't a descendant of the last finalized block -var ErrDescendantNotFound = blocktree.ErrDescendantNotFound + // ErrCannotDecodeSubround is returned when a subround value cannot be decoded + ErrCannotDecodeSubround = errors.New("cannot decode invalid subround value") -// ErrNoPreVotedBlock is returned when there is no pre-voted block for a round. -// this can only happen in the case of > 1/3 byzantine nodes (ie > 1/3 nodes equivocate or don't submit valid votes) -var ErrNoPreVotedBlock = errors.New("cannot get pre-voted block") + // ErrInvalidMessageType is returned when a network.Message cannot be decoded + ErrInvalidMessageType = errors.New("cannot decode invalid message type") -// ErrNoGHOST is returned when there is no GHOST. the only case where this could happen is if there are no votes -// at all, so it shouldn't ever happen. -var ErrNoGHOST = errors.New("cannot determine grandpa-GHOST") + // ErrNotCommitMessage is returned when calling GetFinalizedHash on a message that isn't a CommitMessage + ErrNotCommitMessage = errors.New("cannot get finalised hash from VoteMessage") -// ErrCannotDecodeSubround is returned when a subround value cannot be decoded -var ErrCannotDecodeSubround = errors.New("cannot decode invalid subround value") + // ErrNoJustification is returned when no justification can be found for a block, ie. 
it has not been finalised + ErrNoJustification = errors.New("no justification found for block") -// ErrInvalidMessageType is returned when a network.Message cannot be decoded -var ErrInvalidMessageType = errors.New("cannot decode invalid message type") + // ErrMinVotesNotMet is returned when the number of votes is less than the required minimum in a Justification + ErrMinVotesNotMet = errors.New("minimum number of votes not met in a Justification") -// ErrNotFinalizationMessage is returned when calling GetFinalizedHash on a message that isn't a FinalizationMessage -var ErrNotFinalizationMessage = errors.New("cannot get finalized hash from VoteMessage") + // ErrInvalidCatchUpRound is returned when a catch-up message is received with an invalid round + ErrInvalidCatchUpRound = errors.New("catch up request is for future round") -// ErrNoJustification is returned when no justification can be found for a block, ie. it has not been finalized -var ErrNoJustification = errors.New("no justification found for block") + // ErrInvalidCatchUpResponseRound is returned when a catch-up response is received with an invalid round + ErrInvalidCatchUpResponseRound = errors.New("catch up response is not for previous round") -// ErrMinVotesNotMet is returned when the number of votes is less than the required minimum in a Justification -var ErrMinVotesNotMet = errors.New("minimum number of votes not met in a Justification") + // ErrGHOSTlessCatchUp is returned when a catch up response does not contain a valid grandpa-GHOST (ie. 
finalised block) + ErrGHOSTlessCatchUp = errors.New("catch up response does not contain grandpa-GHOST") -// ErrInvalidCatchUpRound is returned when a catch-up message is received with an invalid round -var ErrInvalidCatchUpRound = errors.New("catch up request is for future round") + // ErrCatchUpResponseNotCompletable is returned when the round represented by the catch up response is not completable + ErrCatchUpResponseNotCompletable = errors.New("catch up response is not completable") -// ErrInvalidCatchUpResponseRound is returned when a catch-up response is received with an invalid round -var ErrInvalidCatchUpResponseRound = errors.New("catch up response is not for previous round") + // ErrServicePaused is returned if the service is paused and waiting for catch up messages + ErrServicePaused = errors.New("service is paused") -// ErrGHOSTlessCatchUp is returned when a catch up response does not contain a valid grandpa-GHOST (ie. finalized block) -var ErrGHOSTlessCatchUp = errors.New("catch up response does not contain grandpa-GHOST") + // ErrPrecommitSignatureMismatch is returned when the number of precommits and signatures in a CommitMessage do not match + ErrPrecommitSignatureMismatch = errors.New("number of precommits does not match number of signatures") -// ErrCatchUpResponseNotCompletable is returned when the round represented by the catch up response is not completable -var ErrCatchUpResponseNotCompletable = errors.New("catch up response is not completable") + // ErrJustificationHashMismatch is returned when a precommit hash within a justification does not match the justification hash + ErrJustificationHashMismatch = errors.New("precommit hash does not match justification hash") -// ErrServicePaused is returned if the service is paused and waiting for catch up messages -var ErrServicePaused = errors.New("service is paused") + // ErrJustificationNumberMismatch is returned when a precommit number within a justification does not match the justification number 
+ ErrJustificationNumberMismatch = errors.New("precommit number does not match justification number") + + // ErrAuthorityNotInSet is returned when a precommit within a justification is signed by a key not in the authority set + ErrAuthorityNotInSet = errors.New("authority is not in set") +) diff --git a/lib/grandpa/grandpa.go b/lib/grandpa/grandpa.go index cd7794bdd6..436cb32ad7 100644 --- a/lib/grandpa/grandpa.go +++ b/lib/grandpa/grandpa.go @@ -34,15 +34,18 @@ import ( log "github.com/ChainSafe/log15" ) -var interval = time.Second +var ( + interval = time.Second // TODO: make this configurable; currently 1s is same as substrate; total round length is then 2s + logger = log.New("pkg", "grandpa") +) // Service represents the current state of the grandpa protocol type Service struct { // preliminaries - logger log.Logger ctx context.Context cancel context.CancelFunc blockState BlockState + grandpaState GrandpaState digestHandler DigestHandler keypair *ed25519.Keypair // TODO: change to grandpa keystore mapLock sync.Mutex @@ -58,31 +61,33 @@ type Service struct { state *State // current state prevotes map[ed25519.PublicKeyBytes]*Vote // pre-votes for the current round precommits map[ed25519.PublicKeyBytes]*Vote // pre-commits for the current round - pvJustifications map[common.Hash][]*Justification // pre-vote justifications for the current round - pcJustifications map[common.Hash][]*Justification // pre-commit justifications for the current round + pvJustifications map[common.Hash][]*SignedPrecommit // pre-vote justifications for the current round + pcJustifications map[common.Hash][]*SignedPrecommit // pre-commit justifications for the current round pvEquivocations map[ed25519.PublicKeyBytes][]*Vote // equivocatory votes for current pre-vote stage pcEquivocations map[ed25519.PublicKeyBytes][]*Vote // equivocatory votes for current pre-commit stage tracker *tracker // tracker of vote messages we may need in the future - head *types.Header // most recently finalized 
block - nextAuthorities []*Voter // if not nil, the updated authorities for the next round + head *types.Header // most recently finalised block // historical information - preVotedBlock map[uint64]*Vote // map of round number -> pre-voted block - bestFinalCandidate map[uint64]*Vote // map of round number -> best final candidate - justification map[uint64][]*Justification // map of round number -> precommit round justification + preVotedBlock map[uint64]*Vote // map of round number -> pre-voted block + bestFinalCandidate map[uint64]*Vote // map of round number -> best final candidate + justification map[uint64][]*SignedPrecommit // map of round number -> precommit round justification // channels for communication with other services - in chan GrandpaMessage // only used to receive *VoteMessage + in chan GrandpaMessage // only used to receive *VoteMessage + finalisedCh chan *types.FinalisationInfo + finalisedChID byte + neighbourMessage *NeighbourMessage // cached neighbour message } // Config represents a GRANDPA service configuration type Config struct { LogLvl log.Lvl BlockState BlockState + GrandpaState GrandpaState DigestHandler DigestHandler Network Network Voters []*Voter - SetID uint64 Keypair *ed25519.Keypair Authority bool } @@ -93,6 +98,10 @@ func NewService(cfg *Config) (*Service, error) { return nil, ErrNilBlockState } + if cfg.GrandpaState == nil { + return nil, ErrNilGrandpaState + } + if cfg.DigestHandler == nil { return nil, ErrNilDigestHandler } @@ -105,7 +114,6 @@ func NewService(cfg *Config) (*Service, error) { return nil, ErrNilNetwork } - logger := log.New("pkg", "grandpa") h := log.StreamHandler(os.Stdout, log.TerminalFormat()) h = log.CallerFileHandler(h) logger.SetHandler(log.LvlFilterHandler(cfg.LogLvl, h)) @@ -117,36 +125,48 @@ func NewService(cfg *Config) (*Service, error) { logger.Debug("creating service", "authority", cfg.Authority, "key", pub, "voter set", Voters(cfg.Voters)) - // get latest finalized header + // get latest finalised 
header head, err := cfg.BlockState.GetFinalizedHeader(0, 0) if err != nil { return nil, err } - ctx, cancel := context.WithCancel(context.Background()) + setID, err := cfg.GrandpaState.GetCurrentSetID() + if err != nil { + return nil, err + } + + finalisedCh := make(chan *types.FinalisationInfo, 16) + fid, err := cfg.BlockState.RegisterFinalizedChannel(finalisedCh) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) s := &Service{ - logger: logger, ctx: ctx, cancel: cancel, - state: NewState(cfg.Voters, cfg.SetID, 0), // TODO: determine current round + state: NewState(cfg.Voters, setID, 0), // TODO: determine current round blockState: cfg.BlockState, + grandpaState: cfg.GrandpaState, digestHandler: cfg.DigestHandler, keypair: cfg.Keypair, authority: cfg.Authority, prevotes: make(map[ed25519.PublicKeyBytes]*Vote), precommits: make(map[ed25519.PublicKeyBytes]*Vote), - pvJustifications: make(map[common.Hash][]*Justification), - pcJustifications: make(map[common.Hash][]*Justification), + pvJustifications: make(map[common.Hash][]*SignedPrecommit), + pcJustifications: make(map[common.Hash][]*SignedPrecommit), pvEquivocations: make(map[ed25519.PublicKeyBytes][]*Vote), pcEquivocations: make(map[ed25519.PublicKeyBytes][]*Vote), preVotedBlock: make(map[uint64]*Vote), bestFinalCandidate: make(map[uint64]*Vote), - justification: make(map[uint64][]*Justification), + justification: make(map[uint64][]*SignedPrecommit), head: head, in: make(chan GrandpaMessage, 128), resumed: make(chan struct{}), network: cfg.Network, + finalisedCh: finalisedCh, + finalisedChID: fid, } s.messageHandler = NewMessageHandler(s, s.blockState) @@ -163,13 +183,20 @@ func (s *Service) Start() error { return err } + // if we're not an authority, we don't need to worry about the voting process. 
+ // the grandpa service is only used to verify incoming block justifications + if !s.authority { + return nil + } + go func() { err := s.initiate() if err != nil { - s.logger.Error("failed to initiate", "error", err) + logger.Error("failed to initiate", "error", err) } }() + go s.sendNeighbourMessage() return nil } @@ -180,6 +207,9 @@ func (s *Service) Stop() error { s.cancel() + s.blockState.UnregisterFinalizedChannel(s.finalisedChID) + close(s.finalisedCh) + if !s.authority { return nil } @@ -188,42 +218,40 @@ func (s *Service) Stop() error { return nil } -// Authorities returns the current grandpa authorities -func (s *Service) Authorities() []*types.Authority { +// authorities returns the current grandpa authorities +func (s *Service) authorities() []*types.Authority { ad := make([]*types.Authority, len(s.state.voters)) for i, v := range s.state.voters { ad[i] = &types.Authority{ - Key: v.key, - Weight: v.id, + Key: v.Key, + Weight: v.ID, } } return ad } -// UpdateAuthorities schedules an update to the grandpa voter set and increments the setID at the end of the current round -func (s *Service) UpdateAuthorities(ad []*types.Authority) { - v := make([]*Voter, len(ad)) - for i, a := range ad { - if pk, ok := a.Key.(*ed25519.PublicKey); ok { - v[i] = &Voter{ - key: pk, - id: a.Weight, - } - } +// updateAuthorities updates the grandpa voter set, increments the setID, and resets the round numbers +func (s *Service) updateAuthorities() error { + currSetID, err := s.grandpaState.GetCurrentSetID() + if err != nil { + return err } - s.nextAuthorities = v -} + // set ID hasn't changed, do nothing + if currSetID == s.state.setID { + return nil + } -// updateAuthorities updates the grandpa voter set, increments the setID, and resets the round numbers -func (s *Service) updateAuthorities() { - if s.nextAuthorities != nil { - s.state.voters = s.nextAuthorities - s.state.setID++ - s.state.round = 0 - s.nextAuthorities = nil + nextAuthorities, err := 
s.grandpaState.GetAuthorities(currSetID) + if err != nil { + return err } + + s.state.voters = nextAuthorities + s.state.setID = currSetID + s.state.round = 1 // round resets to 1 after a set ID change + return nil } func (s *Service) publicKeyBytes() ed25519.PublicKeyBytes { @@ -233,7 +261,10 @@ func (s *Service) publicKeyBytes() ed25519.PublicKeyBytes { // initiate initates a GRANDPA round func (s *Service) initiate() error { // if there is an authority change, execute it - s.updateAuthorities() + err := s.updateAuthorities() + if err != nil { + return err + } if s.state.round == 0 { s.chanLock.Lock() @@ -247,27 +278,23 @@ func (s *Service) initiate() error { // make sure no votes can be validated while we are incrementing rounds s.roundLock.Lock() s.state.round++ - s.logger.Trace("incrementing grandpa round", "next round", s.state.round) + logger.Trace("incrementing grandpa round", "next round", s.state.round) if s.tracker != nil { s.tracker.stop() } - if s.authority { - var err error - s.prevotes = make(map[ed25519.PublicKeyBytes]*Vote) - s.precommits = make(map[ed25519.PublicKeyBytes]*Vote) - s.pcJustifications = make(map[common.Hash][]*Justification) - s.pvEquivocations = make(map[ed25519.PublicKeyBytes][]*Vote) - s.pcEquivocations = make(map[ed25519.PublicKeyBytes][]*Vote) - s.justification = make(map[uint64][]*Justification) - - s.tracker, err = newTracker(s.blockState, s.in) - if err != nil { - return err - } - s.tracker.start() - s.logger.Trace("started message tracker") + s.prevotes = make(map[ed25519.PublicKeyBytes]*Vote) + s.precommits = make(map[ed25519.PublicKeyBytes]*Vote) + s.pcJustifications = make(map[common.Hash][]*SignedPrecommit) + s.pvEquivocations = make(map[ed25519.PublicKeyBytes][]*Vote) + s.pcEquivocations = make(map[ed25519.PublicKeyBytes][]*Vote) + s.justification = make(map[uint64][]*SignedPrecommit) + s.tracker, err = newTracker(s.blockState, s.in) + if err != nil { + return err } + s.tracker.start() + logger.Trace("started message 
tracker") s.roundLock.Unlock() // don't begin grandpa until we are at block 1 @@ -284,23 +311,15 @@ func (s *Service) initiate() error { } for { - if s.authority { - err = s.playGrandpaRound() - if err == ErrServicePaused { - // wait for service to un-pause - <-s.resumed - err = s.initiate() - } + err = s.playGrandpaRound() + if err == ErrServicePaused { + // wait for service to un-pause + <-s.resumed + err = s.initiate() + } - if err != nil { - return err - } - } else { - // if not a grandpa authority, wait for a block to be finalized in the current round - err = s.waitForFinalizedBlock() - if err != nil { - return err - } + if err != nil { + return err } if s.ctx.Err() != nil { @@ -314,36 +333,6 @@ func (s *Service) initiate() error { } } -func (s *Service) waitForFinalizedBlock() error { - ch := make(chan *types.Header) - id, err := s.blockState.RegisterFinalizedChannel(ch) - if err != nil { - return err - } - - defer s.blockState.UnregisterFinalizedChannel(id) - - for { - done := false - - select { - case header := <-ch: - if header != nil && header.Number.Int64() >= s.head.Number.Int64() { - s.head = header - done = true - } - case <-s.ctx.Done(): - return nil - } - - if done { - break - } - } - - return nil -} - func (s *Service) waitForFirstBlock() error { ch := make(chan *types.Block) id, err := s.blockState.RegisterImportedChannel(ch) @@ -375,9 +364,9 @@ func (s *Service) waitForFirstBlock() error { } // playGrandpaRound executes a round of GRANDPA -// at the end of this round, a block will be finalized. +// at the end of this round, a block will be finalised. 
func (s *Service) playGrandpaRound() error { - s.logger.Debug("starting round", "round", s.state.round, "setID", s.state.setID) + logger.Debug("starting round", "round", s.state.round, "setID", s.state.setID) // save start time start := time.Now() @@ -386,16 +375,31 @@ func (s *Service) playGrandpaRound() error { primary := s.derivePrimary() // if primary, broadcast the best final candidate from the previous round - if bytes.Equal(primary.key.Encode(), s.keypair.Public().Encode()) { - msg, err := s.newFinalizationMessage(s.head, s.state.round-1).ToConsensusMessage() + if bytes.Equal(primary.Key.Encode(), s.keypair.Public().Encode()) { + msg, err := s.newCommitMessage(s.head, s.state.round-1).ToConsensusMessage() if err != nil { - s.logger.Error("failed to encode finalization message", "error", err) + logger.Error("failed to encode finalisation message", "error", err) } else { s.network.SendMessage(msg) } + + primProposal, err := s.createVoteMessage(&Vote{ + hash: s.head.Hash(), + number: uint32(s.head.Number.Int64()), + }, primaryProposal, s.keypair) + if err != nil { + logger.Error("failed to create primary proposal message", "error", err) + } else { + msg, err = primProposal.ToConsensusMessage() + if err != nil { + logger.Error("failed to encode finalisation message", "error", err) + } else { + s.network.SendMessage(msg) + } + } } - s.logger.Debug("receiving pre-vote messages...") + logger.Debug("receiving pre-vote messages...") go s.receiveMessages(func() bool { if s.paused.Load().(bool) { @@ -428,33 +432,33 @@ func (s *Service) playGrandpaRound() error { s.mapLock.Lock() s.prevotes[s.publicKeyBytes()] = pv - s.logger.Debug("sending pre-vote message...", "vote", pv, "prevotes", s.prevotes) + logger.Debug("sending pre-vote message...", "vote", pv, "prevotes", s.prevotes) s.mapLock.Unlock() - finalized := false + finalised := false // continue to send prevote messages until round is done - go func(finalized *bool) { + go func(finalised *bool) { for { if 
s.paused.Load().(bool) { return } - if *finalized { + if *finalised { return } err = s.sendMessage(pv, prevote) if err != nil { - s.logger.Error("could not send prevote message", "error", err) + logger.Error("could not send prevote message", "error", err) } time.Sleep(time.Second * 5) - s.logger.Trace("sent pre-vote message...", "vote", pv, "prevotes", s.prevotes) + logger.Trace("sent pre-vote message...", "vote", pv, "prevotes", s.prevotes) } - }(&finalized) + }(&finalised) - s.logger.Debug("receiving pre-commit messages...") + logger.Debug("receiving pre-commit messages...") go s.receiveMessages(func() bool { end := start.Add(interval * 4) @@ -483,33 +487,33 @@ func (s *Service) playGrandpaRound() error { s.mapLock.Lock() s.precommits[s.publicKeyBytes()] = pc - s.logger.Debug("sending pre-commit message...", "vote", pc, "precommits", s.precommits) + logger.Debug("sending pre-commit message...", "vote", pc, "precommits", s.precommits) s.mapLock.Unlock() // continue to send precommit messages until round is done - go func(finalized *bool) { + go func(finalised *bool) { for { if s.paused.Load().(bool) { return } - if *finalized { + if *finalised { return } err = s.sendMessage(pc, precommit) if err != nil { - s.logger.Error("could not send precommit message", "error", err) + logger.Error("could not send precommit message", "error", err) } time.Sleep(time.Second * 5) - s.logger.Trace("sent pre-commit message...", "vote", pc, "precommits", s.precommits) + logger.Trace("sent pre-commit message...", "vote", pc, "precommits", s.precommits) } - }(&finalized) + }(&finalised) go func() { - // receive messages until current round is completable and previous round is finalizable - // and the last finalized block is greater than the best final candidate from the previous round + // receive messages until current round is completable and previous round is finalisable + // and the last finalised block is greater than the best final candidate from the previous round 
s.receiveMessages(func() bool { if s.paused.Load().(bool) { return true @@ -521,7 +525,7 @@ func (s *Service) playGrandpaRound() error { } round := s.state.round - finalizable, err := s.isFinalizable(round) + finalisable, err := s.isFinalisable(round) if err != nil { return false } @@ -535,7 +539,7 @@ func (s *Service) playGrandpaRound() error { return false } - if completable && finalizable && uint64(s.head.Number.Int64()) >= prevBfc.number { + if completable && finalisable && uint32(s.head.Number.Int64()) >= prevBfc.number { return true } @@ -545,15 +549,15 @@ func (s *Service) playGrandpaRound() error { err = s.attemptToFinalize() if err != nil { - log.Error("failed to finalize", "error", err) + log.Error("failed to finalise", "error", err) return err } - finalized = true + finalised = true return nil } -// attemptToFinalize loops until the round is finalizable +// attemptToFinalize loops until the round is finalisable func (s *Service) attemptToFinalize() error { if s.paused.Load().(bool) { return ErrServicePaused @@ -561,7 +565,7 @@ func (s *Service) attemptToFinalize() error { has, _ := s.blockState.HasFinalizedBlock(s.state.round, s.state.setID) if has { - return nil // a block was finalized, seems like we missed some messages + return nil // a block was finalised, seems like we missed some messages } bfc, err := s.getBestFinalCandidate() @@ -574,17 +578,17 @@ func (s *Service) attemptToFinalize() error { return err } - if bfc.number >= uint64(s.head.Number.Int64()) && pc >= s.state.threshold() { - err = s.finalize() + if bfc.number >= uint32(s.head.Number.Int64()) && pc >= s.state.threshold() { + err = s.finalise() if err != nil { return err } - // if we haven't received a finalization message for this block yet, broadcast a finalization message + // if we haven't received a finalisation message for this block yet, broadcast a finalisation message votes := s.getDirectVotes(precommit) - s.logger.Debug("finalized block!!!", "setID", s.state.setID, "round", 
s.state.round, "hash", s.head.Hash(), + logger.Debug("finalised block!!!", "setID", s.state.setID, "round", s.state.round, "hash", s.head.Hash(), "precommits #", pc, "votes for bfc #", votes[*bfc], "total votes for bfc", pc, "precommits", s.precommits) - msg, err := s.newFinalizationMessage(s.head, s.state.round).ToConsensusMessage() + msg, err := s.newCommitMessage(s.head, s.state.round).ToConsensusMessage() if err != nil { return err } @@ -608,7 +612,7 @@ func (s *Service) determinePreVote() (*Vote, error) { prm := s.prevotes[s.derivePrimary().PublicKeyBytes()] s.mapLock.Unlock() - if prm != nil && prm.number >= uint64(s.head.Number.Int64()) { + if prm != nil && prm.number >= uint32(s.head.Number.Int64()) { vote = prm } else { header, err := s.blockState.BestBlockHeader() @@ -620,7 +624,7 @@ func (s *Service) determinePreVote() (*Vote, error) { } nextChange := s.digestHandler.NextGrandpaAuthorityChange() - if vote.number > nextChange { + if uint64(vote.number) > nextChange { header, err := s.blockState.GetHeaderByNumber(big.NewInt(int64(nextChange))) if err != nil { return nil, err @@ -645,7 +649,7 @@ func (s *Service) determinePreCommit() (*Vote, error) { s.mapLock.Unlock() nextChange := s.digestHandler.NextGrandpaAuthorityChange() - if pvb.number > nextChange { + if uint64(pvb.number) > nextChange { header, err := s.blockState.GetHeaderByNumber(big.NewInt(int64(nextChange))) if err != nil { return nil, err @@ -657,8 +661,8 @@ func (s *Service) determinePreCommit() (*Vote, error) { return &pvb, nil } -// isFinalizable returns true is the round is finalizable, false otherwise. -func (s *Service) isFinalizable(round uint64) (bool, error) { +// isFinalisable returns true is the round is finalisable, false otherwise. 
+func (s *Service) isFinalisable(round uint64) (bool, error) { var pvb Vote var err error @@ -704,8 +708,8 @@ func (s *Service) isFinalizable(round uint64) (bool, error) { return false, nil } -// finalize finalizes the round by setting the best final candidate for this round -func (s *Service) finalize() error { +// finalise finalises the round by setting the best final candidate for this round +func (s *Service) finalise() error { // get best final candidate bfc, err := s.getBestFinalCandidate() if err != nil { @@ -728,12 +732,12 @@ func (s *Service) finalize() error { // set justification s.justification[s.state.round] = s.pcJustifications[bfc.hash] - pvj, err := newFullJustification(s.pvJustifications[bfc.hash]).Encode() + pvj, err := newJustification(s.state.round, bfc.hash, bfc.number, s.pvJustifications[bfc.hash]).Encode() if err != nil { return err } - pcj, err := newFullJustification(s.pcJustifications[bfc.hash]).Encode() + pcj, err := newJustification(s.state.round, bfc.hash, bfc.number, s.pcJustifications[bfc.hash]).Encode() if err != nil { return err } @@ -748,13 +752,13 @@ func (s *Service) finalize() error { return err } - // set finalized head for round in db + // set finalised head for round in db err = s.blockState.SetFinalizedHash(bfc.hash, s.state.round, s.state.setID) if err != nil { return err } - // set latest finalized head in db + // set latest finalised head in db return s.blockState.SetFinalizedHash(bfc.hash, 0, 0) } @@ -897,7 +901,7 @@ func (s *Service) getPreVotedBlock() (Vote, error) { // if there are multiple, find the one with the highest number and return it highest := Vote{ - number: uint64(0), + number: uint32(0), } for h, n := range blocks { if n > highest.number { @@ -916,7 +920,7 @@ func (s *Service) getPreVotedBlock() (Vote, error) { func (s *Service) getGrandpaGHOST() (Vote, error) { threshold := s.state.threshold() - var blocks map[common.Hash]uint64 + var blocks map[common.Hash]uint32 var err error for { @@ -937,7 +941,7 @@ 
func (s *Service) getGrandpaGHOST() (Vote, error) { // if there are multiple, find the one with the highest number and return it highest := Vote{ - number: uint64(0), + number: uint32(0), } for h, n := range blocks { if n > highest.number { @@ -957,10 +961,10 @@ func (s *Service) getGrandpaGHOST() (Vote, error) { // thus, if there are no blocks with >=threshold total votes, but the sum of votes for blocks A and B is >=threshold, then this function returns // the first common ancestor of A and B. // in general, this function will return the highest block on each chain with >=threshold votes. -func (s *Service) getPossibleSelectedBlocks(stage subround, threshold uint64) (map[common.Hash]uint64, error) { +func (s *Service) getPossibleSelectedBlocks(stage subround, threshold uint64) (map[common.Hash]uint32, error) { // get blocks that were directly voted for votes := s.getDirectVotes(stage) - blocks := make(map[common.Hash]uint64) + blocks := make(map[common.Hash]uint32) // check if any of them have >=threshold votes for v := range votes { @@ -996,7 +1000,7 @@ func (s *Service) getPossibleSelectedBlocks(stage subround, threshold uint64) (m // getPossibleSelectedAncestors recursively searches for ancestors with >=2/3 votes // it returns a map of block hash -> number, such that the blocks in the map have >=2/3 votes -func (s *Service) getPossibleSelectedAncestors(votes []Vote, curr common.Hash, selected map[common.Hash]uint64, stage subround, threshold uint64) (map[common.Hash]uint64, error) { +func (s *Service) getPossibleSelectedAncestors(votes []Vote, curr common.Hash, selected map[common.Hash]uint32, stage subround, threshold uint64) (map[common.Hash]uint32, error) { for _, v := range votes { if v.hash == curr { continue @@ -1026,7 +1030,7 @@ func (s *Service) getPossibleSelectedAncestors(votes []Vote, curr common.Hash, s return nil, err } - selected[pred] = uint64(h.Number.Int64()) + selected[pred] = uint32(h.Number.Int64()) } else { selected, err = 
s.getPossibleSelectedAncestors(votes, pred, selected, stage, threshold) if err != nil { @@ -1123,7 +1127,7 @@ func (s *Service) getVotes(stage subround) []Vote { } // findParentWithNumber returns a Vote for an ancestor with number n given an existing Vote -func (s *Service) findParentWithNumber(v *Vote, n uint64) (*Vote, error) { +func (s *Service) findParentWithNumber(v *Vote, n uint32) (*Vote, error) { if v.number <= n { return v, nil } diff --git a/lib/grandpa/grandpa_test.go b/lib/grandpa/grandpa_test.go index e1eaebfc39..6d79c53ce8 100644 --- a/lib/grandpa/grandpa_test.go +++ b/lib/grandpa/grandpa_test.go @@ -41,7 +41,10 @@ var testGenesisHeader = &types.Header{ StateRoot: trie.EmptyHash, } -var kr, _ = keystore.NewEd25519Keyring() +var ( + kr, _ = keystore.NewEd25519Keyring() + voters = newTestVoters() +) type mockDigestHandler struct{} @@ -64,30 +67,34 @@ func newTestState(t *testing.T) *state.Service { block, err := state.NewBlockStateFromGenesis(db, testGenesisHeader) require.NoError(t, err) + grandpa, err := state.NewGrandpaStateFromGenesis(db, voters) + require.NoError(t, err) + return &state.Service{ - Block: block, + Block: block, + Grandpa: grandpa, } } func newTestVoters() []*Voter { - voters := []*Voter{} + vs := []*Voter{} for i, k := range kr.Keys { - voters = append(voters, &Voter{ - key: k.Public().(*ed25519.PublicKey), - id: uint64(i), + vs = append(vs, &Voter{ + Key: k.Public().(*ed25519.PublicKey), + ID: uint64(i), }) } - return voters + return vs } func newTestService(t *testing.T) (*Service, *state.Service) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Alice().(*ed25519.Keypair), @@ -97,31 +104,30 @@ func newTestService(t *testing.T) (*Service, *state.Service) { gs, err := NewService(cfg) require.NoError(t, err) - return gs, st } func TestUpdateAuthorities(t *testing.T) { gs, _ := 
newTestService(t) - gs.UpdateAuthorities([]*types.Authority{ - {Key: kr.Alice().Public().(*ed25519.PublicKey), Weight: 0}, - }) - - err := gs.Start() + err := gs.updateAuthorities() require.NoError(t, err) + require.Equal(t, uint64(0), gs.state.setID) - time.Sleep(time.Second) - require.Equal(t, uint64(1), gs.state.setID) - require.Equal(t, []*Voter{ - {key: kr.Alice().Public().(*ed25519.PublicKey), id: 0}, - }, gs.state.voters) + next := []*Voter{ + {Key: kr.Alice().Public().(*ed25519.PublicKey), ID: 0}, + } - gs.UpdateAuthorities([]*types.Authority{ - {Key: kr.Alice().Public().(*ed25519.PublicKey), Weight: 0}, - }) + err = gs.grandpaState.(*state.GrandpaState).SetNextChange(next, big.NewInt(1)) + require.NoError(t, err) - err = gs.Stop() + err = gs.grandpaState.(*state.GrandpaState).IncrementSetID() require.NoError(t, err) + + err = gs.updateAuthorities() + require.NoError(t, err) + + require.Equal(t, uint64(1), gs.state.setID) + require.Equal(t, next, gs.state.voters) } func TestGetDirectVotes(t *testing.T) { @@ -263,8 +269,8 @@ func TestGetPossibleSelectedAncestors_SameAncestor(t *testing.T) { } votes := gs.getVotes(prevote) - prevoted := make(map[common.Hash]uint64) - var blocks map[common.Hash]uint64 + prevoted := make(map[common.Hash]uint32) + var blocks map[common.Hash]uint32 for _, curr := range leaves { blocks, err = gs.getPossibleSelectedAncestors(votes, curr, prevoted, prevote, gs.state.threshold()) @@ -277,7 +283,7 @@ func TestGetPossibleSelectedAncestors_SameAncestor(t *testing.T) { // this should return the highest common ancestor of (a, b, c) with >=2/3 votes, // which is the node at depth 6. 
require.Equal(t, 1, len(blocks)) - require.Equal(t, uint64(6), blocks[expected]) + require.Equal(t, uint32(6), blocks[expected]) } func TestGetPossibleSelectedAncestors_VaryingAncestor(t *testing.T) { @@ -313,8 +319,8 @@ func TestGetPossibleSelectedAncestors_VaryingAncestor(t *testing.T) { } votes := gs.getVotes(prevote) - prevoted := make(map[common.Hash]uint64) - var blocks map[common.Hash]uint64 + prevoted := make(map[common.Hash]uint32) + var blocks map[common.Hash]uint32 for _, curr := range leaves { blocks, err = gs.getPossibleSelectedAncestors(votes, curr, prevoted, prevote, gs.state.threshold()) @@ -330,8 +336,8 @@ func TestGetPossibleSelectedAncestors_VaryingAncestor(t *testing.T) { // this should return the highest common ancestor of (a, b) and (b, c) with >=2/3 votes, // which are the nodes at depth 6 and 7. require.Equal(t, 2, len(blocks)) - require.Equal(t, uint64(6), blocks[expectedAt6]) - require.Equal(t, uint64(7), blocks[expectedAt7]) + require.Equal(t, uint32(6), blocks[expectedAt6]) + require.Equal(t, uint32(7), blocks[expectedAt7]) } func TestGetPossibleSelectedAncestors_VaryingAncestor_MoreBranches(t *testing.T) { @@ -373,8 +379,8 @@ func TestGetPossibleSelectedAncestors_VaryingAncestor_MoreBranches(t *testing.T) } votes := gs.getVotes(prevote) - prevoted := make(map[common.Hash]uint64) - var blocks map[common.Hash]uint64 + prevoted := make(map[common.Hash]uint32) + var blocks map[common.Hash]uint32 for _, curr := range leaves { blocks, err = gs.getPossibleSelectedAncestors(votes, curr, prevoted, prevote, gs.state.threshold()) @@ -390,8 +396,8 @@ func TestGetPossibleSelectedAncestors_VaryingAncestor_MoreBranches(t *testing.T) // this should return the highest common ancestor of (a, b) and (b, c) with >=2/3 votes, // which are the nodes at depth 6 and 7. 
require.Equal(t, 2, len(blocks)) - require.Equal(t, uint64(6), blocks[expectedAt6]) - require.Equal(t, uint64(7), blocks[expectedAt7]) + require.Equal(t, uint32(6), blocks[expectedAt6]) + require.Equal(t, uint32(7), blocks[expectedAt7]) } func TestGetPossibleSelectedBlocks_OneBlock(t *testing.T) { @@ -462,7 +468,7 @@ func TestGetPossibleSelectedBlocks_EqualVotes_SameAncestor(t *testing.T) { // this should return the highest common ancestor of (a, b, c) require.Equal(t, 1, len(blocks)) - require.Equal(t, uint64(6), blocks[expected]) + require.Equal(t, uint32(6), blocks[expected]) } func TestGetPossibleSelectedBlocks_EqualVotes_VaryingAncestor(t *testing.T) { @@ -509,8 +515,8 @@ func TestGetPossibleSelectedBlocks_EqualVotes_VaryingAncestor(t *testing.T) { // this should return the highest common ancestor of (a, b) and (b, c) with >=2/3 votes, // which are the nodes at depth 6 and 7. require.Equal(t, 2, len(blocks)) - require.Equal(t, uint64(6), blocks[expectedAt6]) - require.Equal(t, uint64(7), blocks[expectedAt7]) + require.Equal(t, uint32(6), blocks[expectedAt6]) + require.Equal(t, uint32(7), blocks[expectedAt7]) } func TestGetPossibleSelectedBlocks_OneThirdEquivocating(t *testing.T) { @@ -651,7 +657,7 @@ func TestGetPreVotedBlock_MultipleCandidates(t *testing.T) { block, err := gs.getPreVotedBlock() require.NoError(t, err) require.Equal(t, expected, block.hash) - require.Equal(t, uint64(7), block.number) + require.Equal(t, uint32(7), block.number) } func TestGetPreVotedBlock_EvenMoreCandidates(t *testing.T) { @@ -714,7 +720,7 @@ func TestGetPreVotedBlock_EvenMoreCandidates(t *testing.T) { block, err := gs.getPreVotedBlock() require.NoError(t, err) require.Equal(t, expected, block.hash) - require.Equal(t, uint64(5), block.number) + require.Equal(t, uint32(5), block.number) } func TestIsCompletable(t *testing.T) { @@ -945,7 +951,7 @@ func TestDeterminePreVote_WithInvalidPrimaryPreVote(t *testing.T) { require.Equal(t, gs.head.Hash(), pv.hash) } -func 
TestIsFinalizable_True(t *testing.T) { +func TestIsFinalisable_True(t *testing.T) { gs, st := newTestService(t) branches := make(map[int]int) @@ -970,12 +976,12 @@ func TestIsFinalizable_True(t *testing.T) { } } - finalizable, err := gs.isFinalizable(gs.state.round) + finalisable, err := gs.isFinalisable(gs.state.round) require.NoError(t, err) - require.True(t, finalizable) + require.True(t, finalisable) } -func TestIsFinalizable_False(t *testing.T) { +func TestIsFinalisable_False(t *testing.T) { gs, st := newTestService(t) branches := make(map[int]int) @@ -1000,16 +1006,16 @@ func TestIsFinalizable_False(t *testing.T) { } } - // previous round has finalized block # higher than current, so round is not finalizable + // previous round has finalised block # higher than current, so round is not finalisable gs.state.round = 1 gs.bestFinalCandidate[0] = &Vote{ number: 4, } gs.preVotedBlock[gs.state.round] = voteA - finalizable, err := gs.isFinalizable(gs.state.round) + finalisable, err := gs.isFinalisable(gs.state.round) require.NoError(t, err) - require.False(t, finalizable) + require.False(t, finalisable) } func TestGetGrandpaGHOST_CommonAncestor(t *testing.T) { @@ -1083,7 +1089,7 @@ func TestGetGrandpaGHOST_MultipleCandidates(t *testing.T) { block, err := gs.getGrandpaGHOST() require.NoError(t, err) require.Equal(t, expected, block.hash) - require.Equal(t, uint64(3), block.number) + require.Equal(t, uint32(3), block.number) pv, err := gs.getPreVotedBlock() require.NoError(t, err) diff --git a/lib/grandpa/message.go b/lib/grandpa/message.go index 55ceb9c255..0a4f88e521 100644 --- a/lib/grandpa/message.go +++ b/lib/grandpa/message.go @@ -19,6 +19,8 @@ package grandpa import ( "bytes" "fmt" + "io" + "math/big" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/types" @@ -28,6 +30,7 @@ import ( ) // GrandpaMessage is implemented by all GRANDPA network messages +// TODO: the fields can be un-exported, as can all the message implementations type 
GrandpaMessage interface { //nolint ToConsensusMessage() (*network.ConsensusMessage, error) Type() byte @@ -35,8 +38,8 @@ type GrandpaMessage interface { //nolint var ( voteType byte = 0 - precommitType byte = 1 - finalizationType byte = 2 + commitType byte = 1 + neighbourType byte = 2 catchUpRequestType byte = 3 catchUpResponseType byte = 4 ) @@ -52,8 +55,9 @@ type FullVote struct { // SignedMessage represents a block hash and number signed by an authority type SignedMessage struct { + Stage subround // 0 for pre-vote, 1 for pre-commit, 2 for primary proposal Hash common.Hash - Number uint64 + Number uint32 Signature [64]byte // ed25519.SignatureLength AuthorityID ed25519.PublicKeyBytes } @@ -63,18 +67,65 @@ func (m *SignedMessage) String() string { return fmt.Sprintf("hash=%s number=%d authorityID=0x%x", m.Hash, m.Number, m.AuthorityID) } +// Decode SCALE decodes the data into a SignedMessage +func (m *SignedMessage) Decode(r io.Reader) (err error) { + m.Stage, err = subround(0).Decode(r) + if err != nil { + return err + } + + vote, err := new(Vote).Decode(r) + if err != nil { + return err + } + + m.Hash = vote.hash + m.Number = vote.number + + sig, err := common.Read64Bytes(r) + if err != nil { + return err + } + + copy(m.Signature[:], sig[:]) + + id, err := common.Read32Bytes(r) + if err != nil { + return err + } + + copy(m.AuthorityID[:], id[:]) + return nil +} + // VoteMessage represents a network-level vote message // https://github.com/paritytech/substrate/blob/master/client/finality-grandpa/src/communication/gossip.rs#L336 type VoteMessage struct { Round uint64 SetID uint64 - Stage subround // 0 for pre-vote, 1 for pre-commit Message *SignedMessage } -// Type returns voteType or precommitType +// Decode SCALE decodes the data into a VoteMessage +func (v *VoteMessage) Decode(r io.Reader) (err error) { + v.Round, err = common.ReadUint64(r) + if err != nil { + return err + } + + v.SetID, err = common.ReadUint64(r) + if err != nil { + return err + } + + 
v.Message = new(SignedMessage) + err = v.Message.Decode(r) + return err +} + +// Type returns voteType func (v *VoteMessage) Type() byte { - return byte(v.Stage) + return voteType } // ToConsensusMessage converts the VoteMessage into a network-level consensus message @@ -84,44 +135,167 @@ func (v *VoteMessage) ToConsensusMessage() (*ConsensusMessage, error) { return nil, err } - typ := byte(v.Stage) return &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: append([]byte{typ}, enc...), + Data: append([]byte{voteType}, enc...), }, nil } -// FinalizationMessage represents a network finalization message -type FinalizationMessage struct { - Round uint64 - Vote *Vote - Justification []*Justification +// NeighbourMessage represents a network-level neighbour message +type NeighbourMessage struct { + Version byte + Round uint64 + SetID uint64 + Number uint32 +} + +// ToConsensusMessage converts the NeighbourMessage into a network-level consensus message +func (m *NeighbourMessage) ToConsensusMessage() (*network.ConsensusMessage, error) { + enc, err := scale.Encode(m) + if err != nil { + return nil, err + } + + return &ConsensusMessage{ + Data: append([]byte{neighbourType}, enc...), + }, nil +} + +// Type returns neighbourType +func (m *NeighbourMessage) Type() byte { + return neighbourType +} + +// AuthData represents signature data within a CommitMessage to be paired with a Precommit +type AuthData struct { + Signature [64]byte + AuthorityID ed25519.PublicKeyBytes +} + +// Encode SCALE encodes the AuthData +func (d *AuthData) Encode() ([]byte, error) { + return append(d.Signature[:], d.AuthorityID[:]...), nil +} + +// Decode SCALE decodes the data into an AuthData +func (d *AuthData) Decode(r io.Reader) error { + sig, err := common.Read64Bytes(r) + if err != nil { + return err + } + + copy(d.Signature[:], sig[:]) + + id, err := common.Read32Bytes(r) + if err != nil { + return err + } + + copy(d.AuthorityID[:], id[:]) + return nil +} + +// CommitMessage 
represents a network finalisation message +type CommitMessage struct { + Round uint64 + SetID uint64 + Vote *Vote + Precommits []*Vote + AuthData []*AuthData +} + +// Decode SCALE decodes the data into a CommitMessage +func (f *CommitMessage) Decode(r io.Reader) (err error) { + f.Round, err = common.ReadUint64(r) + if err != nil { + return err + } + + f.SetID, err = common.ReadUint64(r) + if err != nil { + return err + } + + f.Vote, err = new(Vote).Decode(r) + if err != nil { + return err + } + + sd := &scale.Decoder{Reader: r} + numPrecommits, err := sd.Decode(new(big.Int)) + if err != nil { + return err + } + + f.Precommits = make([]*Vote, numPrecommits.(*big.Int).Int64()) + for i := range f.Precommits { + f.Precommits[i], err = new(Vote).Decode(r) + if err != nil { + return err + } + } + + numAuthData, err := sd.Decode(new(big.Int)) + if err != nil { + return err + } + + if numAuthData.(*big.Int).Cmp(numPrecommits.(*big.Int)) != 0 { + return ErrPrecommitSignatureMismatch + } + + f.AuthData = make([]*AuthData, numAuthData.(*big.Int).Int64()) + for i := range f.AuthData { + f.AuthData[i] = new(AuthData) + err = f.AuthData[i].Decode(r) + if err != nil { + return err + } + } + + return nil } -// Type returns finalizationType -func (f *FinalizationMessage) Type() byte { - return finalizationType +// Type returns commitType +func (f *CommitMessage) Type() byte { + return commitType } -// ToConsensusMessage converts the FinalizationMessage into a network-level consensus message -func (f *FinalizationMessage) ToConsensusMessage() (*ConsensusMessage, error) { +// ToConsensusMessage converts the CommitMessage into a network-level consensus message +func (f *CommitMessage) ToConsensusMessage() (*ConsensusMessage, error) { enc, err := scale.Encode(f) if err != nil { return nil, err } return &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: append([]byte{finalizationType}, enc...), + Data: append([]byte{commitType}, enc...), }, nil } -func (s *Service) 
newFinalizationMessage(header *types.Header, round uint64) *FinalizationMessage { - return &FinalizationMessage{ - Round: round, - Vote: NewVoteFromHeader(header), - Justification: s.justification[round], +func (s *Service) newCommitMessage(header *types.Header, round uint64) *CommitMessage { + just := s.justification[round] + precommits, authData := justificationToCompact(just) + return &CommitMessage{ + Round: round, + Vote: NewVoteFromHeader(header), + Precommits: precommits, + AuthData: authData, + } +} + +func justificationToCompact(just []*SignedPrecommit) ([]*Vote, []*AuthData) { + precommits := make([]*Vote, len(just)) + authData := make([]*AuthData, len(just)) + + for i, j := range just { + precommits[i] = j.Vote + authData[i] = &AuthData{ + Signature: j.Signature, + AuthorityID: j.AuthorityID, + } } + + return precommits, authData } type catchUpRequest struct { @@ -149,18 +323,17 @@ func (r *catchUpRequest) ToConsensusMessage() (*ConsensusMessage, error) { } return &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: append([]byte{catchUpRequestType}, enc...), + Data: append([]byte{catchUpRequestType}, enc...), }, nil } type catchUpResponse struct { - Round uint64 SetID uint64 - PreVoteJustification FullJustification - PreCommitJustification FullJustification + Round uint64 + PreVoteJustification []*SignedPrecommit + PreCommitJustification []*SignedPrecommit Hash common.Hash - Number uint64 + Number uint32 } func (s *Service) newCatchUpResponse(round, setID uint64) (*catchUpResponse, error) { @@ -184,28 +357,31 @@ func (s *Service) newCatchUpResponse(round, setID uint64) (*catchUpResponse, err } r := &bytes.Buffer{} + sd := &scale.Decoder{Reader: r} _, err = r.Write(just) if err != nil { return nil, err } - pvj, err := FullJustification{}.Decode(r) + d, err := sd.Decode([]*SignedPrecommit{}) if err != nil { return nil, err } + pvj := d.([]*SignedPrecommit) - pcj, err := FullJustification{}.Decode(r) + d, err = 
sd.Decode([]*SignedPrecommit{}) if err != nil { return nil, err } + pcj := d.([]*SignedPrecommit) return &catchUpResponse{ - Round: round, SetID: setID, + Round: round, PreVoteJustification: pvj, PreCommitJustification: pcj, Hash: header.Hash(), - Number: header.Number.Uint64(), + Number: uint32(header.Number.Uint64()), }, nil } @@ -222,7 +398,6 @@ func (r *catchUpResponse) ToConsensusMessage() (*ConsensusMessage, error) { } return &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: append([]byte{catchUpResponseType}, enc...), + Data: append([]byte{catchUpResponseType}, enc...), }, nil } diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 8d4db4eed9..31654edc27 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -17,11 +17,18 @@ package grandpa import ( + "bytes" + "fmt" + "math/big" "reflect" + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/scale" + + "github.com/libp2p/go-libp2p-core/peer" ) // MessageHandler handles GRANDPA consensus messages @@ -39,30 +46,29 @@ func NewMessageHandler(grandpa *Service, blockState BlockState) *MessageHandler } // HandleMessage handles a GRANDPA consensus message -// if it is a FinalizationMessage, it updates the BlockState +// if it is a CommitMessage, it updates the BlockState // if it is a VoteMessage, it sends it to the GRANDPA service -func (h *MessageHandler) handleMessage(msg *ConsensusMessage) (*ConsensusMessage, error) { - if msg == nil || len(msg.Data) == 0 { - h.grandpa.logger.Trace("received nil message or message with nil data") - return nil, nil - } - - m, err := decodeMessage(msg) - if err != nil { - return nil, err - } +func (h *MessageHandler) handleMessage(from peer.ID, m GrandpaMessage) (network.NotificationsMessage, error) { + logger.Trace("handling grandpa 
message", "msg", m) switch m.Type() { - case voteType, precommitType: + case voteType: vm, ok := m.(*VoteMessage) if h.grandpa != nil && ok { // send vote message to grandpa service h.grandpa.in <- vm } - case finalizationType: - if fm, ok := m.(*FinalizationMessage); ok { - return h.handleFinalizationMessage(fm) + case commitType: + if fm, ok := m.(*CommitMessage); ok { + return h.handleCommitMessage(fm) + } + case neighbourType: + nm, ok := m.(*NeighbourMessage) + if !ok { + return nil, nil } + + return nil, h.handleNeighbourMessage(from, nm) case catchUpRequestType: if r, ok := m.(*catchUpRequest); ok { return h.handleCatchUpRequest(r) @@ -78,37 +84,88 @@ func (h *MessageHandler) handleMessage(msg *ConsensusMessage) (*ConsensusMessage return nil, nil } -func (h *MessageHandler) handleFinalizationMessage(msg *FinalizationMessage) (*ConsensusMessage, error) { - h.grandpa.logger.Debug("received finalization message", "round", msg.Round, "hash", msg.Vote.hash) +func (h *MessageHandler) handleNeighbourMessage(from peer.ID, msg *NeighbourMessage) error { + currFinalized, err := h.grandpa.blockState.GetFinalizedHeader(0, 0) + if err != nil { + return err + } + + // ignore neighbour messages where our best finalised number is greater than theirs + if uint32(currFinalized.Number.Int64()) >= msg.Number { + return nil + } + + // TODO; determine if there is some reason we don't receive justifications in responses near the head (usually), + // and remove the following code if it's fixed. + head, err := h.grandpa.blockState.BestBlockNumber() + if err != nil { + return err + } + + // ignore neighbour messages that are above our head + if int64(msg.Number) > head.Int64() { + return nil + } + + logger.Debug("got neighbour message", "number", msg.Number, "set id", msg.SetID, "round", msg.Round) + h.grandpa.network.SendJustificationRequest(from, msg.Number) + + // don't finalise too close to head, until we add justification request + verification functionality. 
+ // this prevents us from marking the wrong block as final and getting stuck on the wrong chain + if uint32(head.Int64())-4 < msg.Number { + return nil + } + + // TODO: instead of assuming the finalised hash is the one we currently know about, + // request the justification from the network before setting it as finalised. + hash, err := h.grandpa.blockState.GetHashByNumber(big.NewInt(int64(msg.Number))) + if err != nil { + return err + } + + if err = h.grandpa.blockState.SetFinalizedHash(hash, msg.Round, msg.SetID); err != nil { + return err + } + + if err = h.grandpa.blockState.SetFinalizedHash(hash, 0, 0); err != nil { + return err + } + + logger.Info("🔨 finalised block", "number", msg.Number, "hash", hash) + return nil +} + +func (h *MessageHandler) handleCommitMessage(msg *CommitMessage) (*ConsensusMessage, error) { + logger.Debug("received finalisation message", "round", msg.Round, "hash", msg.Vote.hash) if has, _ := h.blockState.HasFinalizedBlock(msg.Round, h.grandpa.state.setID); has { return nil, nil } // check justification here - err := h.verifyFinalizationMessageJustification(msg) + err := h.verifyCommitMessageJustification(msg) if err != nil { return nil, err } - // set finalized head for round in db + // set finalised head for round in db err = h.blockState.SetFinalizedHash(msg.Vote.hash, msg.Round, h.grandpa.state.setID) if err != nil { return nil, err } - // set latest finalized head in db + // set latest finalised head in db err = h.blockState.SetFinalizedHash(msg.Vote.hash, 0, 0) if err != nil { return nil, err } // check if msg has same setID but is 2 or more rounds ahead of us, if so, return catch-up request to send - if msg.Round > h.grandpa.state.round+1 && !h.grandpa.paused.Load().(bool) { // TODO: FinalizationMessage does not have setID, confirm this is correct + if msg.Round > h.grandpa.state.round+1 && !h.grandpa.paused.Load().(bool) { // TODO: CommitMessage does not have setID, confirm this is correct h.grandpa.paused.Store(true) 
h.grandpa.state.round = msg.Round + 1 req := newCatchUpRequest(msg.Round, h.grandpa.state.setID) - h.grandpa.logger.Debug("sending catch-up request; paused service", "round", msg.Round) + logger.Debug("sending catch-up request; paused service", "round", msg.Round) return req.ToConsensusMessage() } @@ -116,7 +173,7 @@ func (h *MessageHandler) handleFinalizationMessage(msg *FinalizationMessage) (*C } func (h *MessageHandler) handleCatchUpRequest(msg *catchUpRequest) (*ConsensusMessage, error) { - h.grandpa.logger.Debug("received catch up request", "round", msg.Round, "setID", msg.SetID) + logger.Debug("received catch up request", "round", msg.Round, "setID", msg.SetID) if msg.SetID != h.grandpa.state.setID { return nil, ErrSetIDMismatch } @@ -130,16 +187,16 @@ func (h *MessageHandler) handleCatchUpRequest(msg *catchUpRequest) (*ConsensusMe return nil, err } - h.grandpa.logger.Debug("sending catch up response", "round", msg.Round, "setID", msg.SetID, "hash", resp.Hash) + logger.Debug("sending catch up response", "round", msg.Round, "setID", msg.SetID, "hash", resp.Hash) return resp.ToConsensusMessage() } func (h *MessageHandler) handleCatchUpResponse(msg *catchUpResponse) error { - h.grandpa.logger.Debug("received catch up response", "round", msg.Round, "setID", msg.SetID, "hash", msg.Hash) + logger.Debug("received catch up response", "round", msg.Round, "setID", msg.SetID, "hash", msg.Hash) // if we aren't currently expecting a catch up response, return if !h.grandpa.paused.Load().(bool) { - h.grandpa.logger.Debug("not currently paused, ignoring catch up response") + logger.Debug("not currently paused, ignoring catch up response") return nil } @@ -179,7 +236,7 @@ func (h *MessageHandler) handleCatchUpResponse(msg *catchUpResponse) error { close(h.grandpa.resumed) h.grandpa.resumed = make(chan struct{}) h.grandpa.paused.Store(false) - h.grandpa.logger.Debug("caught up to round; unpaused service", "round", h.grandpa.state.round) + logger.Debug("caught up to round; 
unpaused service", "round", h.grandpa.state.round) return nil } @@ -202,7 +259,7 @@ func (h *MessageHandler) verifyCatchUpResponseCompletability(prevote, precommit return nil } -// decodeMessage decodes a network-level consensus message into a GRANDPA VoteMessage or FinalizationMessage +// decodeMessage decodes a network-level consensus message into a GRANDPA VoteMessage or CommitMessage func decodeMessage(msg *ConsensusMessage) (m GrandpaMessage, err error) { var ( mi interface{} @@ -210,14 +267,18 @@ func decodeMessage(msg *ConsensusMessage) (m GrandpaMessage, err error) { ) switch msg.Data[0] { - case voteType, precommitType: - mi, err = scale.Decode(msg.Data[1:], &VoteMessage{Message: new(SignedMessage)}) - if m, ok = mi.(*VoteMessage); !ok { - return nil, ErrInvalidMessageType - } - case finalizationType: - mi, err = scale.Decode(msg.Data[1:], &FinalizationMessage{}) - if m, ok = mi.(*FinalizationMessage); !ok { + case voteType: + m = &VoteMessage{} + _, err = scale.Decode(msg.Data[1:], m) + case commitType: + r := &bytes.Buffer{} + _, _ = r.Write(msg.Data[1:]) + cm := &CommitMessage{} + err = cm.Decode(r) + m = cm + case neighbourType: + mi, err = scale.Decode(msg.Data[1:], &NeighbourMessage{}) + if m, ok = mi.(*NeighbourMessage); !ok { return nil, ErrInvalidMessageType } case catchUpRequestType: @@ -241,11 +302,20 @@ func decodeMessage(msg *ConsensusMessage) (m GrandpaMessage, err error) { return m, nil } -func (h *MessageHandler) verifyFinalizationMessageJustification(fm *FinalizationMessage) error { - // verify justifications +func (h *MessageHandler) verifyCommitMessageJustification(fm *CommitMessage) error { + if len(fm.Precommits) != len(fm.AuthData) { + return ErrPrecommitSignatureMismatch + } + count := 0 - for _, just := range fm.Justification { - err := h.verifyJustification(just, just.Vote, fm.Round, h.grandpa.state.setID, precommit) + for i, pc := range fm.Precommits { + just := &SignedPrecommit{ + Vote: pc, + Signature: fm.AuthData[i].Signature, 
+ AuthorityID: fm.AuthData[i].AuthorityID, + } + + err := h.verifyJustification(just, fm.Round, h.grandpa.state.setID, precommit) if err != nil { continue } @@ -257,8 +327,8 @@ func (h *MessageHandler) verifyFinalizationMessageJustification(fm *Finalization // confirm total # signatures >= grandpa threshold if uint64(count) < h.grandpa.state.threshold() { - h.grandpa.logger.Error("minimum votes not met for finalization message", "votes needed", h.grandpa.state.threshold(), - "votes", fm.Justification) + logger.Error("minimum votes not met for finalisation message", "votes needed", h.grandpa.state.threshold(), + "votes received", len(fm.Precommits)) return ErrMinVotesNotMet } return nil @@ -269,7 +339,7 @@ func (h *MessageHandler) verifyPreVoteJustification(msg *catchUpResponse) (commo votes := make(map[common.Hash]uint64) for _, just := range msg.PreVoteJustification { - err := h.verifyJustification(just, just.Vote, msg.Round, msg.SetID, prevote) + err := h.verifyJustification(just, msg.Round, msg.SetID, prevote) if err != nil { continue } @@ -296,7 +366,7 @@ func (h *MessageHandler) verifyPreCommitJustification(msg *catchUpResponse) erro // verify pre-commit justification count := 0 for _, just := range msg.PreCommitJustification { - err := h.verifyJustification(just, just.Vote, msg.Round, msg.SetID, precommit) + err := h.verifyJustification(just, msg.Round, msg.SetID, precommit) if err != nil { continue } @@ -313,11 +383,11 @@ func (h *MessageHandler) verifyPreCommitJustification(msg *catchUpResponse) erro return nil } -func (h *MessageHandler) verifyJustification(just *Justification, vote *Vote, round, setID uint64, stage subround) error { +func (h *MessageHandler) verifyJustification(just *SignedPrecommit, round, setID uint64, stage subround) error { // verify signature msg, err := scale.Encode(&FullVote{ Stage: stage, - Vote: vote, + Vote: just.Vote, Round: round, SetID: setID, }) @@ -341,7 +411,7 @@ func (h *MessageHandler) verifyJustification(just 
*Justification, vote *Vote, ro // verify authority in justification set authFound := false - for _, auth := range h.grandpa.Authorities() { + for _, auth := range h.grandpa.authorities() { justKey, err := just.AuthorityID.Encode() if err != nil { return err @@ -356,3 +426,88 @@ func (h *MessageHandler) verifyJustification(just *Justification, vote *Vote, ro } return nil } + +// VerifyBlockJustification verifies the finality justification for a block +func (s *Service) VerifyBlockJustification(justification []byte) error { + r := &bytes.Buffer{} + _, _ = r.Write(justification) + fj := new(Justification) + err := fj.Decode(r) + if err != nil { + return err + } + + setID, err := s.grandpaState.GetSetIDByBlockNumber(big.NewInt(int64(fj.Commit.Number))) + if err != nil { + return fmt.Errorf("cannot get set ID from block number: %w", err) + } + + auths, err := s.grandpaState.GetAuthorities(setID) + if err != nil { + return fmt.Errorf("cannot get authorities for set ID: %w", err) + } + + logger.Debug("verifying justification", + "setID", setID, + "round", fj.Round, + "hash", fj.Commit.Hash, + "number", fj.Commit.Number, + "sig count", len(fj.Commit.Precommits), + ) + + if len(fj.Commit.Precommits) < (2 * len(auths) / 3) { + return ErrMinVotesNotMet + } + + for _, just := range fj.Commit.Precommits { + if just.Vote.hash != fj.Commit.Hash { + return ErrJustificationHashMismatch + } + + if just.Vote.number != fj.Commit.Number { + return ErrJustificationNumberMismatch + } + + pk, err := ed25519.NewPublicKey(just.AuthorityID[:]) + if err != nil { + return err + } + + ok := isInAuthSet(pk, auths) + if !ok { + return ErrAuthorityNotInSet + } + + // verify signature for each precommit + msg, err := scale.Encode(&FullVote{ + Stage: precommit, + Vote: just.Vote, + Round: fj.Round, + SetID: setID, + }) + if err != nil { + return err + } + + ok, err = pk.Verify(msg, just.Signature[:]) + if err != nil { + return err + } + + if !ok { + return ErrInvalidSignature + } + } + + return nil 
+} + +func isInAuthSet(auth *ed25519.PublicKey, set []*types.GrandpaVoter) bool { + for _, a := range set { + if bytes.Equal(a.Key.Encode(), auth.Encode()) { + return true + } + } + + return false +} diff --git a/lib/grandpa/message_handler_test.go b/lib/grandpa/message_handler_test.go index 090920aa09..04e7f30dca 100644 --- a/lib/grandpa/message_handler_test.go +++ b/lib/grandpa/message_handler_test.go @@ -38,12 +38,12 @@ var testHeader = &types.Header{ var testHash = testHeader.Hash() -func buildTestJustifications(t *testing.T, qty int, round, setID uint64, kr *keystore.Ed25519Keyring, subround subround) []*Justification { - just := []*Justification{} +func buildTestJustification(t *testing.T, qty int, round, setID uint64, kr *keystore.Ed25519Keyring, subround subround) []*SignedPrecommit { + just := []*SignedPrecommit{} for i := 0; i < qty; i++ { - j := &Justification{ - Vote: NewVote(testHash, round), - Signature: createSignedVoteMsg(t, round, round, setID, kr.Keys[i%len(kr.Keys)], subround), + j := &SignedPrecommit{ + Vote: NewVote(testHash, uint32(round)), + Signature: createSignedVoteMsg(t, uint32(round), round, setID, kr.Keys[i%len(kr.Keys)], subround), AuthorityID: kr.Keys[i%len(kr.Keys)].Public().(*ed25519.PublicKey).AsBytes(), } just = append(just, j) @@ -52,7 +52,7 @@ func buildTestJustifications(t *testing.T, qty int, round, setID uint64, kr *key } -func createSignedVoteMsg(t *testing.T, number, round, setID uint64, pk *ed25519.Keypair, subround subround) [64]byte { +func createSignedVoteMsg(t *testing.T, number uint32, round, setID uint64, pk *ed25519.Keypair, subround subround) [64]byte { // create vote message msg, err := scale.Encode(&FullVote{ Stage: subround, @@ -71,8 +71,7 @@ func createSignedVoteMsg(t *testing.T, number, round, setID uint64, pk *ed25519. 
func TestDecodeMessage_VoteMessage(t *testing.T) { cm := &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x004d000000000000006300000000000000017db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a777700000000000036e6eca85489bebbb0f687ca5404748d5aa2ffabee34e3ed272cc7b2f6d0a82c65b99bc7cd90dbc21bb528289ebf96705dbd7d96918d34d815509b4e0e2a030f34602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), + Data: common.MustHexToBytes("0x004d000000000000006300000000000000017db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a7777000036e6eca85489bebbb0f687ca5404748d5aa2ffabee34e3ed272cc7b2f6d0a82c65b99bc7cd90dbc21bb528289ebf96705dbd7d96918d34d815509b4e0e2a030f34602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), } msg, err := decodeMessage(cm) @@ -85,8 +84,8 @@ func TestDecodeMessage_VoteMessage(t *testing.T) { expected := &VoteMessage{ Round: 77, SetID: 99, - Stage: precommit, Message: &SignedMessage{ + Stage: precommit, Hash: common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a"), Number: 0x7777, Signature: sig, @@ -97,37 +96,52 @@ func TestDecodeMessage_VoteMessage(t *testing.T) { require.Equal(t, expected, msg) } -func TestDecodeMessage_FinalizationMessage(t *testing.T) { - cm := &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x024d000000000000007db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a0000000000000000040a0b0c0d00000000000000000000000000000000000000000000000000000000e7030000000000000102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), - } - - msg, err := decodeMessage(cm) - require.NoError(t, err) - - expected := &FinalizationMessage{ +func TestDecodeMessage_CommitMessage(t *testing.T) { + expected := &CommitMessage{ Round: 77, + SetID: 1, 
Vote: &Vote{ hash: common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a"), - number: 0, + number: 99, + }, + Precommits: []*Vote{ + testVote, }, - Justification: []*Justification{ + AuthData: []*AuthData{ { - Vote: testVote, Signature: testSignature, AuthorityID: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), }, }, } + cm, err := expected.ToConsensusMessage() + require.NoError(t, err) + + msg, err := decodeMessage(cm) + require.NoError(t, err) + require.Equal(t, expected, msg) +} + +func TestDecodeMessage_NeighbourMessage(t *testing.T) { + cm := &ConsensusMessage{ + Data: common.MustHexToBytes("0x020102000000000000000300000000000000ff000000"), + } + msg, err := decodeMessage(cm) + require.NoError(t, err) + + expected := &NeighbourMessage{ + Version: 1, + Round: 2, + SetID: 3, + Number: 255, + } require.Equal(t, expected, msg) } func TestDecodeMessage_CatchUpRequest(t *testing.T) { cm := &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x0311000000000000002200000000000000"), + Data: common.MustHexToBytes("0x0311000000000000002200000000000000"), } msg, err := decodeMessage(cm) @@ -153,11 +167,8 @@ func TestMessageHandler_VoteMessage(t *testing.T) { vm, err := gs.createVoteMessage(v, precommit, gs.keypair) require.NoError(t, err) - cm, err := vm.ToConsensusMessage() - require.NoError(t, err) - h := NewMessageHandler(gs, st.Block) - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", vm) require.NoError(t, err) require.Nil(t, out) @@ -169,35 +180,67 @@ func TestMessageHandler_VoteMessage(t *testing.T) { } } +func TestMessageHandler_NeighbourMessage(t *testing.T) { + gs, st := newTestService(t) + h := NewMessageHandler(gs, st.Block) + + msg := &NeighbourMessage{ + Version: 1, + Round: 2, + SetID: 3, + Number: 1, + } + + _, err := h.handleMessage("", msg) + require.NoError(t, err) + + block := &types.Block{ + Header: &types.Header{ + Number: big.NewInt(1), + ParentHash: 
st.Block.GenesisHash(), + }, + Body: &types.Body{0}, + } + + err = st.Block.AddBlock(block) + require.NoError(t, err) + + out, err := h.handleMessage("", msg) + require.NoError(t, err) + require.Nil(t, out) + + finalised, err := st.Block.GetFinalizedHash(0, 0) + require.NoError(t, err) + require.Equal(t, block.Header.Hash(), finalised) +} + func TestMessageHandler_VerifyJustification_InvalidSig(t *testing.T) { gs, st := newTestService(t) gs.state.round = 77 - just := &Justification{ + just := &SignedPrecommit{ Vote: testVote, Signature: [64]byte{0x1}, AuthorityID: gs.publicKeyBytes(), } h := NewMessageHandler(gs, st.Block) - err := h.verifyJustification(just, just.Vote, gs.state.round, gs.state.setID, precommit) + err := h.verifyJustification(just, gs.state.round, gs.state.setID, precommit) require.Equal(t, err, ErrInvalidSignature) } -func TestMessageHandler_FinalizationMessage_NoCatchUpRequest_ValidSig(t *testing.T) { +func TestMessageHandler_CommitMessage_NoCatchUpRequest_ValidSig(t *testing.T) { gs, st := newTestService(t) round := uint64(77) gs.state.round = round - gs.justification[round] = buildTestJustifications(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) + gs.justification[round] = buildTestJustification(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) - fm := gs.newFinalizationMessage(gs.head, round) - fm.Vote = NewVote(testHash, round) - cm, err := fm.ToConsensusMessage() - require.NoError(t, err) + fm := gs.newCommitMessage(gs.head, round) + fm.Vote = NewVote(testHash, uint32(round)) h := NewMessageHandler(gs, st.Block) - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", fm) require.NoError(t, err) require.Nil(t, out) @@ -210,28 +253,26 @@ func TestMessageHandler_FinalizationMessage_NoCatchUpRequest_ValidSig(t *testing require.Equal(t, fm.Vote.hash, hash) } -func TestMessageHandler_FinalizationMessage_NoCatchUpRequest_MinVoteError(t *testing.T) { +func 
TestMessageHandler_CommitMessage_NoCatchUpRequest_MinVoteError(t *testing.T) { gs, st := newTestService(t) round := uint64(77) gs.state.round = round - gs.justification[round] = buildTestJustifications(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) + gs.justification[round] = buildTestJustification(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) - fm := gs.newFinalizationMessage(gs.head, round) - cm, err := fm.ToConsensusMessage() - require.NoError(t, err) + fm := gs.newCommitMessage(gs.head, round) h := NewMessageHandler(gs, st.Block) - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", fm) require.EqualError(t, err, ErrMinVotesNotMet.Error()) require.Nil(t, out) } -func TestMessageHandler_FinalizationMessage_WithCatchUpRequest(t *testing.T) { +func TestMessageHandler_CommitMessage_WithCatchUpRequest(t *testing.T) { gs, st := newTestService(t) - gs.justification[77] = []*Justification{ + gs.justification[77] = []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -239,13 +280,11 @@ func TestMessageHandler_FinalizationMessage_WithCatchUpRequest(t *testing.T) { }, } - fm := gs.newFinalizationMessage(gs.head, 77) - cm, err := fm.ToConsensusMessage() - require.NoError(t, err) + fm := gs.newCommitMessage(gs.head, 77) gs.state.voters = gs.state.voters[:1] h := NewMessageHandler(gs, st.Block) - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", fm) require.NoError(t, err) require.NotNil(t, out) @@ -257,25 +296,19 @@ func TestMessageHandler_FinalizationMessage_WithCatchUpRequest(t *testing.T) { func TestMessageHandler_CatchUpRequest_InvalidRound(t *testing.T) { gs, st := newTestService(t) - req := newCatchUpRequest(77, 0) - cm, err := req.ToConsensusMessage() - require.NoError(t, err) h := NewMessageHandler(gs, st.Block) - _, err = h.handleMessage(cm) + _, err := h.handleMessage("", req) require.Equal(t, ErrInvalidCatchUpRound, err) } func TestMessageHandler_CatchUpRequest_InvalidSetID(t 
*testing.T) { gs, st := newTestService(t) - req := newCatchUpRequest(1, 77) - cm, err := req.ToConsensusMessage() - require.NoError(t, err) h := NewMessageHandler(gs, st.Block) - _, err = h.handleMessage(cm) + _, err := h.handleMessage("", req) require.Equal(t, ErrSetIDMismatch, err) } @@ -297,7 +330,7 @@ func TestMessageHandler_CatchUpRequest_WithResponse(t *testing.T) { err = gs.blockState.(*state.BlockState).SetHeader(testHeader) require.NoError(t, err) - pvj := []*Justification{ + pvj := []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -308,7 +341,7 @@ func TestMessageHandler_CatchUpRequest_WithResponse(t *testing.T) { pvjEnc, err := scale.Encode(pvj) require.NoError(t, err) - pcj := []*Justification{ + pcj := []*SignedPrecommit{ { Vote: testVote2, Signature: testSignature, @@ -330,11 +363,9 @@ func TestMessageHandler_CatchUpRequest_WithResponse(t *testing.T) { // create and handle request req := newCatchUpRequest(round, setID) - cm, err := req.ToConsensusMessage() - require.NoError(t, err) h := NewMessageHandler(gs, st.Block) - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", req) require.NoError(t, err) require.Equal(t, expected, out) } @@ -344,13 +375,13 @@ func TestVerifyJustification(t *testing.T) { h := NewMessageHandler(gs, st.Block) vote := NewVote(testHash, 123) - just := &Justification{ + just := &SignedPrecommit{ Vote: vote, Signature: createSignedVoteMsg(t, vote.number, 77, gs.state.setID, kr.Alice().(*ed25519.Keypair), precommit), AuthorityID: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), } - err := h.verifyJustification(just, vote, 77, gs.state.setID, precommit) + err := h.verifyJustification(just, 77, gs.state.setID, precommit) require.NoError(t, err) } @@ -359,14 +390,14 @@ func TestVerifyJustification_InvalidSignature(t *testing.T) { h := NewMessageHandler(gs, st.Block) vote := NewVote(testHash, 123) - just := &Justification{ + just := &SignedPrecommit{ Vote: vote, // create signed vote with mismatched 
vote number Signature: createSignedVoteMsg(t, vote.number+1, 77, gs.state.setID, kr.Alice().(*ed25519.Keypair), precommit), AuthorityID: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), } - err := h.verifyJustification(just, vote, 77, gs.state.setID, precommit) + err := h.verifyJustification(just, 77, gs.state.setID, precommit) require.EqualError(t, err, ErrInvalidSignature.Error()) } @@ -378,13 +409,13 @@ func TestVerifyJustification_InvalidAuthority(t *testing.T) { require.NoError(t, err) vote := NewVote(testHash, 123) - just := &Justification{ + just := &SignedPrecommit{ Vote: vote, Signature: createSignedVoteMsg(t, vote.number, 77, gs.state.setID, fakeKey, precommit), AuthorityID: fakeKey.Public().(*ed25519.PublicKey).AsBytes(), } - err = h.verifyJustification(just, vote, 77, gs.state.setID, precommit) + err = h.verifyJustification(just, 77, gs.state.setID, precommit) require.EqualError(t, err, ErrVoterNotFound.Error()) } @@ -392,7 +423,7 @@ func TestMessageHandler_VerifyPreVoteJustification(t *testing.T) { gs, st := newTestService(t) h := NewMessageHandler(gs, st.Block) - just := buildTestJustifications(t, int(gs.state.threshold()), 1, gs.state.setID, kr, prevote) + just := buildTestJustification(t, int(gs.state.threshold()), 1, gs.state.setID, kr, prevote) msg := &catchUpResponse{ Round: 1, SetID: gs.state.setID, @@ -409,13 +440,13 @@ func TestMessageHandler_VerifyPreCommitJustification(t *testing.T) { h := NewMessageHandler(gs, st.Block) round := uint64(1) - just := buildTestJustifications(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) + just := buildTestJustification(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) msg := &catchUpResponse{ Round: round, SetID: gs.state.setID, PreCommitJustification: just, Hash: testHash, - Number: round, + Number: uint32(round), } err := h.verifyPreCommitJustification(msg) @@ -433,22 +464,93 @@ func TestMessageHandler_HandleCatchUpResponse(t *testing.T) { round := uint64(77) 
gs.state.round = round + 1 - pvJust := buildTestJustifications(t, int(gs.state.threshold()), round, gs.state.setID, kr, prevote) - pcJust := buildTestJustifications(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) + pvJust := buildTestJustification(t, int(gs.state.threshold()), round, gs.state.setID, kr, prevote) + pcJust := buildTestJustification(t, int(gs.state.threshold()), round, gs.state.setID, kr, precommit) msg := &catchUpResponse{ Round: round, SetID: gs.state.setID, PreVoteJustification: pvJust, PreCommitJustification: pcJust, Hash: testHash, - Number: round, + Number: uint32(round), } - cm, err := msg.ToConsensusMessage() - require.NoError(t, err) - - out, err := h.handleMessage(cm) + out, err := h.handleMessage("", msg) require.NoError(t, err) require.Nil(t, out) require.Equal(t, round+1, gs.state.round) } + +func TestMessageHandler_VerifyBlockJustification(t *testing.T) { + auths := []*types.GrandpaVoter{ + { + Key: kr.Alice().Public().(*ed25519.PublicKey), + }, + { + Key: kr.Bob().Public().(*ed25519.PublicKey), + }, + { + Key: kr.Charlie().Public().(*ed25519.PublicKey), + }, + } + + gs, st := newTestService(t) + err := st.Grandpa.SetNextChange(auths, big.NewInt(1)) + require.NoError(t, err) + + err = st.Grandpa.IncrementSetID() + require.NoError(t, err) + + setID, err := st.Grandpa.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, uint64(1), setID) + + round := uint64(2) + number := uint32(2) + precommits := buildTestJustification(t, 2, round, setID, kr, precommit) + just := newJustification(round, testHash, number, precommits) + data, err := just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.NoError(t, err) + + // use wrong hash, shouldn't verify + just = newJustification(round, common.Hash{}, number, precommits) + data, err = just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.NotNil(t, err) + require.Equal(t, ErrJustificationHashMismatch, 
err) + + // use wrong number, shouldn't verify + just = newJustification(round, testHash, number+1, precommits) + data, err = just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.NotNil(t, err) + require.Equal(t, ErrJustificationNumberMismatch, err) + + // use wrong round, shouldn't verify + just = newJustification(round+1, testHash, number, precommits) + data, err = just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.NotNil(t, err) + require.Equal(t, ErrInvalidSignature, err) + + // add authority not in set, shouldn't verify + precommits = buildTestJustification(t, len(auths)+1, round, setID, kr, precommit) + just = newJustification(round, testHash, number, precommits) + data, err = just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.Equal(t, ErrAuthorityNotInSet, err) + + // not enough signatures, shouldn't verify + precommits = buildTestJustification(t, 1, round, setID, kr, precommit) + just = newJustification(round, testHash, number, precommits) + data, err = just.Encode() + require.NoError(t, err) + err = gs.VerifyBlockJustification(data) + require.Equal(t, ErrMinVotesNotMet, err) +} diff --git a/lib/grandpa/message_test.go b/lib/grandpa/message_test.go index 1cd458667d..f9b61282ec 100644 --- a/lib/grandpa/message_test.go +++ b/lib/grandpa/message_test.go @@ -7,6 +7,7 @@ import ( "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/scale" "github.com/stretchr/testify/require" @@ -38,35 +39,43 @@ func TestVoteMessageToConsensusMessage(t *testing.T) { // test precommit vm, err := gs.createVoteMessage(v, precommit, gs.keypair) require.NoError(t, err) - - cm, err := vm.ToConsensusMessage() - require.NoError(t, err) - - expected := &ConsensusMessage{ - ConsensusEngineID: 
types.GrandpaEngineID, - Data: common.MustHexToBytes("0x014d000000000000006300000000000000017db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a7777000000000000a28633c3a1046351931209fe9182fd530dc659d54ece48e9f88f4277e47f39eb78a84d50e3d37e1b50786d88abafceb5137044b6122fb6b7b5ae8ff62787cc0e34602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), + vm.Message.Signature = [64]byte{} + + expected := &VoteMessage{ + Round: gs.state.round, + SetID: gs.state.setID, + Message: &SignedMessage{ + Stage: precommit, + Hash: v.hash, + Number: v.number, + AuthorityID: gs.keypair.Public().(*ed25519.PublicKey).AsBytes(), + }, } - require.Equal(t, expected, cm) + require.Equal(t, expected, vm) // test prevote vm, err = gs.createVoteMessage(v, prevote, gs.keypair) require.NoError(t, err) - - cm, err = vm.ToConsensusMessage() - require.NoError(t, err) - - expected = &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x004d000000000000006300000000000000007db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a7777000000000000215cea37b45853e63d4cc2f0a04c7a33aec9fc5683ac46b03a01e6c41ce46e4339bb7456667f14d109b49e8af26090f7087991f3b22494df997551ae44a0ef0034602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), + vm.Message.Signature = [64]byte{} + + expected = &VoteMessage{ + Round: gs.state.round, + SetID: gs.state.setID, + Message: &SignedMessage{ + Stage: prevote, + Hash: v.hash, + Number: v.number, + AuthorityID: gs.keypair.Public().(*ed25519.PublicKey).AsBytes(), + }, } - require.Equal(t, expected, cm) + require.Equal(t, expected, vm) } -func TestFinalizationMessageToConsensusMessage(t *testing.T) { +func TestCommitMessageToConsensusMessage(t *testing.T) { gs, _ := newTestService(t) - gs.justification[77] = []*Justification{ + gs.justification[77] = []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -74,16 +83,17 @@ func TestFinalizationMessageToConsensusMessage(t *testing.T) { }, } - 
fm := gs.newFinalizationMessage(gs.head, 77) - cm, err := fm.ToConsensusMessage() - require.NoError(t, err) + fm := gs.newCommitMessage(gs.head, 77) + precommits, authData := justificationToCompact(gs.justification[77]) - expected := &ConsensusMessage{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x024d000000000000007db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a0000000000000000040a0b0c0d00000000000000000000000000000000000000000000000000000000e7030000000000000102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034602b88f60513f1c805d87ef52896934baf6a662bc37414dbdbf69356b1a691"), + expected := &CommitMessage{ + Round: 77, + Vote: NewVoteFromHeader(gs.head), + Precommits: precommits, + AuthData: authData, } - require.Equal(t, expected, cm) + require.Equal(t, expected, fm) } func TestNewCatchUpResponse(t *testing.T) { @@ -106,7 +116,7 @@ func TestNewCatchUpResponse(t *testing.T) { err = gs.blockState.(*state.BlockState).SetHeader(testHeader) require.NoError(t, err) - pvj := []*Justification{ + pvj := []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -117,7 +127,7 @@ func TestNewCatchUpResponse(t *testing.T) { pvjEnc, err := scale.Encode(pvj) require.NoError(t, err) - pcj := []*Justification{ + pcj := []*SignedPrecommit{ { Vote: testVote2, Signature: testSignature, @@ -137,11 +147,29 @@ func TestNewCatchUpResponse(t *testing.T) { expected := &catchUpResponse{ Round: round, SetID: setID, - PreVoteJustification: FullJustification(pvj), - PreCommitJustification: FullJustification(pcj), + PreVoteJustification: pvj, + PreCommitJustification: pcj, Hash: v.hash, Number: v.number, } require.Equal(t, expected, resp) } + +func TestNeighbourMessageToConsensusMessage(t *testing.T) { + msg := &NeighbourMessage{ + Version: 1, + Round: 2, + SetID: 3, + Number: 255, + } + + cm, err := msg.ToConsensusMessage() + require.NoError(t, err) + + expected := 
&ConsensusMessage{ + Data: common.MustHexToBytes("0x020102000000000000000300000000000000ff000000"), + } + + require.Equal(t, expected, cm) +} diff --git a/lib/grandpa/network.go b/lib/grandpa/network.go index f596282b5f..ba712d347d 100644 --- a/lib/grandpa/network.go +++ b/lib/grandpa/network.go @@ -18,6 +18,7 @@ package grandpa import ( "fmt" + "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/lib/common" @@ -28,8 +29,9 @@ import ( ) var ( - grandpaID protocol.ID = "/paritytech/grandpa/1" - messageID = network.ConsensusMsgType + grandpaID protocol.ID = "/paritytech/grandpa/1" + messageID = network.ConsensusMsgType + neighbourMessageInterval = time.Minute * 5 ) // Handshake is an alias for network.Handshake @@ -124,20 +126,65 @@ func (s *Service) decodeMessage(in []byte) (NotificationsMessage, error) { return msg, err } -func (s *Service) handleNetworkMessage(_ peer.ID, msg NotificationsMessage) error { +func (s *Service) handleNetworkMessage(from peer.ID, msg NotificationsMessage) (bool, error) { + if msg == nil { + logger.Trace("received nil message, ignoring") + return false, nil + } + cm, ok := msg.(*network.ConsensusMessage) if !ok { - return ErrInvalidMessageType + return false, ErrInvalidMessageType + } + + if len(cm.Data) == 0 { + logger.Trace("received message with nil data, ignoring") + return false, nil } - resp, err := s.messageHandler.handleMessage(cm) + m, err := decodeMessage(cm) if err != nil { - return err + return false, err + } + + resp, err := s.messageHandler.handleMessage(from, m) + if err != nil { + return false, err } if resp != nil { s.network.SendMessage(resp) } - return nil + if m.Type() == neighbourType || m.Type() == catchUpResponseType { + return false, nil + } + + return true, nil +} + +func (s *Service) sendNeighbourMessage() { + for { + select { + case <-time.After(neighbourMessageInterval): + if s.neighbourMessage == nil { + continue + } + case info := <-s.finalisedCh: + s.neighbourMessage = 
&NeighbourMessage{ + Version: 1, + Round: info.Round, + SetID: info.SetID, + Number: uint32(info.Header.Number.Int64()), + } + } + + cm, err := s.neighbourMessage.ToConsensusMessage() + if err != nil { + logger.Warn("failed to convert NeighbourMessage to network message", "error", err) + continue + } + + s.network.SendMessage(cm) + } } diff --git a/lib/grandpa/network_test.go b/lib/grandpa/network_test.go index d8f2fc8ba2..e41d4b7d0b 100644 --- a/lib/grandpa/network_test.go +++ b/lib/grandpa/network_test.go @@ -17,9 +17,12 @@ package grandpa import ( + "math/big" "testing" "time" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" ) @@ -46,7 +49,7 @@ func TestGrandpaHandshake_Encode(t *testing.T) { func TestHandleNetworkMessage(t *testing.T) { gs, st := newTestService(t) - gs.justification[77] = []*Justification{ + gs.justification[77] = []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -54,7 +57,7 @@ func TestHandleNetworkMessage(t *testing.T) { }, } - fm := gs.newFinalizationMessage(gs.head, 77) + fm := gs.newCommitMessage(gs.head, 77) cm, err := fm.ToConsensusMessage() require.NoError(t, err) gs.state.voters = gs.state.voters[:1] @@ -62,12 +65,74 @@ func TestHandleNetworkMessage(t *testing.T) { h := NewMessageHandler(gs, st.Block) gs.messageHandler = h - err = gs.handleNetworkMessage(peer.ID(""), cm) + propagate, err := gs.handleNetworkMessage(peer.ID(""), cm) require.NoError(t, err) + require.True(t, propagate) select { case <-gs.network.(*testNetwork).out: case <-time.After(testTimeout): t.Fatal("expected to send message") } + + neighbourMsg := &NeighbourMessage{} + cm, err = neighbourMsg.ToConsensusMessage() + require.NoError(t, err) + + propagate, err = gs.handleNetworkMessage(peer.ID(""), cm) + require.NoError(t, err) + require.False(t, propagate) +} + +func TestSendNeighbourMessage(t *testing.T) { + gs, st := newTestService(t) + neighbourMessageInterval = time.Second + 
defer func() { + neighbourMessageInterval = time.Minute * 5 + }() + go gs.sendNeighbourMessage() + + block := &types.Block{ + Header: &types.Header{ + ParentHash: testGenesisHeader.Hash(), + Number: big.NewInt(1), + }, + Body: &types.Body{}, + } + + err := st.Block.AddBlock(block) + require.NoError(t, err) + + hash := block.Header.Hash() + round := uint64(7) + setID := uint64(33) + err = st.Block.SetFinalizedHash(hash, round, setID) + require.NoError(t, err) + + expected := &NeighbourMessage{ + Version: 1, + SetID: setID, + Round: round, + Number: 1, + } + + select { + case <-time.After(time.Second): + t.Fatal("did not send message") + case msg := <-gs.network.(*testNetwork).out: + nm, ok := msg.(*NeighbourMessage) + require.True(t, ok) + require.Equal(t, expected, nm) + } + + require.Equal(t, expected, gs.neighbourMessage) + + select { + case <-time.After(time.Second * 2): + t.Fatal("did not send message") + case msg := <-gs.network.(*testNetwork).out: + nm, ok := msg.(*NeighbourMessage) + require.True(t, ok) + require.Equal(t, expected, nm) + } } diff --git a/lib/grandpa/round_test.go b/lib/grandpa/round_test.go index 51c34e814f..d33d6b32e9 100644 --- a/lib/grandpa/round_test.go +++ b/lib/grandpa/round_test.go @@ -30,6 +30,7 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" log "github.com/ChainSafe/log15" + "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" "github.com/stretchr/testify/require" ) @@ -39,14 +40,14 @@ var testTimeout = 20 * time.Second type testNetwork struct { t *testing.T out chan GrandpaMessage - finalized chan GrandpaMessage + finalised chan GrandpaMessage } func newTestNetwork(t *testing.T) *testNetwork { return &testNetwork{ t: t, out: make(chan GrandpaMessage, 128), - finalized: make(chan GrandpaMessage, 128), + finalised: make(chan GrandpaMessage, 128), } } @@ -57,13 +58,15 @@ func (n *testNetwork) SendMessage(msg NotificationsMessage) { gmsg, err := decodeMessage(cm) require.NoError(n.t, err) - if 
gmsg.Type() == finalizationType { - n.finalized <- gmsg + if gmsg.Type() == commitType { + n.finalised <- gmsg } else { n.out <- gmsg } } +func (n *testNetwork) SendJustificationRequest(_ peer.ID, _ uint32) {} + func (n *testNetwork) RegisterNotificationsProtocol(sub protocol.ID, messageID byte, handshakeGetter network.HandshakeGetter, @@ -94,11 +97,11 @@ func onSameChain(blockState BlockState, a, b common.Hash) bool { func setupGrandpa(t *testing.T, kp *ed25519.Keypair) (*Service, chan GrandpaMessage, chan GrandpaMessage, chan GrandpaMessage) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kp, @@ -109,11 +112,11 @@ func setupGrandpa(t *testing.T, kp *ed25519.Keypair) (*Service, chan GrandpaMess gs, err := NewService(cfg) require.NoError(t, err) - return gs, gs.in, net.out, net.finalized + return gs, gs.in, net.out, net.finalised } func TestGrandpa_BaseCase(t *testing.T) { - // this is a base test case that asserts that all validators finalize the same block if they all see the + // this is a base test case that asserts that all validators finalise the same block if they all see the // same pre-votes and pre-commits, even if their chains are different kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -137,16 +140,16 @@ func TestGrandpa_BaseCase(t *testing.T) { for _, gs := range gss { precommits[gs.publicKeyBytes()], err = gs.determinePreCommit() require.NoError(t, err) - err = gs.finalize() + err = gs.finalise() require.NoError(t, err) has, err := gs.blockState.HasJustification(gs.head.Hash()) require.NoError(t, err) require.True(t, has) } - finalized := gss[0].head.Hash() + finalised := gss[0].head.Hash() for _, gs := range gss { - require.Equal(t, finalized, gs.head.Hash()) + require.Equal(t, finalised, gs.head.Hash()) } } @@ -155,7 +158,7 @@ func TestGrandpa_DifferentChains(t *testing.T) { 
t.Skip() } - // this asserts that all validators finalize the same block if they all see the + // this asserts that all validators finalise the same block if they all see the // same pre-votes and pre-commits, even if their chains are different lengths kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -187,18 +190,18 @@ func TestGrandpa_DifferentChains(t *testing.T) { for _, gs := range gss { precommits[gs.publicKeyBytes()], err = gs.determinePreCommit() require.NoError(t, err) - err = gs.finalize() + err = gs.finalise() require.NoError(t, err) } t.Log(gss[0].blockState.BlocktreeAsString()) - finalized := gss[0].head + finalised := gss[0].head for i, gs := range gss { // TODO: this can be changed to equal once attemptToFinalizeRound is implemented (needs check for >=2/3 precommits) - headOk := onSameChain(gss[0].blockState, finalized.Hash(), gs.head.Hash()) - finalizedOK := onSameChain(gs.blockState, finalized.Hash(), gs.head.Hash()) - require.True(t, headOk || finalizedOK, "node %d did not match: %s", i, gs.blockState.BlocktreeAsString()) + headOk := onSameChain(gss[0].blockState, finalised.Hash(), gs.head.Hash()) + finalisedOK := onSameChain(gs.blockState, finalised.Hash(), gs.head.Hash()) + require.True(t, headOk || finalisedOK, "node %d did not match: %s", i, gs.blockState.BlocktreeAsString()) } } @@ -221,7 +224,7 @@ func cleanup(gs *Service, in, out chan GrandpaMessage, done *bool) { //nolint } func TestPlayGrandpaRound_BaseCase(t *testing.T) { - // this asserts that all validators finalize the same block if they all see the + // this asserts that all validators finalise the same block if they all see the // same pre-votes and pre-commits, even if their chains are different lengths kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -256,26 +259,26 @@ func TestPlayGrandpaRound_BaseCase(t *testing.T) { wg := sync.WaitGroup{} wg.Add(len(kr.Keys)) - finalized := make([]*FinalizationMessage, len(kr.Keys)) + finalised := 
make([]*CommitMessage, len(kr.Keys)) for i, fin := range fins { go func(i int, fin <-chan GrandpaMessage) { select { case f := <-fin: - // receive first message, which is finalized block from previous round - if f.(*FinalizationMessage).Round == 0 { + // receive first message, which is finalised block from previous round + if f.(*CommitMessage).Round == 0 { select { case f = <-fin: case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } } - finalized[i] = f.(*FinalizationMessage) + finalised[i] = f.(*CommitMessage) case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } wg.Done() }(i, fin) @@ -284,12 +287,14 @@ func TestPlayGrandpaRound_BaseCase(t *testing.T) { wg.Wait() - for _, fb := range finalized { + for _, fb := range finalised { require.NotNil(t, fb) - require.GreaterOrEqual(t, len(fb.Justification), len(kr.Keys)/2) - finalized[0].Justification = []*Justification{} - fb.Justification = []*Justification{} - require.Equal(t, finalized[0], fb) + require.GreaterOrEqual(t, len(fb.Precommits), len(kr.Keys)/2) + finalised[0].Precommits = []*Vote{} + finalised[0].AuthData = []*AuthData{} + fb.Precommits = []*Vote{} + fb.AuthData = []*AuthData{} + require.Equal(t, finalised[0], fb) } } @@ -298,7 +303,7 @@ func TestPlayGrandpaRound_VaryingChain(t *testing.T) { t.Skip() } - // this asserts that all validators finalize the same block if they all see the + // this asserts that all validators finalise the same block if they all see the // same pre-votes and pre-commits, even if their chains are different lengths kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -354,7 +359,7 @@ func TestPlayGrandpaRound_VaryingChain(t *testing.T) { wg := sync.WaitGroup{} wg.Add(len(kr.Keys)) - finalized := make([]*FinalizationMessage, len(kr.Keys)) + finalised := make([]*CommitMessage, 
len(kr.Keys)) for i, fin := range fins { @@ -362,18 +367,19 @@ func TestPlayGrandpaRound_VaryingChain(t *testing.T) { select { case f := <-fin: - // receive first message, which is finalized block from previous round - if f.(*FinalizationMessage).Round == 0 { + // receive first message, which is finalised block from previous round + if f.(*CommitMessage).Round == 0 { select { case f = <-fin: case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } } - finalized[i] = f.(*FinalizationMessage) + finalised[i] = f.(*CommitMessage) + case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } wg.Done() }(i, fin) @@ -382,12 +388,15 @@ func TestPlayGrandpaRound_VaryingChain(t *testing.T) { wg.Wait() - for _, fb := range finalized { + for _, fb := range finalised { require.NotNil(t, fb) - require.GreaterOrEqual(t, len(fb.Justification), len(kr.Keys)/2) - finalized[0].Justification = []*Justification{} - fb.Justification = []*Justification{} - require.Equal(t, finalized[0], fb) + require.GreaterOrEqual(t, len(fb.Precommits), len(kr.Keys)/2) + require.GreaterOrEqual(t, len(fb.AuthData), len(kr.Keys)/2) + finalised[0].Precommits = []*Vote{} + finalised[0].AuthData = []*AuthData{} + fb.Precommits = []*Vote{} + fb.AuthData = []*AuthData{} + require.Equal(t, finalised[0], fb) } } @@ -396,7 +405,7 @@ func TestPlayGrandpaRound_OneThirdEquivocating(t *testing.T) { t.Skip() } - // this asserts that all validators finalize the same block even if 1/3 of voters equivocate + // this asserts that all validators finalise the same block even if 1/3 of voters equivocate kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -451,7 +460,7 @@ func TestPlayGrandpaRound_OneThirdEquivocating(t *testing.T) { wg := sync.WaitGroup{} wg.Add(len(kr.Keys)) - finalized := make([]*FinalizationMessage, len(kr.Keys)) + 
finalised := make([]*CommitMessage, len(kr.Keys)) for i, fin := range fins { @@ -459,18 +468,19 @@ func TestPlayGrandpaRound_OneThirdEquivocating(t *testing.T) { select { case f := <-fin: - // receive first message, which is finalized block from previous round - if f.(*FinalizationMessage).Round == 0 { + // receive first message, which is finalised block from previous round + if f.(*CommitMessage).Round == 0 { + select { case f = <-fin: case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } } - finalized[i] = f.(*FinalizationMessage) + finalised[i] = f.(*CommitMessage) case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } wg.Done() }(i, fin) @@ -479,12 +489,15 @@ func TestPlayGrandpaRound_OneThirdEquivocating(t *testing.T) { wg.Wait() - for _, fb := range finalized { + for _, fb := range finalised { require.NotNil(t, fb) - require.GreaterOrEqual(t, len(fb.Justification), len(kr.Keys)/2) - finalized[0].Justification = []*Justification{} - fb.Justification = []*Justification{} - require.Equal(t, finalized[0], fb) + require.GreaterOrEqual(t, len(fb.Precommits), len(kr.Keys)/2) + require.GreaterOrEqual(t, len(fb.AuthData), len(kr.Keys)/2) + finalised[0].Precommits = []*Vote{} + finalised[0].AuthData = []*AuthData{} + fb.Precommits = []*Vote{} + fb.AuthData = []*AuthData{} + require.Equal(t, finalised[0], fb) } } @@ -493,7 +506,7 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) { t.Skip() } - // this asserts that all validators finalize the same block in successive rounds + // this asserts that all validators finalise the same block in successive rounds kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -532,7 +545,7 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) { wg := sync.WaitGroup{} wg.Add(len(kr.Keys)) - finalized := make([]*FinalizationMessage, 
len(kr.Keys)) + finalised := make([]*CommitMessage, len(kr.Keys)) for i, fin := range fins { @@ -540,18 +553,18 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) { select { case f := <-fin: - // receive first message, which is finalized block from previous round - if f.(*FinalizationMessage).Round == uint64(j) { + // receive first message, which is finalised block from previous round + if f.(*CommitMessage).Round == uint64(j) { select { case f = <-fin: case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } } - finalized[i] = f.(*FinalizationMessage) + finalised[i] = f.(*CommitMessage) case <-time.After(testTimeout): - t.Errorf("did not receive finalized block from %d", i) + t.Errorf("did not receive finalised block from %d", i) } wg.Done() }(i, fin) @@ -561,13 +574,16 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) { wg.Wait() head := gss[0].blockState.(*state.BlockState).BestBlockHash() - for _, fb := range finalized { + for _, fb := range finalised { require.NotNil(t, fb) require.Equal(t, head, fb.Vote.hash) - require.GreaterOrEqual(t, len(fb.Justification), len(kr.Keys)/2) - finalized[0].Justification = []*Justification{} - fb.Justification = []*Justification{} - require.Equal(t, finalized[0], fb) + require.GreaterOrEqual(t, len(fb.Precommits), len(kr.Keys)/2) + require.GreaterOrEqual(t, len(fb.AuthData), len(kr.Keys)/2) + finalised[0].Precommits = []*Vote{} + finalised[0].AuthData = []*AuthData{} + fb.Precommits = []*Vote{} + fb.AuthData = []*AuthData{} + require.Equal(t, finalised[0], fb) } for _, gs := range gss { diff --git a/lib/grandpa/state.go b/lib/grandpa/state.go index fc610f35aa..b145d816e2 100644 --- a/lib/grandpa/state.go +++ b/lib/grandpa/state.go @@ -23,6 +23,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/libp2p/go-libp2p-core/peer" 
"github.com/libp2p/go-libp2p-core/protocol" ) @@ -43,21 +44,31 @@ type BlockState interface { BlocktreeAsString() string RegisterImportedChannel(ch chan<- *types.Block) (byte, error) UnregisterImportedChannel(id byte) - RegisterFinalizedChannel(ch chan<- *types.Header) (byte, error) + RegisterFinalizedChannel(ch chan<- *types.FinalisationInfo) (byte, error) UnregisterFinalizedChannel(id byte) SetJustification(hash common.Hash, data []byte) error HasJustification(hash common.Hash) (bool, error) GetJustification(hash common.Hash) ([]byte, error) + GetHashByNumber(num *big.Int) (common.Hash, error) + BestBlockNumber() (*big.Int, error) +} + +// GrandpaState is the interface required by grandpa into the grandpa state +type GrandpaState interface { //nolint + GetCurrentSetID() (uint64, error) + GetAuthorities(setID uint64) ([]*types.GrandpaVoter, error) + GetSetIDByBlockNumber(num *big.Int) (uint64, error) } // DigestHandler is the interface required by GRANDPA for the digest handler -type DigestHandler interface { +type DigestHandler interface { // TODO: remove, use GrandpaState NextGrandpaAuthorityChange() uint64 } // Network is the interface required by GRANDPA for the network type Network interface { SendMessage(msg network.NotificationsMessage) + SendJustificationRequest(to peer.ID, num uint32) RegisterNotificationsProtocol(sub protocol.ID, messageID byte, handshakeGetter network.HandshakeGetter, diff --git a/lib/grandpa/types.go b/lib/grandpa/types.go index 7466a82885..be3798658b 100644 --- a/lib/grandpa/types.go +++ b/lib/grandpa/types.go @@ -28,10 +28,19 @@ import ( "github.com/ChainSafe/gossamer/lib/scale" ) +//nolint +type ( + Voter = types.GrandpaVoter + Voters = types.GrandpaVoters +) + type subround byte -var prevote subround = 0 -var precommit subround = 1 +var ( + prevote subround = 0 + precommit subround = 1 + primaryProposal subround = 2 +) func (s subround) Encode() ([]byte, error) { return []byte{byte(s)}, nil @@ -62,50 +71,6 @@ func (s subround) 
String() string { return "unknown" } -// Voter represents a GRANDPA voter -type Voter struct { - key *ed25519.PublicKey - id uint64 //nolint:unused -} - -// PublicKeyBytes returns the voter key as PublicKeyBytes -func (v *Voter) PublicKeyBytes() ed25519.PublicKeyBytes { - return v.key.AsBytes() -} - -// String returns a formatted Voter string -func (v *Voter) String() string { - return fmt.Sprintf("[key=0x%s id=%d]", v.PublicKeyBytes(), v.id) -} - -// NewVotersFromAuthorities returns an array of Voters given an array of GrandpaAuthorities -func NewVotersFromAuthorities(ad []*types.Authority) []*Voter { - v := make([]*Voter, len(ad)) - - for i, d := range ad { - if pk, ok := d.Key.(*ed25519.PublicKey); ok { - v[i] = &Voter{ - key: pk, - id: d.Weight, - } - } - } - - return v -} - -// Voters represents []*Voter -type Voters []*Voter - -// String returns a formatted Voters string -func (v Voters) String() string { - str := "" - for _, w := range v { - str = str + w.String() + " " - } - return str -} - // State represents a GRANDPA state type State struct { voters []*Voter // set of voters @@ -128,7 +93,7 @@ func (s *State) pubkeyToVoter(pk *ed25519.PublicKey) (*Voter, error) { id := max for i, v := range s.voters { - if bytes.Equal(pk.Encode(), v.key.Encode()) { + if bytes.Equal(pk.Encode(), v.Key.Encode()) { id = uint64(i) break } @@ -139,8 +104,8 @@ func (s *State) pubkeyToVoter(pk *ed25519.PublicKey) (*Voter, error) { } return &Voter{ - key: pk, - id: id, + Key: pk, + ID: id, }, nil } @@ -153,11 +118,11 @@ func (s *State) threshold() uint64 { // Vote represents a vote for a block with the given hash and number type Vote struct { hash common.Hash - number uint64 + number uint32 } // NewVote returns a new Vote given a block hash and number -func NewVote(hash common.Hash, number uint64) *Vote { +func NewVote(hash common.Hash, number uint32) *Vote { return &Vote{ hash: hash, number: number, @@ -168,7 +133,7 @@ func NewVote(hash common.Hash, number uint64) *Vote { func 
NewVoteFromHeader(h *types.Header) *Vote { return &Vote{ hash: h.Hash(), - number: uint64(h.Number.Int64()), + number: uint32(h.Number.Int64()), } } @@ -193,8 +158,8 @@ func NewVoteFromHash(hash common.Hash, blockState BlockState) (*Vote, error) { // Encode returns the SCALE encoding of a Vote func (v *Vote) Encode() ([]byte, error) { - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, v.number) + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, v.number) return append(v.hash[:], buf...), nil } @@ -210,7 +175,7 @@ func (v *Vote) Decode(r io.Reader) (*Vote, error) { return nil, err } - v.number, err = common.ReadUint64(r) + v.number, err = common.ReadUint32(r) if err != nil { return nil, err } @@ -223,15 +188,15 @@ func (v *Vote) String() string { return fmt.Sprintf("hash=%s number=%d", v.hash, v.number) } -// Justification represents a justification for a finalized block -type Justification struct { +// SignedPrecommit represents a signed precommit message for a finalised block +type SignedPrecommit struct { Vote *Vote Signature [64]byte AuthorityID ed25519.PublicKeyBytes } // Encode returns the SCALE encoded Justification -func (j *Justification) Encode() ([]byte, error) { +func (j *SignedPrecommit) Encode() ([]byte, error) { enc, err := j.Vote.Encode() if err != nil { return nil, err @@ -243,32 +208,59 @@ func (j *Justification) Encode() ([]byte, error) { } // Decode returns the SCALE decoded Justification -func (j *Justification) Decode(r io.Reader) (*Justification, error) { +func (j *SignedPrecommit) Decode(r io.Reader) (*SignedPrecommit, error) { sd := &scale.Decoder{Reader: r} i, err := sd.Decode(j) - return i.(*Justification), err + if err != nil { + return nil, err + } + + d := i.(*SignedPrecommit) + j.Vote = d.Vote + j.Signature = d.Signature + j.AuthorityID = d.AuthorityID + return j, nil +} + +// Commit contains all the signed precommits for a given block +type Commit struct { + Hash common.Hash + Number uint32 + Precommits 
[]*SignedPrecommit } -// FullJustification represents an array of Justifications, used to respond to catch up requests -type FullJustification []*Justification +// Justification represents a finality justification for a block +type Justification struct { + Round uint64 + Commit *Commit +} -func newFullJustification(j []*Justification) FullJustification { - return FullJustification(j) +func newJustification(round uint64, hash common.Hash, number uint32, j []*SignedPrecommit) *Justification { + return &Justification{ + Round: round, + Commit: &Commit{ + Hash: hash, + Number: number, + Precommits: j, + }, + } } // Encode returns the SCALE encoding of a FullJustification -func (j FullJustification) Encode() ([]byte, error) { +func (j *Justification) Encode() ([]byte, error) { return scale.Encode(j) } // Decode returns a SCALE decoded FullJustification -func (j FullJustification) Decode(r io.Reader) (FullJustification, error) { +func (j *Justification) Decode(r io.Reader) error { sd := &scale.Decoder{Reader: r} - i, err := sd.Decode([]*Justification{}) + i, err := sd.Decode(&Justification{Commit: &Commit{}}) if err != nil { - return FullJustification{}, err + return err } - j = FullJustification(i.([]*Justification)) - return j, nil + dec := i.(*Justification) + j.Round = dec.Round + j.Commit = dec.Commit + return nil } diff --git a/lib/grandpa/types_test.go b/lib/grandpa/types_test.go index 6ec49d61e7..c88d68b866 100644 --- a/lib/grandpa/types_test.go +++ b/lib/grandpa/types_test.go @@ -20,6 +20,7 @@ import ( "bytes" "testing" + "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/scale" @@ -28,7 +29,6 @@ import ( ) func TestPubkeyToVoter(t *testing.T) { - voters := newTestVoters() kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -38,8 +38,8 @@ func TestPubkeyToVoter(t *testing.T) { require.Equal(t, voters[0], voter) } -func 
TestJustificationEncoding(t *testing.T) { - just := &Justification{ +func TestSignedPrecommitEncoding(t *testing.T) { + just := &SignedPrecommit{ Vote: testVote, Signature: testSignature, AuthorityID: testAuthorityID, @@ -50,13 +50,14 @@ func TestJustificationEncoding(t *testing.T) { rw := &bytes.Buffer{} rw.Write(enc) - dec, err := new(Justification).Decode(rw) + dec := new(SignedPrecommit) + _, err = dec.Decode(rw) require.NoError(t, err) require.Equal(t, just, dec) } -func TestJustificationArrayEncoding(t *testing.T) { - just := []*Justification{ +func TestSignedPrecommitArrayEncoding(t *testing.T) { + just := []*SignedPrecommit{ { Vote: testVote, Signature: testSignature, @@ -67,25 +68,46 @@ func TestJustificationArrayEncoding(t *testing.T) { enc, err := scale.Encode(just) require.NoError(t, err) - dec, err := scale.Decode(enc, make([]*Justification, 1)) + dec, err := scale.Decode(enc, make([]*SignedPrecommit, 1)) require.NoError(t, err) - require.Equal(t, just, dec.([]*Justification)) + require.Equal(t, just, dec.([]*SignedPrecommit)) } -func TestFullJustification(t *testing.T) { - just := &Justification{ +func TestJustification(t *testing.T) { + just := &SignedPrecommit{ Vote: testVote, Signature: testSignature, AuthorityID: testAuthorityID, } - fj := FullJustification([]*Justification{just}) + fj := &Justification{ + Round: 99, + Commit: &Commit{ + Precommits: []*SignedPrecommit{just}, + }, + } enc, err := fj.Encode() require.NoError(t, err) rw := &bytes.Buffer{} rw.Write(enc) - dec, err := FullJustification{}.Decode(rw) + dec := &Justification{} + err = dec.Decode(rw) require.NoError(t, err) require.Equal(t, fj, dec) } + +func TestJustification_Decode(t *testing.T) { + // data received from network + data := 
common.MustHexToBytes("0x3b1b0000000000002a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001d032a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d5be226e7e7b6b54eb6c830ab47d4fa29bc228f46f176be79bd9b687d01ad7c4441f0c5b7f489462f29ba1641672519a2bbfd9162fb11d646bf1990b0c858e0e026905dab6c71c2a664e9ca8e4f066bdee9265ec45b7885ab14a797ffe1bee362a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c393865ec446b236036e3c846577c930568ea67f1cfe34048cb360d3892d9da61c4b1bd7ee31df96907662e10dae18646ceec91181c5e3dd97605b15f0bfc20d02aabb29f640813f718f1e7495f42415f742457517d536ba5de50990d182df252a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000d68aa7ce3c3902cca498cf6c0051d5b0f901f22092a427faa23227a19bc55eda954191acba6b5c82ec03543633facf3f84176ba3b860428d0e160b5a3a2db0802c70dcaf367b35740713c0e6761d88a2e26f9762e9715fac8bbeabedd8c5eca2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600756e9f4187f078001a7368eedc194df7ac7b1cae92827dae36d345061afa3d49df2041a41387501c5fd1b282707303aa3a7c820cce1b44c081c0051e7fc229070339088379600f5bd507277eb894aba34d2a8f27ce1964f41e60ef2a4142dc6e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ea73d78ec38c271aebb2c9f7ebebb5d3a99edf327bbf4d03b5c245c08d0c2cceaf0030ecb5e04152255f32e4edfbee970f3ab05607f6d8b718826e559bc18b0e03d105a30087d96b5f0684f6ded76f826b01dab61e4136e1d851a24f0088b5ed2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600681cd145a0375e5f72a737bbf92bc57c1e2b9429cf8b66bda13d0096e1898385ada09ccad93971530349592a430aa61331efb46c04a1bd9af7c34d5be9f2d30b044a5968644cf9f9b5d1c7f657a65343af076730a89b07b4995a494aaa0c967a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600620c51368e57db2cbf7e1edee0a746dc950c6da129ae5dbf81f3171fbba241f564b847fe376bf7a6f62d9bcefeba69d7d8b23000c40b7c9ec4f2868e6d748408049603b355867a2d3a1978a771b71b2bee4e8ae1f344d9ee034f47b01a48f89b2a82146e771968df054c8036040dea58433
9df52d8cbac6970d4c22ed59f702247bd46000dec8ecfc90b708446f112ad2730149b124d1d4b891edcc7f192ad4f6634fb1529471e902b590049ee6049361537752f8b4780e1900f5134f101039d617a710e069a481209b07b48c1d548d4af68b7584f95b32eb374be0682b342cd64d3406c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008f6792c7ae77c3dbde17b000c86743bccd9f2247fb323044b12378a0b060f122674fc796d910fb2eaaf7512a5858ce467da7409a2e5fb14edc077c939efcc802088f8736e1cf2ea3d102f0a96ccf51222b8aa1f93d8e42947892f4395cb5477c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460043286aff05190884dc7be3f5b4e42728a1eb766c73cc9745f08322ed672ad944503ec837a379efef89b4c78a5aac7ffe8bd26e649eac482f5324280e259bcc030a790c3ba374fdcf1a1d457adcaa8fc01bb46eba8cae4e1940825364fe2d7ce02a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460025437e042850b2648ecd98937694a76d041c65b471a81344c55d9aae0650459daaa5f5e734ddf02ae030178ee8714c5f6cc241f8148eafeff450424fe62548040cd72d0e6daf0275f2eec96f6129243e01e255ad6b104ed500e614077d0710982a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600fecfa0a948e6cdb687849ea41dc46083ed8562ca4657d751f6c303ae0e35dba38e5c1550cfdd71011239872139770b8151eb4a04df948f666d43811009f12f0b0d49982b323ded139008e150fe97b0e85213a92b411c68f993aaa43ea62a7b282a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460063dc716d5616a3125fabc437ccc98b3e38bb2730a2564a11edf528552245f20657199cbc982288e2ff3127497a8a725852c35352ee3c0a2109d2e603c33c37000e1b3f7368c230b609c745aa7c2119b60218da3ade7e6a0dace3e337703c51992a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a07873b9fc9d6b71bba7749a8886a13c8d5f4c2b66907bcdf5fddbd88df3c6ece1d601799af6ffb7fb53cece0278df81bd34676ecd20a7494515eea9bb61af020ef0093d298f1adee0947ea81400adf0c27bd2e5c10b760d2c8be3b784e1fa002a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46006bc6c891010819529a51deedc4a7033de5e85aede0eb4103118110239cbfdad07327aeafc68acfa3b11872770653290f50c09658d416cb2534f
0794ddc4b1f0c10d20c8996f5448cff82ded879bacf7534e4d499ec65f040024bd7c2402eba942a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460090c37f3814f7b66ad1008f43715aa91e3f7713285d44c9b82b88d7125a5a5be38f9c38675891de2455b276ea008321b742f420baa7b2ed8519918edbb8b9b90311addb386a5088eaafbe4c93e265b8c1459a8504f0d31a3799836224c6077d532a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460085114883f06b533bc6494706326c7191f224f0c086d0a21c8534269c3dd2d7a10c1beaffac80e05250d0c230aa3da0ebfbbd4fe47a8323766b09a8e3ba797b04124cb8ef23a2aa6f5d748070a84465b1cf4df73db94257d96c63df4b5ea8b80c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460037c148f4242ac604913f055679371442e9fdfd7f989ae9d73dbd5cd439a31f941e5c4a63f315a3b4c8faefe537dd51fed26ed948d5cc0110c8c89775e5a2a70914b432d81cead21854d70b19f9691b3d2a8ca7862b64a71f4f3172387078c4642a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d3acc5151d7a54944aa00350c835fed6f9dfb4f12b8124903d301a959bd63f087d19016a6f18b732d58d37f93ccdc5a9a27511054f54436dc8bf8a12e1a06c03155763a153e0c02b2eef1d8a9cd8a50a9eaac9d3af7ad1e559b1e2320b521e1c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46007fd97784bffbeb89e3fc4053e6667d73a24a15e5a864f8264d544f93b8d4938a5e57b079247499e1d0de9cfe038754c930d1427f491ba0f8a956c56b6abdc409172fa913b2ba5833a78ed51e13692e9d1045e7516b5ac738e6745c217f49fc1d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000b091d7bd11344b81890eaa9eb51f38fef522d5b38e05449ab04cf720c389c9a9669d8cf69411500ba2a78a4f7783d75c424b0ca60e5a564728a7908b90dec0918a1a6e0e81ae2cf403c328d1195eb73e9b78091bf1e33be4dc1e29736eaa0382a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a3aeb6a64836a35659a253533ca38c5e8f5dec712d764bbbfaaf079f671c4487345d232be14aead855a06873549f712ded1127217560fb315be740e2071812061c67ae7a3c999b95732bdfe95fa8670aac497a1027aba327a1438bc097fa700a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009ed
7a0e3b08dafbdfa66248a9abe9f6bdaad694b2c8b1a85c2fb8ba387895d11890d482eea4078d1049373c9a7d95123093e2e63d4472c186c18ae7f47c310071d5ca234f8eeb85fedd882230378855b4e7390a78a802f0be7230156a8ce624c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a5b924123457be20ebb04eb12a248f693620331120a053e1837a343460769d6edb3316966f5784ed15e5e8f6105f8af34ad0bd6a8f11aef6f79eeec6e658090b1dc6da633d00a38d0d0b44056980bafbd624f2e1067027cd2b09267acfb70e9c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ed204b31f36a1c06fb6b69e206525afbb5449064e39aa477602ea878ac65a4e81e24fc35c1a8ba764ed1d75200a2c5681a43da66a8096349365a9302706d1d041e0695d7beee6c4a67c9a1a75f89f3f858a50a09ce1b96a08e685a49e20e7e442a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600124362e041e4e25522fef30f7267b7d52338dabb1a2bfa21d163b06df2261cf465067921371fa5be83279004e174d5cf5bc79102e2208139bf62892c64521e0b20d4e595c50bb9558dad9be6a8784492bbbfc754b9c5fae17edf4f8a84e8b4712a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46002d19070e68ddfa34d42b207919fcdf1583ffa6458a82e2680ee5c880ff145e4b6a18b00c1c683c511e196d7b24cf4abd508f6522ee3e8eb8b335a42a0f7dfc0121699d6780c6aa58e433f27d36b7f1ea5afd4fa404d8254bd62ed5b17c3f75672a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46002bd9ee43aaa4cd4691315b505af64f05fdb9d70e1fe5f8c7d9e536bcf723b67035239765e727d4e1146c786c97b1e900cd33d8c25440d9e38907a6d0af3d0602221a227686d900f6be260a84e81bab79adcac88ee2ceb1f4efe83e0be86fc88a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001b561a74a01dd3e3957d7930fae671d9a107a0e36b1ab1fe38a2acecf24ce281f0270b516c0d73ca05c1e7e7b6b5870e973ed04f8b4b2d8f486731e2871c5d012275aebac33745b7c0f47957b0f9c1dab14d6dc5cbece544e26830248b9b638d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600f394746cbe78e56e004009e2f374bc8fbca67b5228f33d328ba173673bc7ebdd31669b9272de648f7d19b7b79ea68937a6bcc395695c03c8afd36647bf20f70422b1625c123c72fa70ee375da98
f15cf1326b254ef833a27ffaccbbe81e993d42a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460043359e329c56b481de900abf705e49957358bd8261d5215ac63a96b9098eb07d68b12aa67f2e4559fe6fa838233795e32e9f08b1680f89fa7db4da2cbf40010d23192a82e612c4b047a6efe2c0b20d6b8300a81639a6d1578bb94a66a721649f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ef4e7891ef4f0703d2ad1833ba02e1ebc743fd656dc4e0d5b2e9346694f142ab262a9143fda3c3a199e8f57820192a49648a0872e7b1de04bbc101bcbfd0ab0524796621f90c2d9f2d12a962e14f9852757f89de7b1f66c22506321ef38266bf2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d65939fa1869e5e2e2ed5800cac361424eb5449365fee6914719dcb768267f37be9efb0a21b1720308dd5961e3ad43b00eae969a117b8e1121ba36362f0b220225d5c2769f768059b45c6843d37949ab7866678eab000a5e6340e30d1dba306e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600aa0831e40a2cbc1ba04377e7cf9a0c6f5fb037c9516e4e9cd4d6ad0564953b99c8ab329e5f667f7fdaed70e9b8d3b4123e6ed9249b4e999b41dbb82ef04ce50d25ef09e15fe616e090335b55cf729832e2c9f562428fcb00fd3e3bed2dde843a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001396a84de1c3a492a93b29d74c536ea8231bd01657a1621a3af64b5993383781947c771948a7166dd9d21b4c34a36208dd102d77f3f6c99ef85037ccd4fb5b09271f0ce7fd9e0460b2b5f68afb847a80ea0821bf168dceeca396a1fe35754af52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600804dd690fd66f6c15a74a4c1c8be43b57f0eba4d45d75f66777a5204a506c6344e189d33c41ee6085420683807345324cf24602b2eac5a307603a1dfeb71040d27a748182c49d7c4ec091c9589da5c9c35fa99ac8ffa4d613129dd4d08e9f1122a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c91a2c346304d33ac0121707248a0560bb32b4446fb0f2d29c6c4f27f4073bf6455e662af07a4c693f199a1b850fa794d45d0640d33883e08681b525f99bfd0627e96e8217ce52d55d5cfe97580a03b8f9ffd41e72ad863300db45749088faed2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d91892fba8d2b41a62f81cc5dc225fd193aa215c994
1dfacb197f7b11262609953c57726361b6c1458d500cdc90eed86c0d9ae4cb98ec1457a438af9e706d40e297b86aac8a8843ef944f9633404cf307ae264f031f6dc803bd93c7ea69889742a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46002c85b6e9e5c28ccc33e4d59857cdc0afd0e4f4d1c2218dd018123af7d96ec3b08e49ca35a5231431e2663db3a178dbbe8a1a4f4c0005014770c0cc85876a88062ae745fd0181edf42f9313bef7ddc5d62ae8289212df03f8f73e210e0ed907562a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e4ea8f7d9584724897151d5a1412a73e42c96aecbd84020e52770c1767e8f807befe8dd28f7fc8cb12addd1f1b72d6c58e418702111cd708ce7da9f61574d60d2ba3f6bd3084ac7d9448dd341d4c7ca8c991b659c85f27cc68e80fe7cbd73e422a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008c5d3fb36a8da0e1770f923bfbac75470ec6408c06f0cb1be5cdb5893a2c09eb1e370073aac480ee2ec8b9c3a9bea659869a4328e8e441851bfb846610ddaa072c3ce4d7102f4236e0df2e8f5797f6dc3d2e6f0e57d373c9a4b89b21d4d228682a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b6e743e03f526851e054ff4766ad6db181f83fbe2370a51431fb8c310f1cbf3788d99480e9fffd8164d31cad3db297d9b26c34523aa25d5c7f83e79b6c02390c2dad5b2212ee688f2eeb9ca1fb6a90574f006dc1c6680ac3a8523363a248940b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460020716a5266a471356b3780eeaf4ad7dc37f50352ece4e1416bbe47b5b2606bb59dbcb1d270aecdfa6eece9d3e8f0862966ea7c7643cf1c05945fc4a56ac65902304d970cf7e07987f8d7a500edaeb1c0973de1ca588512e5d9f268a9ec0874ad2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ed2d6b378c40cb99f357b0458085a0b0504c1622adf0316050396479b08ef1859c2978fa15b4e53fe911857add9ec253cfc8d6b949abaca4437824f324c1070e360e744fe76d5d471445ce6ad9587af67392b8d960d7715dc0efb43c698465a52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600013ddc65f819aa4c592cbbd59ab1f4a4bb27c3a4b717e19d409fde187da9e2e7dd6c9c72ecb1bb772c112f3229dc5140c202ab9341d04a14dc2c24a2b4e34a0936ea5662f48dd131b91defa20bde06049edcee2982714f78519fb64450d1b62d2a8
2146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600087672b184ff663524bfc5d05631172d3eff695a117cf6c8884053e97697b48dc255835918bcc10f915ff4d8fc3470ff5143eb433c00e2dfc8b46db2ff16360e37035dbee1de6bef71e401cd19d0a26cb8b69cf719ae340ea53c9677d6a9aaae2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a2d8a30c22e7f78ea1a477a0d7971533020276d5d10bbbf530f31ec5d9ae688108c2b4535dcf9204604406f21d104131e88d9c8399151bb67c14a790945ab30439712f967c4e06d284d6da4735cf2823c1f770674b668f7bc896071c3fd41eda2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46007770937b4a1d62bc1df63b1f80162728b198d26492e93765025f9c71dcf992482c9e85ef9100f7fb89931c0d2fd82d063e24f7dcb3d28e549d1d45d8a91f2c013a2f175229490c3169b5942260eb2572198b494e63c984dc364d3f48aaa7bf012a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600f7f152e001649cf1353540ca716715bad9fb6703c1f34b78673baccc8027c3885a6c9660cce85fbf2b2ab696220a2e417964c91912b030a0861ab7ca541daf073d75d8fd47f1074a78cc88248f1f6b9ea6cab42ccb676f94226e3fb5e16249c12a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600dc58eb9647b223730213a6f217010fc35556325166e03edd470b04b9b38f97a98acc1695a71120f06b2beda919889a5c2de43f40f9954894ba861cc2082393003db99a9882de9b7666591c1f9f2d87685a2569bd0c611358d6e144a014c5612d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460070846e878685dbe30ef68636052fcb92da91ff77ad2f7839edab14ff2f5a28cc27e0c464d175c995647ca3a0cedb2ec732e5732789ad3684ec4f08d94034c3023ded8aac5210831cbcc4d0cf1f96f2c711afc90a6f6f35a4ffb766be1dfbaee32a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600674883cb865697effe33d717b63eb6746fd144e8dce5df79892d453b51fa49cdce9eee1500be36a245e33a9254ce65631b0049218fa305c2e9d5ee1a8855b00b401def7965bb1e8373fbbb6561d5ca51832cbb92b67b0bb08c407d2c19cca96e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b4d95db0f1cd96e18f87a0c67e71cf7b8d2facf75d10671bbbc9f7e5642a4c2223100eae4e64a54ca17
9525b19a763fdf624d78e209f7268d7cf4cf70788200f406c4081abf0f3a151dcaecc6b7ca1a7489cfa1810cb02c8cb249bb67dae09302a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600bda3c89a9f0257273f01c786914fcac144189a20db45d05fb42ded836fe2067362fd1baa2ec4727a7d8d72530474f60a39070cb0f2af31c372a511c55ef2b20a41ef4a31eb7dc1e01f4630604e1908e644d7cdee3f66a60f98d6d59605326f8b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a8bec76286b65de5693a2cbc309bc3c301bd6812eb493113449e9d30d39b60ee58a4d918f127d54522e74d6c6167ed99dc3b10e8aa64f3e4e5a2a497e68d9e0542d0c88f0accb5117e31bc057d4f277a38e01f2325f02d0a9f647db09b67cf202a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a243df7ec7aea8df7944c4474fe09c1a2a006f697e29d15300da8a51ac7258244501ea6c87983d0ec4643c051ee1489087a3017a63c2ac3b7d06aaadb1b88a0943701217f2650ade985ac46e38e548bc1850bf3d895eb6c1ecf42d9e61b788c82a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005305ec8c29ed3ef51119c80c144ea8714321a31e533c0ed9a1c03a781e4401a279393f06e4e0f0dfd7f21b523a808e0aaf659aa587063d7017dab6ec8df7810543a9a2915f377cd4943f602be38c3bd6ef39e91562c09f18fb672884b4bf8eab2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600fc691469543554a61c6bfbf5d554ebf7cb596ac78579aa1b8d6b0c7e52f8295441a4bb450d8fe1d2579a51986d3f7a6de52a34690afe34fe5699a6bdc1fee103452185859ba92a24c1f836952da8d11c9425c9b35eec979744a345d2276de3ee2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600df5b15104aba6d1d4d70f19eee2d04e22056616f0b54011fc3f15449aadd958124dabcfac319655f72688f14130808f3dd12653e4f948034398589143c24fd0d4532b59911aec8842fd910a35fad7c6210b3c1ac73c7c9799963c635e2b562882a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b11b2845d75099b35ce971277fb82b0a8f2be06f40d2b759a291a095b3ae6f9655279b922667119308fc3d673e3d6f0063bce32f0b2c5370071214ef58b20106469947dc7cb086bd75216dcff8c8ddbbff5e0f112ba397d71b6bc2980bb6cf002a82146e771968df054c8036040dea584339df52d8c
bac6970d4c22ed59f702247bd4600c589c74c41ac4ba4e067df4a152dd5f34706f74828bbc1166a7ea1863e411058683a6d3b3c27db8540a8538fc50944dd9b3c8dfaa5f49fce6c5aeed834d8d504469e0a875597428e25f79157e441e933f3fff7d2afee478a9ac6e1903e4d4d1d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c0a2143bff781fde49d6989df90169579c588192fb669219f815f47117459f9a94ddadb68a90796f95186e7ec641e6973bc165d0d8b2fe651c9e563551bf960b472bdd18c1be48d5e3c40aa093db02e45c4bfe1a5b62a3884fd65be8eca3c6022a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a5f5af2141ecc0f6a323988e7d8ecdd8b609fb9379c20d108e218ff6c8aa7fc91ae54bde791fdde4d103c43299c033a649e69c8fc933db42b858a3730dc39a0a493c604e7f0a7cd6abf370c10b90eb8ff9d432bab420bdd9fe3da656d5e9b6752a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a1f8ef2a102fa613697a17e7135f7f7e429107d27a74ab15f0f2a46afed1e3257ce683c09017e25cc7abd447e06891350568aace957b76c5a2a8cbd9c0c8c800494d65e6b674eee86acf5ed70b45b37d7498b8c1d05cc0baa0d6473fdb596bfe2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600eda30d06498bcca078360129b8d46b6d8af3b8b9ddb252a00dced827e3b4c86373e299dae8720f622be9bab21766fc970ad786bc5733ec208b0105910b993c01496826b538d97906e14d417b48598d7d591a483ab5f4c6786cd0b96239c2f4cf2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009aed65d04eaf5a5c215e4f3cbf9144b83bcaa41c037e42ee788229ce622b28d3b2ec91c3b1b609aff0108bc920e0f6f43266e1b47598bc99ff5410e8af2aff024b35f8352f2aa4dac039d347d37488de305c7ef6e7aed2dbc3526f537efa2a3a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000c18a3ea31cd3830207a76b80da87192c61c608490c9c8826d4c860f8b39b2962d258dd43bcac5368fc2d9abb0793c188f6197f0a6d4c3346d2e53c6641086014e319a863a469525687eb7dfbc8924054c35d0599b6ba4dc94702edce01671a22a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005bdabb13bf58fbbe4553d4af8f8fe9ebd0a4aa99e0cf8271621c9975aefa2a5f35ce96bd2897eef6044c3eee4aa475af4288d660c1d64576a770985b9c7
4890350811bd4b7dd8a5ed9193d4e2d19248020c3d334249a809fb96234f058ff90fe2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600bb6766e42e7a117d09953e5b2505c6720b77655c557be72254b0d946dd0670b94a18a9529dd8c96404596e9b6161a0a5e94f5bce6c190e733da9d4b5504af60451237bd7fe895abb96631987f0f82a41ef6d56f217dd1ef8aef76e9af4e559862a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b1042015c7048dba6b6aec2a38267657d0f221d0a862514b91b4714dab337eb41a014683101b18d98bda3cb4860749f10e5bc3a1927946e1c96c3c937532800b51f980443e5fb020d8498ff88e49fc55f1581d7eed1c66bd7653a4f5a1ba18662a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e17cf09d6059f3140f99a05905c17f597bf7fd8685db6c61fdeb0f91f0d3686a1e218daff23f1964ca2e7f983efad0d0714ed9cb5b23dcc2d428c03cc10f1a00543a164d12ac3f4ce00b176cd41a7af343c56e4ff445a2634d74f3b182c9c7352a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46004c4a8f2ee83d7226b58b1415f57cf93574021a6e828b1852b30faaf97fd75325424bc67043a889d41b4f1010e40e2ce911d02ffb252dbb1f9b15f0936fd9cc0f56682332630f5dd42f160f1e3475d1881b2adab83023216892f538efa1e0e66f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460095af4f1a0b89f8e6458417dc977f40bd63d40cb16c2411f5305f3a12cb44997ddcca4c14c350296b4d2a30cafb1e272d22b772b0ba1d85ead112b93d82a9980f57a7cd79d0feac648df60b94b7eccde724eb7e473fa2368eb5b88181b030239b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005cfb9e1d0f208dcb64abae8d57bf8bbaeb4495e61da051a28d6247ee4eb02dfe0a5bcbe63a8c7869168caaa8da5ccad40e1d0c64b4c802cd9d41133cdd88f30557ae0c85ebaf333a6ec3251f577cab910cce072f238d1e50046322b83bbc0dd52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460051eb234aec184ded0063029767fcb3e32d46bfba803f5301e932a4ce8ebfaacbfda1480f48991d6259401a5962804e2949be93d9f96ab7303b7367c6dcf46c0f584b81d2c35f01525cb80a02c259565f0becfa7d1651dd4c313358d339f32d472a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460061ad5b7dcd9
3f5ac2673bc36633d2a612584685c1ce6f1bab11561a0135ec7b3c20e2c88ae6ca855d4109f6f9892c41ed14bc38b549f00f823486a3e81b22f0459522ea548446804370cb91e2c4eccd2599a018202b0e6c04b7643b143707a382a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600278975698f785c3012217bffdfd48882e43033099ad968e849a14a962e2d6e538d1dacac16df19a0831f52daefbd4ba7c6af77cb7b4a343c575382858d1bcb07596004e838b7d90408d46de62b01e13e631575f348bc59f926286aae4c88702a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d8aa4ba9183770f61c39668721066eabe70fd41d30110134ebcd703a7bb4ea5f0288e28ac58a12dbce2ace9546b0fc28f38c5c467a33cb0de07ce8c5ac14a30b59bba625d971d505a9c7d7c2f3ad69203e69e3e5e1fc1e4905fae7703fa19e032a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000d40e4163b524c69ddb59641228c09fbcb4bff1ed875161e54bb10bee1ee21e618ff0715d5fc9e9ed1f32dc49e01926b4728c8b2664f2aa535929ba72080c0055b29e3e31323bbb46b3d1c53639ef72499c58806c7f4cb6d2e8343e961bb6e3f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600bd7478f49bf6d8d87ef834bb9653a3f0bda54dc653fe7890c69882abaf429231fed0b99dc99d2bd4f504c32e2008b135004c60e2045c7560e0c0c21c144925085b379072ec1f3f70b4650979a47b24f9b080c03450f7e9587d92cb599fcf4d6b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600117c8995b21aab23931a24670b8a885ad04493aa804a80b51572f5d29cf04d6e7d8f9adebc0cfdb1df0e4414a199398fab565d55fa0e32d9f70b828de39fc3005cd0c7b21d4834c5a41a80f7d421c7d0297e42eb409a524d33aa7557df13adaa2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46003d5d559b960da2c238fdb2609b45d125f1940fc8c6e4934d2888080b5f2c7f7da8c34528130afbcdac99c0364b88b5a329c8b742bbcfe91c7e96556b03e70907607b38d0d0b1ce290ae681fd3a7fe09b4299117c3d2f9a0a52c8a3076e268c132a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008152931c4a5feea2e39f656662f3515ec32256c57aab6ab75c6f0b6f5bd50b508a743ff424f2eb046f1a923486a0566034023cb34413172ecb1e639634c9f00d6088164a2d219f2069d1d51e06c2b4eb70b
85be9071afd6e99a92f1232ac645b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009226a52a4f9fd97a75c6c808a961744e731656160509b88038c3e7ba2e88f472b2f8fc5f865043e76558fabfd6b28dc0f8aad8a50d5744072a4bfa9a3d78200b62de44b8fae34cdd6d4df47fa7d420bf8513f35e3f63c66c4bf699675edc33e52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460014c6bb8be7effc1ee39f3cccad774fc1e71212f75ce887f1b4bbd7184de4691c9c1cea72845d9830bb86b24464fdf7286936ed6ab238adaf78773b335057b60c64bffa0b9f160a67beb033610656b80d6c9342797fd983375e27f91ebc7e6e0c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009dbeeab2dd1d70ae4993bda61bfede77c378865b264b9620d17da5320141d749bdbc6ae1b2251e68f79ebd445b5b93d8a5b2fc5a2257c307ebea37f1d45c740e65bef6e1f3f6609291199f6e0940c80e559588368e2031086afc1730e0584da22a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46004ff39ee2863945517eefde66d42c4ec96da046097b7efbfde7c3111a6c06ce97f552f33374f43a723709a4b3a0cbf6b226af61515805c74fce2527bbe828860b665da232ade423c7280c6552e6d0ec8782f9bc742c0b030fc08e9dbd3ca5d3862a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a363544def43bec4ab3cbf8aa4b637198f7cc7bd89c2a402d40c6c10aed6a9f231ec0ae946d8d21c4563ccbfc1e6d6ec73063aeace3397cfe34f6e78e378220666ed84a2c0bff828dcdd1d82463f642666dfeade46c020621490886912bf5e022a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c2ef4b28110bca12507a617a321e2e5e3abedfa0583e4bd9f320b02d5984f62caff3a28a627819a2f2d54f01f80713248a1e5f0bfaaf8b261a3ceee530b89b00677358fc648638cbd854d2a009dd39b8508dd3047d0c5f13bb403a64d053ad032a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009d76b8b8deb037178f7686154d9228969aa30d5751ad1c2e0097c97ca39f3686a91afba09d50e443f75a8ea25f41f1e63daa1a5e0026e099f9241ba7aa38600068f8fcc2977c5fdb4ee46f234fdcec2f60a22f9c63ac7091b8ded3ec441df9bd2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000b027d479cd33d8d57c998621de2cb6dfffd320f53d27585d3d
b9aa44c59bec3ecb584f003e5571e488cd92a3485ef2b740c27861a5d780571dfe10c07307a0e69470b335262280ce95a164f7963af49e41ad6173f8db9e3faaf3ab54a8c50322a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009bcb275c92fdc035bc43aada4277e97139993270fd0ab3661350adbb09304b03246c37b02500355654ced15912e9431858d85a741d29f5f9ff65ac597fb3700b6d14338abf210592babaddf4a584f2ddc06c0d333ba8a9f5e284c3be59c828512a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005c4b0f868037118206170a6b1d858ea81047028e10bddbaba24b1b6722ffadeac7a10dfd3cd1c2cdfd196ae3da5da59a0dd858f6ce15aeb884d39e100a6619096d28713cf7af6e13d24dc67d4540225f637f0384e58e2710d9e294e7473edc2f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600cad6e6d6beae1a9121a104e512d5d9d89f60d0aa1a14e89ad906e5a875cea4d6290956f0f6acfca1f0faa651886556d59ad2d5311f840eda172fdf0fb9ee360b6f7a1a4f9d8c054907f3b6b946a59525199dbcffe9fd3ba89612a5d4b548bede2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460096490d82522f60cceb6f64fe9d7be13cb9d3d5426f34d9c6dc7cc7fa10b51705ad8c0fb4cb8d1df8520af798f4cae294b765f4c918f75f8b80a9110ed4fe2e0674998004d06285d2b8d99a87a9fef38f0fc3109f4919006b8d1831b0dc59b3782a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460012d3db9c5db15a349f1152cfd3edecc7cbcc6b7d6e0170379bbf12e29cbed42c9cebd9bf55832bab8dfe4f761017aa068be5f84101c4d7bccfcb929b34c9ac0d74a47c40733f76b6d0502b67a709fb5e5cbb263b180fa0fba800d9d6758207362a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e1923130437dc6a03eccefc51c3717c7f95851aa9ce0ad76ba0a22b5dc4dfaefa9cf456eef9a10b9ffa1c33a98cae77d6b16df72b1ec7bd09b53d62dac15d40874a8906dd2888c9e7edea13ba3c42ad4d833f4a5de43d5ce0d9b12c654ab2e872a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600cc61c46e1babadb7667708e3ed03990398d9bf080ba41c1fdf67b4f51dcdb7e991b6918a6aebaf36df53f828f950a7698a21eeb1c640f809281a08424776cc0674db730277b3ea5a14e52fb0ebca202e56876c666aa160f623a4ca411b7ced702a82146e771
968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d9838ce8807f0de4ac9b15d648031b40e5ce436051756b3b4c852fe07c67715cab2b3cc237fb7f72e95f970760d9e36ece5107e07f41b43dafb771a42517d4027589ac4c910bd1e08bdb36f0236c05cd20349d62e07bd34d89afd21efe56b5fd2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c3136be04b86dbd5ea6d9151f51d16b47aab73e059569dc775951b16a71ac4fa890bba142e61bd67470cd1fac95ed42da2924bf82826caf7c70c3469acba540a7a92827270cfa82f16145a44f9bdd9ff5038ef1b665dd520a2e61db9749094962a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ffac47e7232095f25fdd02a1a5dfdbf3cb9f9c999881194fa50ffb9a6f6cb00db0897043b0d6542770710c727ae5dbbd231eba181ff1c097ade5be59d01389077be4c78d01b0569bdba6cdf39abb6e02f591942133aa1db230033aa48d18fd552a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c550613a2aa3703d95d343186fb5b9ea3e5dbc71389f842cca278d8a83f7735655df193eaa580de1a0f02082385ebf6a1148d93ce610a165135448a83e6a55017cb4b69ff1f333baa130c02d83fb533cb84fc47155a707b7978e8455431f4acf2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600113efdc304c9dedd0baf19c9b54c235fb707a91a7b6730fae65d3860e960d6f6e757109e78422f7d568864de98a9817fcd1d1c435c9a73f13680525da4694b027d32388dbca301421fe038986d97764ed933927b5b74b91ea5320371a31ef12f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600cd7aa2897e1d6d13af72130d66d65248a25ac7003734db33c3d5c59f344ba6bacdaaba9836f959a6ffd140153f8592a0a30146a9ddeb34b75184c17f8cfae10d7dea0d8ef4c3c5dccd46e8d2af6b2a85e8b36156cb97002a43ebee8229b82bff2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460097ba00cbc15f7d2e6c06f7113afeb36984a2322f18960c5fbb8d96833c6aeba2f7eb6f2b485aa1e470bddfe489527dd782b21d3d70c6a49321202f763bba82007e7ddc9a34f7d5e0acd418405ee795740b3a004c977a9ba4ee103755c05aa76a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c42a2e034b4fd25cd46ffe153c8ff0f6cb764ba36c36b2dc125ce88d8593e5e8ddd636b246e1852e3fd20dee2a7
d5b026b89aad6cc9080b4042cdb101b05ae057f406fa8642d4d3834434465fe6b1815ecf25a5875e0334010bdfab4768fcbdf2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460059b47b5455aa16792cef813dafa11c7e7955b33fcf0c39a67b85a64e620403aac36542cea1ea2d2015bdc71c09b9c60c33d77bd28c35a69a656c83743b5e63087f4786d6fc06a89b65579200e5d1bd63caeab25893891f06597232389866b4152a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001f8980114bf02490af98132d5fd4e13b65eb990e39e0dff52f9e152754b83a19f750eb812a63a4255d621cbc095e504e54506c3a1399d421a36e77364d40c20f7f57f0e89ff23959dbdffc8f66bc433fe9849f7a9c335e0601dd59812db059862a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b90eacba52195a184b96f5f19948facb9d5cfff012278471ec8c2ff572e70fb3040cf595588bba91973fc2163e509e0686f56039fb15790ba172aa96ee39d80a818a546c630b881c2161588866965649678cda0f4110cccc3533d0f20e5e41202a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d679230a0317ac247bbbb605e6460aca06ff81c1dc65c5ea9b00d19b3df57265ce079a8be66d5d4047f1a5d81709dec9efa2f675536d3cdfdbe68fd426981d038930b6c9661fb81a752c137a40cc8091480743856cd77ac965f4f6979ca8f30f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46006869e6d4ca689eed4551201be9c4fd6024e38ec95ca0a6125e156b95f4f18ec4bd6b49e1ff7f5397a6d6ba77c27866d9d089eb660a0e7efdc61a8042dc4b8f028934b9a6c38dd420aefa1e115c839fad7a71147a6efc77ab593485c3b07576ac2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460038c61e1249a82da4c868ae51928fb4d21f980e96a9d32887fb9c4f41d2c6032c03c54e5e45914fa716c1e256af33758a5c7d333fa780c771e0e6f66cb0427b08895238f7e22e70f3f59aab49819d5237d6776eeb65c2d5a925b62ddc8d3f0c772a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b819531987ad0190e399e2d1c87559917590850870b8f938da3574953e6cfc7e0b6c6fdce91bdb0e0787c3bd2bdf1541eafb86da210ca85ae2bf6fb0a6fc69038c2e1ccb3645a2466ae46d3f3743ed89c0831a74720eb5c4d8490b1e04e5bd4d2a82146e771968df054c8036040dea584339df52d8cbac6970d
4c22ed59f702247bd460075c07804bad1b0a7f1211718a6d644cb500bc66416b6004f80903b51b3efc2617d1db5175485a2ede4d7bff6f0e8cf067630f11d62af1561032380150fef6b018d67a0c4b47eb0e087f2cc4c3973b59c9ecb729fc960775d76138ab09799466e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600bfeb58aeb94d24af2fa1fd94a4b9869d40982e3f21696fb70dc9f03a9b80f5d6389e4f4f11852e43900e505c282c8c8110545d547fd0b18cbd88707e8a5fbc058f02169e09026a2500b440eb43eccec58a6a32d9f5cd9644fcf26f5e0c9692632a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600601161377f0932fc08d9127736fbf0052b763f365aba81899e05c804a1c1fe945d13052e264c550e3524d6430d69700b016113b65b6e6e73b22918fac8cba7008f371abcaa2d51351b62e5b400f52086ef7803c1a1351563c5f405a6ac054fb52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460082af14bf589502d2963520eca5863fe7498a982e11941fe25b5da9349de59cd245ab8b510a09795ab701ebf008eeaa812c877afcebf54e70d95d45ede4839305908ad289176561ffd2c95f7cc8a2ab5c6a0effd161e3aeb1732140aa501edce12a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c19eeac39b52c944ff50ce5bc10e9726646881c798c6ee4a88de3dda17739fb11742e5e35b5c7222ccc4cf95869e91947b993e668471ab724c2c05fa3a37740b926ca2461d028a766de4efe3d8412bd08adc97c9bb3aa28c07a702ca82ac26a12a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c7eeccb5b57eaa678b568d72fd1459886758029cacf2d33f944d382f8945c56ebfc5adf4322f768116fcce9596469910f7fa214568e2ff581f2e583beb633d0a92c3f7a560edfc7927df1631ff35be263beee400f187c090dd02e4a5801a82102a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460019fe510b724fd4773b7fb1c739f1b0b9313d6ac2f6341f693dd1f898bb10bf53bd09fc398d305c55d598588be1f8b9727fa41a086c4d4f0835aeaf26ed50d30098077019fe554385e0cf6e0f3983b510cb37dea7d42f19ce0eccd86e3e147d352a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46003abdcc324544ba69a1b63ca3db2ac95b35e05d37fa96ae24d6a215db9943ea817a5eae59006784a8e1a6ed3ee554035edc0ca54ab4b04b7f95b48e3548ed3604994
4dcec71efeb5186d55e2d5acec943bb28d1ee9f84db3a2e2177c3d7ee9b842a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46002a4d4e13d1942c7f953c793731ab5c12a45d0a25826281c2d23eb23a1b74f7c40698bbae7c0f032b74ea44c71cd4f550b9295058f2f1dbd7b6f966b00bea110e998b7545adab70a645393ff37884688b9960c8c04989e8575602625d6cc344f12a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b70a4ca5731238626ccbe30d9e740afa075f92a8d217fd224301d1415dca12556a89e0a2797bb2098cd0ae62bba8c4ff98e7d2089092e729bb27c039b71c15039af39dbe3ec236efd64ad0c099648dac388e424ddaebb0cba1793b1ffb9a5e2a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600dc57933c5a5a2ac87b5b72d60ebb537a7bf9bfa2da6e0d45cd322d5408800d7cb1555298dfe74b847e69ffb237754897847afcc0dcafc80bae56209add6f760d9d10dbeb235ce888f8ed0279a2dea721df2f91c3809999301774fc2a5f4272ec2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b0cdd53676e135af460bc95ec069717e7b78747b262d5140a1a193b855d9fbb159cb499b7fda361fd80b7743d10dd383526b70feb8dbc0b06badc2b911fe22009ec49b1bf8a4c76bff00c0aa80adebe0e9249e37fd977babb7be36028b65b00d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d5cdd8a7eb544a9479ffbb0bc74072ba782768cb615ebaa2e39bed4784b4fab6a86c4db944a889ed7dda78b2c0b5cabe5f73c423376ac92c379202a6814fce069f107b3106c7f7570af198895d8314c9deb526a2311da652e5cc2c049212c0c92a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460033038e1fbc0dcaf9212abe66e29d423b659cca10395f78a86774250986079aed49f81381e1dd793a19cb7e0120b636ccc098604815be2e9e2950a9fc857a0f02a6c5e1d8748c139b8d98fc3bc12c3e3a37ac8e0b85847090093b99d0a3c4b9212a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c9730b655bbad75eedda6adc2f28a30882d8986f28bbb6d2f0eb8f367ae64a9eeb9ff25365ccc11c67db4763dd53f1eb6345169be20199da23559be607ccfd0da6d3f9c048fe3ba16d0edc38626afc398f2c032a6790690d1de77ecc6a65b42d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600feafbf9f8ca6a14c50f
918d8fb35a0b296e9308ae8b6c4ed7147d2aad912aea13fe7d2639829a5ed64c309c573dba88894374fd480f595bc1d7018ec8b37ae0ea70673aa688422f1775915bc89d6ef922db7f9bef28218f11bb30996a406d9ed2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c21be32aa3713d12b85ad0f157e3877154384e4049d5ac28a98bf19b4e57e1c822e6350c5ca96658ad6f9887440dcf967085e2e4008346b3cb950d46964aad0caae2b6fd7dd2550a95d384a06011091f788cf7218762e59e686ca94fd09422ab2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460050ffac3d9c7f04901bd6dccb14601d1c6a9b0c80587e74eefbf625893e351cf0dc1d4aa08b3030ad5398a461c065186138e50ce56a92d5e46eab62701a036a0aac1daf1e3e59dae210f2b56a2b194bbad084b0c1cf492289b2f03289c635d68b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e7b07a35b9abac9905f77d01f690f01cadbd540fa75c8147808dc84f5655b17d20951de43322a74e3e9cafd48351d58ea0aab31695c3df00d5fa1bb33a997d0dacab0d24dd65b0df17ef62e2b6faca0662890de57163550c52416630850787292a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600881b5c1abdf8a9f8ef9d9d9b27bd9eed35f8930e8ec3bad5ba82490c139005cc78b856fb9c49eafa6581988f7fca348ad60a75723db53122c73d25e858284806accc6b239b9c0c62f0fd1c9cd9f90b823b41d955865bac175488ed4e7da408682a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009edc50b04aedd78e4771634a8333fcf75cb4519e3da0603bd6203475d78a3524dc9d5e6e1b671bf4f75e30f7e1c8bf044bd4577d94375d33123e365c0bb5bb08ae2734ab095cee7ac2ff74777a6d0293ee47303950df193932f2dad4d28526c22a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b987cb9c38a0a401cf03b2488fc73b7faaa5df95e8384d1153cbb57620d1313cd67a9a610414056b91b02127543d33399252687e6984d867cb1e2ea97d83d801af2d390a8e4d1e464e5abd80e9ef716ffb7e656631bc4778deb7834f4fc20dd32a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600bacd2480f5df4b871ac9d49d2e79c619e51ca03330c6e0eb517496a0f163614d24013930325ef5b76031574977e8747c04c6da8352f8c80a8f05444c18c85105b2bb7fd960d5d7ace8e36e0e4ccb7171e5c3740b2ca
8a96d868a09cb5e17e32c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c73e0d10678f068c32248f2b015a3a0f415eeb404fc6f89e8589ca1e208ff5949f230d73d985f35920285cfa018d43158f0972b6588320c3756f3b9df9b5600fb38176139100069f1ff20156a180e55782796df79f4683020b06f2a5019da6dd2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46003b29752db4fed93ac5c20b344bce075851fa5cbafe02523c694bec4e3b4109fc0e88bdbd943f0d5325d62929e124a484cb24460dc06728d2039510b906928b01b7626f7e84ac48645871ac12e66bc747b9b1f64eff2352537811f9712c127c342a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c68b27c9db20c07fb3801dfae33f92407cf8e3b9c952f494687140d9a7514f0db75c71c2221b0960d74b23f1dfe643ba2f6c6fb429744e38b2f7f2ecd563d608b771cf172f891bcbbd1126e3354cdd4e324cd7202af62f5fcdcfd2ec01ef7d252a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b272ced92db8b46f6a4a9c0164fd24f66968b23b0dbbf52ebd736b1ce0a4c0512e4715cb0c0e063d580f4b4bea5811950837fc4882aaf5e59aec026ffb0f2300b7c13f1239888cda5c8e6ac9ea10675df17633368906e66a487f91ddd3268ca62a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e155d70b9ec48021bfb05f7aab92008e03f56a19222718aef3cee767a1882805166889d248053ed9f7b4d0c00b19d1931dac509709fe9cd0a577b71c8ba88c06b9186b95c90d2d00a31e7c68066bd37d73408271762604e3608e2f2c983f83092a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600438f0485ea2ab423a63142417de6065b4243e190cd326656e2ebb895489ea898950cbdc05ce8b99ec21c88a96c3953a2366970bf5a0f36c6d0ca34d121115f02b9404a5fb9f1723fc9fd4c5535dd9ef1d67c5237d9d322854697e9e245064e332a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460057b3433e3f0b68a8787e44e78c1ed5f27cfa4b155d3002193a452bd1351be719f47a4f445d635052945cc21df9e54613c7acec835c90a083e89276e7eefe3206bd81133a8d8a33ad2b084c54536d7a17ef0eda5e810a9994b13c2392fea208a92a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600f53a753bb53e0243dead9f8d43f93525d02a0c69b202ed0cafc6afdd509
62cc8a0aaaacac45cbbb936748d75a2554173c94531a5bbe20f9664bcd925bb876007c0988a1b3c91b35a7c3722aba7d3a55f79ae07ac6a46d57b4a49a06eae20333e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600276a7f7126f933b4412dc4db264ac434134d77c174c3d2bca0fbca788d0327b24a2a1cd8134dd39cb9faed737ba496ce35b62f663955a88f69adf8f9f7061b06c3576342cbf99792896ee5329b04ff2eee2fc2bb6d53c5c03d52c8957ee793fb2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b5a298d2a97dbd7bf266ccfd61f20f739fd72f1cefac976ebb786e712643de23f8797aa0f160d327ed5210b6bcebb2429dcc673c4b9a39515cbe07dde7d80002c3ff25a1743a9df92af4ccd9a7aed5cbc90f64fd538c3df0a9539128f59652672a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000b87db26d1a4eeeef1f18331c234e6daeb8aea48568b4424e76478eaf1336e1cce3e3b72d136d638be075d9238c58dca48456e8587ce2ef2c46b1d7bbbadf50ac4025624fdf1544b90a61e85099d9e3ac235396d2d9d37f4921162fa688ba95d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600b472fc0441578ae962487a27c9903b0e8cef78c4c69521c759f359290290b1fd4ce33692f3726dc51dad68f8ff3cdc88dc6a137d2fa46ea4e10484605c4ed90bc4153342949e45f683cb6df646fe7aca71502b1139c1dfc929afaf73c0a6de822a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c58f80813694cdd8df0fe0586c341a1de7ff7f6dc7c093c2a7333d7e883354a5047ff31079cc00aacfe59aa12207c037e071f710a156247949099120a6afac0fc4374616443e809c1c763459d10d6bf6a2d999855b8d339a27d20d360ed5f1282a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46009bcc51ab91092800c4a9519ac8bd2f37c7a7079916a594cef09dcfff47978517c9b1c276749de14c21d010371cfa8da4d5f477787e0348fc71c7cc5c57322a06c49e3cd2c701bb845963c2870dcca12ed070c3f67ffc20144327a93aa6e896ec2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600c85a800ac1f59f0df206ad6fa5f26e2f208deda2c0af58ed8c6811c525a9fda21a781689446edfdc0b8a07062daffebe442699974b64cf1c31688fad59ce710bc61cb1b626fea15085663f7619a1769ffbe4fc1f8c63e6ece773acfe180806c92a82146e771968df054
c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600fe7489b632220f8c23b1d66fd40bac34513f1eed3c9a343a59dc09d8d360517db73b05c2e33b0da1932a7cff38748645d8bcfe843290a561c63ebd7b98ec1e0cc63062763b78629518608de49f42c802ec2fc22f477e10a1a0023b237675d65d2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460018b4e42adfd1044bce65ff12455dda705b023cc0ffdef4c073aae2180cffac8375975214bb357988f37441c14e1c2913b2c7babed43a0b270920da6970566c08c676bdd3798340c7b35c624f2647f26816ba4b3a0821339f5a0d6c9f2d84ca3a2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460034a88c97937fabdc6e2ec582df7cef3829a2ba8b6e57795bb1832e08d07f12912c546f7452b4d0338ce197a9c0a376202387322f371495ebf891a3cb27551400c6a46dbbd2e87ecd0db7ffcffe24c1a82353bc4371d7c2cfb81e7c832d556fce2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460099d000bd6eb0abc52ab88474dc3e504f6e0b21a00cf196317e471926b93410177ae48f2860c98678769671d81b85ec462596ddb585b6e30f4e71e1a98ef80c01c70ad885a3f3ce3fa4041e1477cd801cbff8daa7c835d4463044fed653b3830b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000bef4c68d2fe10b9b34995633ab9dc2c76f43bc2e837252024d85417a12dcddec9cbe44c70891e541f0d67a19e1929de1b0ee5f610808feabb9d9741a0ac7201c8768f257eeaa2a6636fc68a00eb781941c26d2f5179455ac4949a320f958d6f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008d426d804b32a7615e207fc5fa9c119c9ace79782226bfb6dab21159f3950f18619faafa59caa9a1cd0e0401698137de6b454e45a74899e74681e7e2ee85ac07cb92ffeace78dbafe6fbf275741b4b38657fb81590712aa0bca7877931f6ad392a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008319290259d0047497e2825a6ce9166f285a7333ade0a970ad928fd1b259feea5c927aa2c742e7fc7b542c579e65979837a77437cf28370a44feef9b8fdb5003cbfefdf389bc341a7e17139f61146f9fb3a9d7bb84fe93c1f771eafd4d4d9d462a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d54d20eb70a33339ff29f8017a48a857c4772ada28e5a88be923045c7ca2194a5683d51472db38d9ee5295fa89296326405
5b15599226d1ad75aaf550caf9f00cca95e245ef1d3209f6707fec25036c0a1b93ae5613a5c93095e23520395c57b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46006fcea4a22b6b3b28a52fe41d07f4dfcb20f80b2d3308ed4b5b46ed8e4f982a6968bef3e34ac0657b1807f9f0203d905d8dc8caf4ff0c00fb651489984046510bd060d2b638be7c2ff45b20575a76c4acad1b9264607e1b71286e4e00a03266a22a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001c0549d687700803d27fa15f67df39a92975721e14adc62d267fc7b676c2bdf4d42c66eca0ca0b6fceaceb73d042a59bd88c040aa870c4636f5ee167776fca04d1c146d2429a5a827660008721c7a880e71f44feaa3dc75524c1a9281bac48cd2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460036ad739a304649cce8e4f18021379959c98bf50e9fa1abfd74a9c3778b35f3ee2c2401a0f51794c4c44fd99d167ec94a52529761c41182fc708a6bc25a32b50ed1deb1ed8d4e155f1b5bbc54610756cf2541f43b7c1776f97d404503878b96002a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46002e1b05424cd82fec9717f23563923e095c71221e13a284261e553617e94c2f4d053c28d9652abefb837ba0d9cedce01d40696794117fca8f2b232ba97d418802d2c1a8caace45c00d73f64f241162f85842c35e557e097fd1749040fedc94df92a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46001b1722198be15749cb2c16b9b0cd42dbb320c3f6ffaa880fa90a8a8db74ad322f69bdc2762796ae752ac4377637aa6e0d757320c78d8a2b3f47a8fd0a1214303d3adef5f9366e150e5a21d8837947fc2378997c7658245fdf0bca95a513f04f92a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d202e0bbf1b01acf647b11dd367019b763c8b37f1a9cf1d1a1fe663f5cf1aa6c3c9f4a6675515e9ff47255a6627e888de370f118abc6be342d624a6a83dd960fd416f2796d696faaeb34d2cdaf1004ede551b62a690f99d69b4ccd5f4a6c248b2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460028caec545fd07031c5e73f34814d463636013534942812417d2317c7a557af8d21c742280eeecf514916493746db6d6ee1e059170ea0123220a6ae57c3c8e102d4b350a3b25c27c9ab9366d130560326fe6bb7f3a820767736b8a061405e153e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59
f702247bd4600e5a071915555df4404f783902b68cb3c44e4ae8485d043cec0e615ff63719e6eec2cab48ed142c04efffb4321724cedaa427d27e9bed99344dba219a533b6b09d67211ff6ded7cffc866c81e24a9f54f08cd1df7ab202ae796a01d72cd2d3ab32a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460029a7b0b7a6b4a80c7e89b1cc2bb8635bea1587ad214100da7b11add353cd3e69df0b8dfdf17264eae2753de1ef1d62d267cd7214768bda15f56f9e6b00612902d744af98bbb366011a0fe90c3cee81d21cb301d9bb72d5b40b28da348ecb81672a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d77c1a9378d17bbba63e854c9bfd0abab0b3650a74bd698d7b9feea4f588d5dfa2ce724f24f60d844a4feac5d9b5928f4c537caa9230ef2ac5d1d9953b1b330cd7be80fafe9de0570984b008636808811ccfd82f32639cf6806dfb86c4c4c4c72a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d373bbbb4349a52b00d00a3981a161ed96bd0fa969faf1dbb8b49879f86dd5839b0ab12856f912ebe2c8e8a12d82dc7688729a6449d3bb6b7a6d53f66d82d203d86e516b7564a8ab0b67b503c97976157c05fc2395068b27df5f38a1a9b29b6e2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005065b2652114169f82c0c1483c19f8fd2f0c72f8c98928fe419ce0b93b2ef1583cb684837009c4c97381eb83c880f62055e9068113d9e07d1b29c991521a5308d8a6314d0690c070c764863c7c25fb8e9d6cd462f9bd56b0b236558f66fd74fa2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460016e5038faa65afb9f179af818be6b65c1a93963effe329ecba6786891ac1d8956e3f209365e45bcaec3879ff27891202e219e8061228f7001446f532b0bc0d09d978b1073d9c88d84ece71a915d577b8b381938d07827447ec20a5bb250496d22a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d57606898de72fb911f85da1bb8fb3743b6edc0e1c61a61bc05d3b6d1d429d5b6d86e38ae7289b40efbb3df89d14f5704f8e61bf2bfe195470f8a8fe9146d009db1067e5c50401a17b926e54a98e103abd64c4b83c87b770a8312ff03bc29be72a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ef38ecce8f3b7f2253942c612b029e05a6b37f8b1c406f5d01479ec76f956cd040f3a679a9e77a037401185a665f3162531feab7221d828e4bf7662ea876000ddb231b388a5
ba18b2568eb1ab9ec84a637a66d6c1286fc7e1c2351250db635c32a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46000d66f46119003f7476f2115373c0b88a81e3044ef83d9955fce655439adcaf22a12b74b5fd18db965e694d5635f4aa2418a263decf6c448b891462432ee7ae05db3aeeb826b44e7808e3865df0da2f479442e2f2fc895a46dffa8d908ec95f512a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460065ba17675d688b0fe1ae25984335d388c61f14a81ceddf6b6585b90a9a59bba0a4070d815018326acd57a88edbef65d60ba226554112f06c1f33b0c6b39b9401dc1aada86981424a634bbc637e58712bc5002a34bb6498487d89d83534b05da02a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46005565dd6e8a390aa49b5491cd0bce4798501a87884ef50ba120262b826ff28ba5741930e988241f4efcc0cd42f7c621033166193fc85b00b792d41135172d780fdcee76cd80cf2218de9e8c4ae1922fe1c26235f9cb479b2950b064025a1d69412a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600f796ea2ed7675e68b13fa0df5a873706de9c3490e006b4b7e330668ca62ba0eb7284b5087a09b9ae01e2ffa3aed1a545a1de99ea38b0bc183d46884315170502e2cc4424464983ada824bbef5aaa8995e80ef1017c15ef3b13902599841637ba2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46006abff33b091bfe069c92ba5bcd25d56b604a45cd1023080614e8e8578f9327c7025a9bb2ba92c4db3e699015ac4f46dacec10245d86b73d41b9e751bc7f6e203e58210689f52468a22c10566f3a2c6f870e61524298a3e781425861d40446b132a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d2a95ca51c964bb35183f2ae626a09a01b7b7e82a046af708da537389d68757b8a5d5c8f230523c5ec089277b7c0e072fcf4d9dca2ecd7bb6a3eec6e95919c01e6ebe15e9e2c9f0fe234241ee50e9c574774807c0d17f8dbc7fdd1802c5c79792a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600254314bb9821d205214651a8c292292089a320ccde7dd38acb21552a855eb0ace84b132f04317b591b84be06679d74df67b46abee0128089679c45e6f7836c00e7210714763e5bb3fab12067d0a55784ae0d70ef14ccf9e243bbfc6d329834102a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600e3b32fd44cc8a2dfd85b2717b54
a4d21d93a8f40b0c01be21f81dbc482b383d2a4c32911a93fcc4c8a746dc75aa0925c6b266b397a17f05318e6cebf51661001e81206b483fdcf1fe42145f27d1efb8178a57ee24b196285374037da2a53b3232a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46008b0b5494d43206cf55b468597dbb9dcfc51cb59a5b94a2f2150d5bed423057867958b09d96d7b772d41d2ffb7f68125ea119f0d628f2ad338856a34fbdd8ef05e90f0dd5c3d8e23deb6b82b9d7335855f5887c3b77ac1fe30d9112b22319bc922a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d2a3355b20c0bb0286e1951b985ac51bea4439b0c70ede6f38c026495bdc948e2df2f72d00b1d3d7a2bedfabe8a57d6d4f0af59670b5e75eb713a1ec2cf7d802e9836cd1a96c0010f43161443a790d65ea2afdef21b05bd563ed55c6eecb00662a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460015d31429c5579f409756d92155519c4bd60ca1b095c8f5a5aac66459efa937f48989d599106e5749cb74e8718bb96c49119ec5aa11adb3251a9365456809ae0aeb0af0662538c295c85d7da7b1e0d929f8d887a55bf1a136a144f2f7cf3215802a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460083b2ce5b2c3df07146efcb19e6dd1d8755ca7ef52ba3b05e3a2830374a4f984a4a2946c0905d47f962bf14e51bb0aba4303806247135d579317349e421550505ee8f86db1c1d2b7cd11e962fcfd1a7e847204337c8893f1c1ddb75c6bff3f2bb2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600dd14dbdb4c43afa43d3da56daec9d48f09e356082e2317b505fc1a8520eb491d6fd344bc94479d9497be72d2dbc95d193c6bcaaaec8c5df928b37e8373cfb005eecc027b5c05b7158fa0614fd70df47412f181e808bb78cbb63e29adf14ec91c2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460053e0270ad201d8ccbd4253715af906a56e2f44c6d78f9344da12e465a5c755ba569b0f5db093834921b8ed11f4df2dd52497b387efdf3948ff632ae40f7da504f1020dda4f7e6af02b444bfcc70a0a18bd48042ad72b1955778bf4e56086a33f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600ff32188333a59ee7d640f5897f5415cdf08e97742f82d340687c63b71c0ba9ae42599d522a40233b772d6147edd01a7d5bc865714f95ff979c03e61b9d1a6a03f2a930984c2799a98d298521128eff1add9706c9e57e848d54d
e41f6b8dac4b42a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460039c0604b3ad216bbdfc17d641114a0d25e0865ff21cc945574cda12f7c41f7e19f3b65550b410b017e0e8f58c94d9abd29f01b7036193305fbecd341e7156509f5054136e1ea0e3956293c422a147eaa4950cfcf0ad9596dc6e6a287bdfb06602a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460029e0bf534f510a4cd43b5011d63057da67b36e2b03cf6087b3bb8ec80fdbb9e63b5e361313e2dee669d1d5644696e5c8b4e71548ec9c6daf4c85873318d9b708f8d4441b4ef1c2ee50272e1767ac4f773a249eb663c7e37bc075951f3b52ede52a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600753c2d7a7193f908ed808903b7cfbc3e2f5a8d9fb380c054f887c0fe21b206d81406d25bb1d0ef6cb6a0feaf015f9e96c0d703e9bc5a0e956feff40fdf0f0d0ef9887f6f0a8673aad0c10d1bf6ae3ae5f0089c8f05d54e180e16030949a8f7782a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d0c3538d97ce8594f8a113d01728ac4b13549646557385c6b5263f4a67a0ba96e2e5f0ee3ac9c85fff368b4013eb43900be5e672375a3f3860298342a0325f05fa786562ceec7e61851571e8eaae35c7f0f4b2ada3ddb4d577c66ab50a1d6ceb2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600eb8636c254b4cec68eea4ef00a3ae43b6dba9c8f85958b514042d0764f66bc9b22e4205ff4d8d4427395df3aeb15fc840356a9eabacee3a23c8cd6aff517db05faf968e9e1468d58947b9217ed7aecf19ce457036a2b46e47c44e5a1d67e5b1f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600a7038a3eb4b56c7414816925b26d1756706f2256596c99181ae8746a40cc32c5192c93a13997c9a11f31fa6d6263164e0c06034f0790400f1730c250f4054205fbcc3ce7dbd34cd50cb3563d2b2f62a5ff2bc847e6b6e79faee63257978f9c192a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600762a2c5d7849ffbd8c2560f7e6638ede211a0e59347bb1c7f629445e6af0d38826319366d3597f0773b49515907516f2e1894618f8517297b4624866c28e3e0ffc678679f11bc904720421273689e6826acb42b21e2c4c5c1d7bf532d89668412a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd46006dbd8fd05b77b7556cef50351310ca989db725fedbe17fbdc079043ba50dd86f7fe
96f621e1eafb13aaf097fed1d441a90e6462d4919a752441455bf49698a0dfd9e354ed59f35c917c42471588142e715ed2151e89ecaf98bb5d837515d74772a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd460010d0c657b622c3927cb79d4e34d0d1608e0b4f12294277049528b4a3e4add5745ce5cf9048f16607fcba67325cf06c460d2766421210dbcbeff29b1f0dc39402ff0bf39c82ed573d48585448b0fc19ba3f6203806cb6b4a230848892c097a26f2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f702247bd4600d072ab3364ee35ffd547d78aba38e764805b6caeff708dcd806acd06ee2d684cccddbacd4c5d3ba73a6f6e03d3749635ee8eaff19137ec7dc686c46af8f76b07fff437ff18629bf1490e5c9b3ec6f1515d46bb9b2aeaa6e39e36611f2479b50d00") + fj := new(Justification) + rw := &bytes.Buffer{} + rw.Write(data) + + err := fj.Decode(rw) + require.NoError(t, err) + require.Equal(t, uint64(6971), fj.Round) + require.Equal(t, uint32(4635975), fj.Commit.Number) + require.Equal(t, common.MustHexToHash("0x2a82146e771968df054c8036040dea584339df52d8cbac6970d4c22ed59f7022"), fj.Commit.Hash) + require.Equal(t, 199, len(fj.Commit.Precommits)) +} diff --git a/lib/grandpa/vote_message.go b/lib/grandpa/vote_message.go index 9315c35879..95d862dfdc 100644 --- a/lib/grandpa/vote_message.go +++ b/lib/grandpa/vote_message.go @@ -39,22 +39,22 @@ func (s *Service) receiveMessages(cond func() bool) { continue } - s.logger.Trace("received vote message", "msg", msg) + logger.Trace("received vote message", "msg", msg) vm, ok := msg.(*VoteMessage) if !ok { - s.logger.Trace("failed to cast message to VoteMessage") + logger.Trace("failed to cast message to VoteMessage") continue } v, err := s.validateMessage(vm) if err != nil { - s.logger.Trace("failed to validate vote message", "message", vm, "error", err) + logger.Trace("failed to validate vote message", "message", vm, "error", err) continue } - s.logger.Debug("validated vote message", "vote", v, "round", vm.Round, "subround", vm.Stage, "precommits", s.precommits) + logger.Debug("validated vote message", "vote", v, "round", vm.Round, 
"subround", vm.Message.Stage, "precommits", s.precommits) case <-ctx.Done(): - s.logger.Trace("returning from receiveMessages") + logger.Trace("returning from receiveMessages") return } } @@ -90,7 +90,7 @@ func (s *Service) sendMessage(vote *Vote, stage subround) error { } s.network.SendMessage(cm) - s.logger.Trace("sent VoteMessage", "msg", msg) + logger.Trace("sent VoteMessage", "msg", msg) return nil } @@ -113,6 +113,7 @@ func (s *Service) createVoteMessage(vote *Vote, stage subround, kp crypto.Keypai } sm := &SignedMessage{ + Stage: stage, Hash: vote.hash, Number: vote.number, Signature: ed25519.NewSignatureBytes(sig), @@ -122,7 +123,6 @@ func (s *Service) createVoteMessage(vote *Vote, stage subround, kp crypto.Keypai return &VoteMessage{ Round: s.state.round, SetID: s.state.setID, - Stage: stage, Message: sm, }, nil } @@ -184,27 +184,27 @@ func (s *Service) validateMessage(m *VoteMessage) (*Vote, error) { s.mapLock.Lock() defer s.mapLock.Unlock() - just := &Justification{ + just := &SignedPrecommit{ Vote: vote, Signature: m.Message.Signature, AuthorityID: pk.AsBytes(), } // add justification before checking for equivocation, since equivocatory vote may still be used in justification - if m.Stage == prevote { + if m.Message.Stage == prevote { s.pvJustifications[m.Message.Hash] = append(s.pvJustifications[m.Message.Hash], just) - } else if m.Stage == precommit { + } else if m.Message.Stage == precommit { s.pcJustifications[m.Message.Hash] = append(s.pcJustifications[m.Message.Hash], just) } - equivocated := s.checkForEquivocation(voter, vote, m.Stage) + equivocated := s.checkForEquivocation(voter, vote, m.Message.Stage) if equivocated { return nil, ErrEquivocation } - if m.Stage == prevote { + if m.Message.Stage == prevote { s.prevotes[pk.AsBytes()] = vote - } else if m.Stage == precommit { + } else if m.Message.Stage == precommit { s.precommits[pk.AsBytes()] = vote } @@ -215,7 +215,7 @@ func (s *Service) validateMessage(m *VoteMessage) (*Vote, error) { // it 
returns true if so, false otherwise. // additionally, if the vote is equivocatory, it updates the service's votes and equivocations. func (s *Service) checkForEquivocation(voter *Voter, vote *Vote, stage subround) bool { - v := voter.key.AsBytes() + v := voter.Key.AsBytes() var eq map[ed25519.PublicKeyBytes][]*Vote var votes map[ed25519.PublicKeyBytes]*Vote @@ -246,7 +246,7 @@ func (s *Service) checkForEquivocation(voter *Voter, vote *Vote, stage subround) } // validateVote checks if the block that is being voted for exists, and that it is a descendant of a -// previously finalized block. +// previously finalised block. func (s *Service) validateVote(v *Vote) error { // check if v.hash corresponds to a valid block has, err := s.blockState.HasHeader(v.hash) @@ -258,7 +258,7 @@ func (s *Service) validateVote(v *Vote) error { return ErrBlockDoesNotExist } - // check if the block is an eventual descendant of a previously finalized block + // check if the block is an eventual descendant of a previously finalised block isDescendant, err := s.blockState.IsDescendantOf(s.head.Hash(), v.hash) if err != nil { return err @@ -273,7 +273,7 @@ func (s *Service) validateVote(v *Vote) error { func validateMessageSignature(pk *ed25519.PublicKey, m *VoteMessage) error { msg, err := scale.Encode(&FullVote{ - Stage: m.Stage, + Stage: m.Message.Stage, Vote: NewVote(m.Message.Hash, m.Message.Number), Round: m.Round, SetID: m.SetID, diff --git a/lib/grandpa/vote_message_test.go b/lib/grandpa/vote_message_test.go index 8af3b1aed9..c5e4d9f75b 100644 --- a/lib/grandpa/vote_message_test.go +++ b/lib/grandpa/vote_message_test.go @@ -30,7 +30,6 @@ import ( func TestCheckForEquivocation_NoEquivocation(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -38,6 +37,7 @@ func TestCheckForEquivocation_NoEquivocation(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: 
&mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -62,7 +62,6 @@ func TestCheckForEquivocation_NoEquivocation(t *testing.T) { func TestCheckForEquivocation_WithEquivocation(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -70,6 +69,7 @@ func TestCheckForEquivocation_WithEquivocation(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -89,7 +89,7 @@ func TestCheckForEquivocation_WithEquivocation(t *testing.T) { voter := voters[0] - gs.prevotes[voter.key.AsBytes()] = vote1 + gs.prevotes[voter.Key.AsBytes()] = vote1 vote2, err := NewVoteFromHash(leaves[1], st.Block) require.NoError(t, err) @@ -99,12 +99,11 @@ func TestCheckForEquivocation_WithEquivocation(t *testing.T) { require.Equal(t, 0, len(gs.prevotes)) require.Equal(t, 1, len(gs.pvEquivocations)) - require.Equal(t, 2, len(gs.pvEquivocations[voter.key.AsBytes()])) + require.Equal(t, 2, len(gs.pvEquivocations[voter.Key.AsBytes()])) } func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -112,6 +111,7 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -137,7 +137,7 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { voter := voters[0] - gs.prevotes[voter.key.AsBytes()] = vote + gs.prevotes[voter.Key.AsBytes()] = vote vote2 := NewVoteFromHeader(branches[0]) require.NoError(t, err) @@ -156,12 +156,11 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { require.Equal(t, 0, len(gs.prevotes)) require.Equal(t, 1, 
len(gs.pvEquivocations)) - require.Equal(t, 3, len(gs.pvEquivocations[voter.key.AsBytes()])) + require.Equal(t, 3, len(gs.pvEquivocations[voter.Key.AsBytes()])) } func TestValidateMessage_Valid(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -169,6 +168,7 @@ func TestValidateMessage_Valid(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -192,7 +192,6 @@ func TestValidateMessage_Valid(t *testing.T) { func TestValidateMessage_InvalidSignature(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -200,6 +199,7 @@ func TestValidateMessage_InvalidSignature(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -231,6 +231,7 @@ func TestValidateMessage_SetIDMismatch(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Keypair: kr.Bob().(*ed25519.Keypair), Network: net, @@ -254,7 +255,6 @@ func TestValidateMessage_SetIDMismatch(t *testing.T) { func TestValidateMessage_Equivocation(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -262,6 +262,7 @@ func TestValidateMessage_Equivocation(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -287,7 +288,7 @@ func TestValidateMessage_Equivocation(t *testing.T) { voter := voters[0] - gs.prevotes[voter.key.AsBytes()] = vote + gs.prevotes[voter.Key.AsBytes()] = vote msg, err := gs.createVoteMessage(NewVoteFromHeader(branches[0]), prevote, kr.Alice()) 
require.NoError(t, err) @@ -298,7 +299,6 @@ func TestValidateMessage_Equivocation(t *testing.T) { func TestValidateMessage_BlockDoesNotExist(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -306,6 +306,7 @@ func TestValidateMessage_BlockDoesNotExist(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), @@ -331,7 +332,6 @@ func TestValidateMessage_BlockDoesNotExist(t *testing.T) { func TestValidateMessage_IsNotDescendant(t *testing.T) { st := newTestState(t) - voters := newTestVoters() net := newTestNetwork(t) kr, err := keystore.NewEd25519Keyring() @@ -339,6 +339,7 @@ func TestValidateMessage_IsNotDescendant(t *testing.T) { cfg := &Config{ BlockState: st.Block, + GrandpaState: st.Grandpa, DigestHandler: &mockDigestHandler{}, Voters: voters, Keypair: kr.Bob().(*ed25519.Keypair), diff --git a/lib/keystore/keyring.go b/lib/keystore/keyring.go index cef7b99d50..9835f6e0c0 100644 --- a/lib/keystore/keyring.go +++ b/lib/keystore/keyring.go @@ -66,7 +66,7 @@ type Sr25519Keyring struct { Keys []*sr25519.Keypair } -// NewSr25519Keyring returns an initialized sr25519 Keyring +// NewSr25519Keyring returns an initialised sr25519 Keyring func NewSr25519Keyring() (*Sr25519Keyring, error) { kr := new(Sr25519Keyring) v := reflect.ValueOf(kr).Elem() @@ -152,7 +152,7 @@ type Ed25519Keyring struct { Keys []*ed25519.Keypair } -// NewEd25519Keyring returns an initialized ed25519 Keyring +// NewEd25519Keyring returns an initialised ed25519 Keyring func NewEd25519Keyring() (*Ed25519Keyring, error) { kr := new(Ed25519Keyring) v := reflect.ValueOf(kr).Elem() diff --git a/lib/runtime/constants.go b/lib/runtime/constants.go index fe20618c7e..28aa82a734 100644 --- a/lib/runtime/constants.go +++ b/lib/runtime/constants.go @@ -38,10 +38,11 @@ const ( HOST_API_TEST_RUNTIME_URL = 
"https://github.com/noot/polkadot-spec/blob/master/test/hostapi_runtime.compact.wasm?raw=true" ) +//nolint var ( // CoreVersion is the runtime API call Core_version CoreVersion = "Core_version" - // CoreInitializeBlock is the runtime API call Core_initialize_block + // CoreInitialiseBlock is the runtime API call Core_initialize_block CoreInitializeBlock = "Core_initialize_block" // CoreExecuteBlock is the runtime API call Core_execute_block CoreExecuteBlock = "Core_execute_block" diff --git a/lib/runtime/extrinsic/unchecked_extrinsic_test.go b/lib/runtime/extrinsic/unchecked_extrinsic_test.go index 665df992b7..f44b023109 100644 --- a/lib/runtime/extrinsic/unchecked_extrinsic_test.go +++ b/lib/runtime/extrinsic/unchecked_extrinsic_test.go @@ -83,7 +83,7 @@ func TestUncheckedExtrinsic_Encode(t *testing.T) { func TestMain(m *testing.M) { k, err := keystore.NewSr25519Keyring() if err != nil { - log.Fatal(fmt.Errorf("error initializing keyring")) + log.Fatal(fmt.Errorf("error initialising keyring")) } kr = *k testTransFunc = createFunction() diff --git a/lib/runtime/life/exports.go b/lib/runtime/life/exports.go index b662307bcc..cc04df8112 100644 --- a/lib/runtime/life/exports.go +++ b/lib/runtime/life/exports.go @@ -85,7 +85,7 @@ func (in *Instance) GrandpaAuthorities() ([]*types.Authority, error) { return types.GrandpaAuthoritiesRawToAuthorities(adr.([]*types.GrandpaAuthoritiesRaw)) } -// InitializeBlock calls runtime API function Core_initialize_block +// InitializeBlock calls runtime API function Core_initialise_block func (in *Instance) InitializeBlock(header *types.Header) error { encodedHeader, err := scale.Encode(header) if err != nil { @@ -106,6 +106,7 @@ func (in *Instance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { return in.Exec(runtime.BlockBuilderApplyExtrinsic, data) } +//nolint // FinalizeBlock calls runtime API function BlockBuilder_finalize_block func (in *Instance) FinalizeBlock() (*types.Header, error) { data, err := 
in.Exec(runtime.BlockBuilderFinalizeBlock, []byte{}) diff --git a/lib/runtime/sig_verifier.go b/lib/runtime/sig_verifier.go index 0e72e7c98a..458d18ee09 100644 --- a/lib/runtime/sig_verifier.go +++ b/lib/runtime/sig_verifier.go @@ -31,7 +31,7 @@ type SignatureVerifier struct { sync.WaitGroup } -// NewSignatureVerifier initializes SignatureVerifier which does background verification of signatures. +// NewSignatureVerifier initialises SignatureVerifier which does background verification of signatures. // Start() is called to start the verification process. // Finish() is called to stop the verification process. // Signatures can be added to the batch using Add(). diff --git a/lib/runtime/storage/trie_test.go b/lib/runtime/storage/trie_test.go index 9e3e5eda5f..c2e22ad62a 100644 --- a/lib/runtime/storage/trie_test.go +++ b/lib/runtime/storage/trie_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" ) -// newTestTrieState returns an initialized TrieState +// newTestTrieState returns an initialised TrieState func newTestTrieState(t *testing.T) *TrieState { ts, err := NewTrieState(nil) require.NoError(t, err) diff --git a/lib/runtime/wasmer/exports.go b/lib/runtime/wasmer/exports.go index fa5234f2ff..a5c21699ad 100644 --- a/lib/runtime/wasmer/exports.go +++ b/lib/runtime/wasmer/exports.go @@ -106,7 +106,7 @@ func (in *Instance) GrandpaAuthorities() ([]*types.Authority, error) { return types.GrandpaAuthoritiesRawToAuthorities(adr.([]*types.GrandpaAuthoritiesRaw)) } -// InitializeBlock calls runtime API function Core_initialize_block +// InitializeBlock calls runtime API function Core_initialise_block func (in *Instance) InitializeBlock(header *types.Header) error { encodedHeader, err := scale.Encode(header) if err != nil { @@ -127,6 +127,7 @@ func (in *Instance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { return in.exec(runtime.BlockBuilderApplyExtrinsic, data) } +//nolint // FinalizeBlock calls runtime API function 
BlockBuilder_finalize_block func (in *Instance) FinalizeBlock() (*types.Header, error) { data, err := in.exec(runtime.BlockBuilderFinalizeBlock, []byte{}) diff --git a/lib/runtime/wasmer/exports_test.go b/lib/runtime/wasmer/exports_test.go index 3d6a993084..b00abcd238 100644 --- a/lib/runtime/wasmer/exports_test.go +++ b/lib/runtime/wasmer/exports_test.go @@ -459,7 +459,7 @@ func TestInstance_ApplyExtrinsic_GossamerRuntime(t *testing.T) { require.NoError(t, err) instance.SetContextStorage(parentState) - //initialize block header + //initialise block header parentHash := common.MustHexToHash("0x35a28a7dbaf0ba07d1485b0f3da7757e3880509edc8c31d0850cb6dd6219361d") header, err := types.NewHeader(parentHash, common.Hash{}, common.Hash{}, big.NewInt(1), types.NewEmptyDigest()) require.NoError(t, err) diff --git a/lib/runtime/wasmtime/exports.go b/lib/runtime/wasmtime/exports.go index 4302e7a393..e2574f53c1 100644 --- a/lib/runtime/wasmtime/exports.go +++ b/lib/runtime/wasmtime/exports.go @@ -98,6 +98,7 @@ func (in *Instance) ValidateTransaction(e types.Extrinsic) (*transaction.Validit return v, err } +//nolint // InitializeBlock calls runtime API function Core_initialize_block func (in *Instance) InitializeBlock(header *types.Header) error { encodedHeader, err := scale.Encode(header) @@ -119,6 +120,7 @@ func (in *Instance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { return in.exec(runtime.BlockBuilderApplyExtrinsic, data) } +//nolint // FinalizeBlock calls runtime API function BlockBuilder_finalize_block func (in *Instance) FinalizeBlock() (*types.Header, error) { data, err := in.exec(runtime.BlockBuilderFinalizeBlock, []byte{}) diff --git a/lib/scale/decode.go b/lib/scale/decode.go index c8dc52a882..898b78073b 100644 --- a/lib/scale/decode.go +++ b/lib/scale/decode.go @@ -473,7 +473,7 @@ func (sd *Decoder) DecodeSlice(t interface{}) (interface{}, error) { } // DecodeTuple accepts a byte array representing the SCALE encoded tuple and an interface. 
This interface should be a pointer -// to a struct which the encoded tuple should be marshaled into. If it is a valid encoding for the struct, it returns the +// to a struct which the encoded tuple should be marshalled into. If it is a valid encoding for the struct, it returns the // decoded struct, otherwise error, // Note that we return the same interface that was passed to this function; this is because we are writing directly to the // struct that is passed in, using reflect to get each of the fields. diff --git a/lib/scale/encode.go b/lib/scale/encode.go index fae886bc56..bdfc91046a 100644 --- a/lib/scale/encode.go +++ b/lib/scale/encode.go @@ -100,16 +100,25 @@ func (se *Encoder) EncodeCustom(in interface{}) (int, error) { someType := reflect.TypeOf(in) // TODO: if not a pointer, check if type pointer has Encode method _, ok := someType.MethodByName("Encode") - if ok { - res := reflect.ValueOf(in).MethodByName("Encode").Call([]reflect.Value{}) - val := res[0].Interface() - err := res[1].Interface() - if err != nil { - return 0, err.(error) - } + if !ok { + return 0, fmt.Errorf("cannot call EncodeCustom") + } + + res := reflect.ValueOf(in).MethodByName("Encode").Call([]reflect.Value{}) + if len(res) == 0 { + return 0, fmt.Errorf("method Encode does not have any return values") + } + + val := res[0].Interface() + if len(res) < 2 { return se.Writer.Write(val.([]byte)) } - return 0, fmt.Errorf("cannot call EncodeCustom") + + err := res[1].Interface() + if err != nil { + return 0, err.(error) + } + return se.Writer.Write(val.([]byte)) } // encodeCustomOrEncode tries to use EncodeCustom, if that fails, it reverts to Encode diff --git a/lib/trie/database_test.go b/lib/trie/database_test.go index 42fff69904..4e8fc12aed 100644 --- a/lib/trie/database_test.go +++ b/lib/trie/database_test.go @@ -35,7 +35,7 @@ func newTestDB(t *testing.T) chaindb.Database { InMemory: true, } - // TODO: don't initialize new DB but pass it in + // TODO: don't initialise new DB but pass it 
in db, err := chaindb.NewBadgerDB(cfg) require.NoError(t, err) return chaindb.NewTable(db, "trie") diff --git a/lib/trie/trie_test.go b/lib/trie/trie_test.go index 28f26caaa9..795ead0d62 100644 --- a/lib/trie/trie_test.go +++ b/lib/trie/trie_test.go @@ -72,14 +72,14 @@ var ( func TestNewEmptyTrie(t *testing.T) { trie := NewEmptyTrie() if trie == nil { - t.Error("did not initialize trie") + t.Error("did not initialise trie") } } func TestNewTrie(t *testing.T) { trie := NewTrie(&leaf{key: []byte{0}, value: []byte{17}}) if trie == nil { - t.Error("did not initialize trie") + t.Error("did not initialise trie") } } diff --git a/scripts/install-lint.sh b/scripts/install-lint.sh index 5efd724247..8f0837bdbf 100755 --- a/scripts/install-lint.sh +++ b/scripts/install-lint.sh @@ -6,7 +6,7 @@ fi if ! command -v golangci-lint &> /dev/null then - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.32.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.39.0 fi export PATH=$PATH:$(go env GOPATH)/bin \ No newline at end of file diff --git a/tests/polkadotjs_test/package-lock.json b/tests/polkadotjs_test/package-lock.json index 5c28d80371..dd4cb1c748 100644 --- a/tests/polkadotjs_test/package-lock.json +++ b/tests/polkadotjs_test/package-lock.json @@ -5,164 +5,164 @@ "requires": true, "dependencies": { "@babel/runtime": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.8.tgz", - "integrity": "sha512-CwQljpw6qSayc0fRG1soxHAKs1CnQMOChm4mlQP6My0kf9upVGizj/KhlTTgyUnETmHpcUXjaluNAkteRFuafg==", + "version": "7.13.10", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.10.tgz", + "integrity": "sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw==", "requires": { "regenerator-runtime": "^0.13.4" } }, "@polkadot/api": { - "version": 
"2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-2.8.1.tgz", - "integrity": "sha512-IvR8aTUzd3759tJVkHEsnpXqdvv72mTkST3poO2/v30GusqTH6KQDWhQy7MhgYjElk9hLIPZRsmA62WVOlSG2Q==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/api-derive": "2.8.1", - "@polkadot/keyring": "^4.2.1", - "@polkadot/metadata": "2.8.1", - "@polkadot/rpc-core": "2.8.1", - "@polkadot/rpc-provider": "2.8.1", - "@polkadot/types": "2.8.1", - "@polkadot/types-known": "2.8.1", - "@polkadot/util": "^4.2.1", - "@polkadot/util-crypto": "^4.2.1", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-3.11.1.tgz", + "integrity": "sha512-VqEh2n13ESLxnTUKujUfZ3Spct+lTycNgrX+IWD7/f05GsMwhCZLYtt708K8nqGFH2OKDl8xzwuGCvRN/05U1Q==", + "requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/api-derive": "3.11.1", + "@polkadot/keyring": "^5.9.2", + "@polkadot/metadata": "3.11.1", + "@polkadot/rpc-core": "3.11.1", + "@polkadot/rpc-provider": "3.11.1", + "@polkadot/types": "3.11.1", + "@polkadot/types-known": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/util-crypto": "^5.9.2", + "@polkadot/x-rxjs": "^5.9.2", "bn.js": "^4.11.9", - "eventemitter3": "^4.0.7", - "rxjs": "^6.6.3" + "eventemitter3": "^4.0.7" } }, "@polkadot/api-derive": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-2.8.1.tgz", - "integrity": "sha512-5oJ7V7yRHHSSnWQ/l3MQQ8+ki/g+v4NbqgI/FTOIUQl7Ja1lPwjKYpqXgP7EGob+pcdFj6VRqywzAOkVA730tw==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/api": "2.8.1", - "@polkadot/rpc-core": "2.8.1", - "@polkadot/types": "2.8.1", - "@polkadot/util": "^4.2.1", - "@polkadot/util-crypto": "^4.2.1", - "bn.js": "^4.11.9", - "memoizee": "^0.4.14", - "rxjs": "^6.6.3" + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-3.11.1.tgz", + "integrity": "sha512-/v/fNSivgucQrDJvwLU17u8iZ0oQipQzgpofCJGQhRv8OaSv/E9g5EXcHJ1ri/Ozevgu5cPmGs96lLkQaPieAw==", + 
"requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/api": "3.11.1", + "@polkadot/rpc-core": "3.11.1", + "@polkadot/types": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/util-crypto": "^5.9.2", + "@polkadot/x-rxjs": "^5.9.2", + "bn.js": "^4.11.9" } }, "@polkadot/keyring": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-4.2.1.tgz", - "integrity": "sha512-8kH8jXSIA3I2Gn96o7KjGoLBa7fmc2iB/VKOmEEcMCgJR32HyE8YbeXwc/85OQCheQjG4rJA3RxPQ4CsTsjO7w==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-5.9.2.tgz", + "integrity": "sha512-h9AhrzyUmludbmo0ixRFLEyRJvUc7GTl5koSBrG0uv+9Yn0I/7YRgAKn3zKcUVZyvgoLvzZnBFwekGbdFcl9Yg==", "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/util": "4.2.1", - "@polkadot/util-crypto": "4.2.1" + "@babel/runtime": "^7.13.8", + "@polkadot/util": "5.9.2", + "@polkadot/util-crypto": "5.9.2" } }, "@polkadot/metadata": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/metadata/-/metadata-2.8.1.tgz", - "integrity": "sha512-tJ+hTXsvve1f2pziPGp/nELK+W/xvMsc2xGgoVwccxv1mPFNSny8RPDl7Wgmli0PPztXG6eBnLvWt4FXYnp7vA==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/types": "2.8.1", - "@polkadot/types-known": "2.8.1", - "@polkadot/util": "^4.2.1", - "@polkadot/util-crypto": "^4.2.1", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/metadata/-/metadata-3.11.1.tgz", + "integrity": "sha512-Z3KtOTX2kU+vvbRDiGY+qyPpF/4xTpnUipoNGijIGQ/EWWcgrm8sSgPzZQhHCfgIqM+jq3g9GvPMYeQp2Yy3ng==", + "requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/types": "3.11.1", + "@polkadot/types-known": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/util-crypto": "^5.9.2", "bn.js": "^4.11.9" } }, "@polkadot/networks": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-4.2.1.tgz", - "integrity": 
"sha512-T1tg0V0uG09Vdce2O4KfEcWO3/fZh4VYt0bmJ6iPwC+x6yv939X2BKvuFTDDVNT3fqBpGzWQlwiTXYQ15o9bGA==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-5.9.2.tgz", + "integrity": "sha512-JQyXJDJTZKQtn8y3HBHWDhiBfijhpiXjVEhY+fKvFcQ82TaKmzhnipYX0EdBoopZbuxpn/BJy6Y1Y/3y85EC+g==", "requires": { - "@babel/runtime": "^7.12.5" + "@babel/runtime": "^7.13.8" } }, "@polkadot/rpc-core": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-2.8.1.tgz", - "integrity": "sha512-tMSH2D5wu28UMhLIjWxZ7br0HRC0T7crYu/BSBE8m3GzLJU4mwsygn2VLDVxQOz4DvHvWh+xQzd2QFc/z02SQw==", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-3.11.1.tgz", + "integrity": "sha512-8KTEZ/c2/TrsTOrrqxxNbyjO5P/033R/yTDgwqL0gwmF+ApnH3vB65YfKqaxn+rBWOMQS0jQhF6KZdtXvRcuYg==", "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/metadata": "2.8.1", - "@polkadot/rpc-provider": "2.8.1", - "@polkadot/types": "2.8.1", - "@polkadot/util": "^4.2.1", - "memoizee": "^0.4.14", - "rxjs": "^6.6.3" + "@babel/runtime": "^7.13.8", + "@polkadot/metadata": "3.11.1", + "@polkadot/rpc-provider": "3.11.1", + "@polkadot/types": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/x-rxjs": "^5.9.2" } }, "@polkadot/rpc-provider": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-2.8.1.tgz", - "integrity": "sha512-PtLZcbNMx6+sN04f4T+j3fqJPYG3qsPX+k1DU5FFDUZ3GVRphfyXmswjbwmH9nkCyr04eBGLb1M1EipsqiP8Ig==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/types": "2.8.1", - "@polkadot/util": "^4.2.1", - "@polkadot/util-crypto": "^4.2.1", - "@polkadot/x-fetch": "^4.2.1", - "@polkadot/x-ws": "^4.2.1", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-3.11.1.tgz", + "integrity": "sha512-5OKh3rAg8l10M+tGLCoxhEoH9uEtK0ehJfOHUmdtwmwIk5aBFZ/ZTeiDkPM+/l84PCzYmp2uzO+YNsyMWUoVLw==", + "requires": { + 
"@babel/runtime": "^7.13.8", + "@polkadot/types": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/util-crypto": "^5.9.2", + "@polkadot/x-fetch": "^5.9.2", + "@polkadot/x-global": "^5.9.2", + "@polkadot/x-ws": "^5.9.2", "bn.js": "^4.11.9", "eventemitter3": "^4.0.7" } }, "@polkadot/types": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-2.8.1.tgz", - "integrity": "sha512-D7K2wG7xytkMJ0s6W/JwzU4LPiQdFThqmRY+kXdbXrYF1UdiUkiS5MMjUUG9CseRITYUigtF6D6B/PiOv9zupQ==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/metadata": "2.8.1", - "@polkadot/util": "^4.2.1", - "@polkadot/util-crypto": "^4.2.1", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-3.11.1.tgz", + "integrity": "sha512-+BWsmveYVkLFx/csvPmU+NhNFhf+0srAt2d0f+7y663nitc/sng1AcEDPbrbXHSQVyPdvI20Mh4Escl4aR+TLw==", + "requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/metadata": "3.11.1", + "@polkadot/util": "^5.9.2", + "@polkadot/util-crypto": "^5.9.2", + "@polkadot/x-rxjs": "^5.9.2", "@types/bn.js": "^4.11.6", - "bn.js": "^4.11.9", - "memoizee": "^0.4.14", - "rxjs": "^6.6.3" + "bn.js": "^4.11.9" } }, "@polkadot/types-known": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-2.8.1.tgz", - "integrity": "sha512-aTriYfu5l8Fz73Ti8rT0q2DfwMIk4eLTqb3VBDR21XcAbjVxZHc24jdhnnnbc6RxvGOg2ertrN9fTz3xhvtPyg==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/types": "2.8.1", - "@polkadot/util": "^4.2.1", + "version": "3.11.1", + "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-3.11.1.tgz", + "integrity": "sha512-ImAxyCdqblmlXaMlgvuXZ6wzZgOYgE40FgWaYRJpFXRGJLDwtcJcpVI+7m/ns5dJ3WujboEMOHVR1HPpquw8Jw==", + "requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/networks": "^5.9.2", + "@polkadot/types": "3.11.1", + "@polkadot/util": "^5.9.2", "bn.js": "^4.11.9" } }, "@polkadot/util": { - "version": "4.2.1", - "resolved": 
"https://registry.npmjs.org/@polkadot/util/-/util-4.2.1.tgz", - "integrity": "sha512-eO/IFbSDjqVPPWPnARDFydy2Kt992Th+8ByleTkCRqWk0aNYaseO1pGKNdwrYbLfUR3JlyWqvJ60lITeS+qAfQ==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-5.9.2.tgz", + "integrity": "sha512-p225NJusnXeu7i2iAb8HAGWiMOUAnRaIyblIjJ4F89ZFZZ4amyliGxe5gKcyjRgxAJ44WdKyBLl/8L3rNv8hmQ==", "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/x-textdecoder": "4.2.1", - "@polkadot/x-textencoder": "4.2.1", + "@babel/runtime": "^7.13.8", + "@polkadot/x-textdecoder": "5.9.2", + "@polkadot/x-textencoder": "5.9.2", "@types/bn.js": "^4.11.6", "bn.js": "^4.11.9", "camelcase": "^5.3.1", - "ip-regex": "^4.2.0" + "ip-regex": "^4.3.0" } }, "@polkadot/util-crypto": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-4.2.1.tgz", - "integrity": "sha512-U1rCdzBQxVTA854HRpt2d4InDnPCfHD15JiWAwIzjBvq7i59EcTbVSqV02fcwet/KpmT3XYa25xoiff+alzCBA==", - "requires": { - "@babel/runtime": "^7.12.5", - "@polkadot/networks": "4.2.1", - "@polkadot/util": "4.2.1", - "@polkadot/wasm-crypto": "^2.0.1", - "@polkadot/x-randomvalues": "4.2.1", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-5.9.2.tgz", + "integrity": "sha512-d8CW2grI3gWi6d/brmcZQWaMPHqQq5z7VcM74/v8D2KZ+hPYL3B0Jn8zGL1vtgMz2qdpWrZdAe89LBC8BvM9bw==", + "requires": { + "@babel/runtime": "^7.13.8", + "@polkadot/networks": "5.9.2", + "@polkadot/util": "5.9.2", + "@polkadot/wasm-crypto": "^3.2.4", + "@polkadot/x-randomvalues": "5.9.2", "base-x": "^3.0.8", + "base64-js": "^1.5.1", "blakejs": "^1.1.0", "bn.js": "^4.11.9", "create-hash": "^1.2.0", - "elliptic": "^6.5.3", + "elliptic": "^6.5.4", "hash.js": "^1.1.7", "js-sha3": "^0.8.0", "scryptsy": "^2.1.0", @@ -171,52 +171,97 @@ } }, "@polkadot/wasm-crypto": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-2.0.1.tgz", - "integrity": 
"sha512-Vb0q4NToCRHXYJwhLWc4NTy77+n1dtJmkiE1tt8j1pmY4IJ4UL25yBxaS8NCS1LGqofdUYK1wwgrHiq5A78PFA==" + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-3.2.4.tgz", + "integrity": "sha512-poeRU91zzZza0ZectT63vBiAqh6DsHCyd3Ogx1U6jsYiRa0yuECMWJx1onvnseDW4tIqsC8vZ/9xHXWwhjTAVg==", + "requires": { + "@babel/runtime": "^7.13.7", + "@polkadot/wasm-crypto-asmjs": "^3.2.4", + "@polkadot/wasm-crypto-wasm": "^3.2.4" + } + }, + "@polkadot/wasm-crypto-asmjs": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-3.2.4.tgz", + "integrity": "sha512-fgN26iL+Pbb35OYsDIRHC74Xnwde+A5u3OjEcQ9zJhM391eOTuKsQ2gyC9TLNAKqeYH8pxsa27yjRO71We7FUA==", + "requires": { + "@babel/runtime": "^7.13.7" + } + }, + "@polkadot/wasm-crypto-wasm": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-3.2.4.tgz", + "integrity": "sha512-Q/3IEpoo7vkTzg40GxehRK000A9oBgjbh/uWCNQ8cMqWLYYCfzZy4NIzw8szpxNiSiGfGL0iZlP4ZSx2ZqEe2g==", + "requires": { + "@babel/runtime": "^7.13.7" + } }, "@polkadot/x-fetch": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-4.2.1.tgz", - "integrity": "sha512-dfVYvCQQXo2AgoWPi4jQp47eIMjAi6glQQ8Y1OsK4sCqmX7BSkNl9ONUKQuH27oi0BkJ/BL7fwDg55JeB5QrKg==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-5.9.2.tgz", + "integrity": "sha512-Nx7GfyOmMdqn5EX+wf6PnIwleQX+aGqzdbYhozNLF54IoNFLHLOs6hCYnBlKbmM1WyukMZMjg2YxyZRQWcHKPQ==", "requires": { - "@babel/runtime": "^7.12.5", - "@types/node-fetch": "^2.5.7", + "@babel/runtime": "^7.13.8", + "@polkadot/x-global": "5.9.2", + "@types/node-fetch": "^2.5.8", + "node-fetch": "^2.6.1" + } + }, + "@polkadot/x-global": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-5.9.2.tgz", + "integrity": 
"sha512-wpY6IAOZMGiJQa8YMm7NeTLi9bwnqqVauR+v7HwyrssnGPuYX8heb6BQLOnnnPh/EK0+M8zNtwRBU48ez0/HOg==", + "requires": { + "@babel/runtime": "^7.13.8", + "@types/node-fetch": "^2.5.8", "node-fetch": "^2.6.1" } }, "@polkadot/x-randomvalues": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-4.2.1.tgz", - "integrity": "sha512-eOfz/KnHYFVl9l0zlhlwomKMzFASgolaQV6uXSN38np+99/+F38wlbOSXFbfZ5H3vmMCt4y/UUTLtoGV/44yLg==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-5.9.2.tgz", + "integrity": "sha512-Zv+eXSP3oBImMnB82y05Doo0A96WUFsQDbnLHI3jFHioIg848cL0nndB9TgBwPaFkZ2oiwoHEC8yxqNI6/jkzQ==", "requires": { - "@babel/runtime": "^7.12.5" + "@babel/runtime": "^7.13.8", + "@polkadot/x-global": "5.9.2" + } + }, + "@polkadot/x-rxjs": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-rxjs/-/x-rxjs-5.9.2.tgz", + "integrity": "sha512-cuF4schclspOfAqEPvbcA3aQ9d3TBy2ORZ8YehxD0ZSHWJNhefHDIUDgS5T3NtPhSKgcEmSlI5TfVfgGFxgVMg==", + "requires": { + "@babel/runtime": "^7.13.8", + "rxjs": "^6.6.6" } }, "@polkadot/x-textdecoder": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-4.2.1.tgz", - "integrity": "sha512-B5t20PryMKr7kdd7q+kmzJPU01l28ZDD06cQ/ZFkybI7avI6PIz/U33ctXxiHOatbBRO6Ez8uzrWd3JmaQ2bGQ==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-5.9.2.tgz", + "integrity": "sha512-MCkgITwGY3tG0UleDkBJEoiKGk/YWYwMM5OR6fNo07RymHRtJ8OLJC+Sej9QD05yz6TIhFaaRRYzmtungIcwTw==", "requires": { - "@babel/runtime": "^7.12.5" + "@babel/runtime": "^7.13.8", + "@polkadot/x-global": "5.9.2" } }, "@polkadot/x-textencoder": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-4.2.1.tgz", - "integrity": "sha512-EHc6RS9kjdP28q6EYlSgHF2MrJCdOTc5EVlqHL7V1UKLh3vD6QaWGYBwbzXNFPXO3RYPO/DKYCu4RxAVSM1OOg==", + 
"version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-5.9.2.tgz", + "integrity": "sha512-IjdLY3xy0nUfps1Bdi0tRxAX7X081YyoiSWExwqUkChdcYGMqMe3T2wqrrt9qBr2IkW8O/tlfYBiZXdII0YCcw==", "requires": { - "@babel/runtime": "^7.12.5" + "@babel/runtime": "^7.13.8", + "@polkadot/x-global": "5.9.2" } }, "@polkadot/x-ws": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-4.2.1.tgz", - "integrity": "sha512-7L1ve2rshBFI/00/0zkX1k0OP/rSD6Tp0Mj/GSg2UvnsmUb2Bb3OpwUJ4aTDr1En6OVGWj9c0fNO0tZR7rtoYA==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-5.9.2.tgz", + "integrity": "sha512-6A/cteC0B3hm64/xG6DNG8qGsHAXJgAy9wjcB38qnoJGYl12hysIFjPeHD+V0W/LOl9payW6kpZzhisLlVOZpQ==", "requires": { - "@babel/runtime": "^7.12.5", + "@babel/runtime": "^7.13.8", + "@polkadot/x-global": "5.9.2", "@types/websocket": "^1.0.1", - "websocket": "^1.0.32" + "websocket": "^1.0.33" } }, "@types/bn.js": { @@ -228,23 +273,23 @@ } }, "@types/node": { - "version": "14.14.31", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.31.tgz", - "integrity": "sha512-vFHy/ezP5qI0rFgJ7aQnjDXwAMrG0KqqIH7tQG5PPv3BWBayOPIQNBjVc/P6hhdZfMx51REc6tfDNXHUio893g==" + "version": "14.14.41", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.41.tgz", + "integrity": "sha512-dueRKfaJL4RTtSa7bWeTK1M+VH+Gns73oCgzvYfHZywRCoPSd8EkXBL0mZ9unPTveBn+D9phZBaxuzpwjWkW0g==" }, "@types/node-fetch": { - "version": "2.5.8", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.5.8.tgz", - "integrity": "sha512-fbjI6ja0N5ZA8TV53RUqzsKNkl9fv8Oj3T7zxW7FGv1GSH7gwJaNF8dzCjrqKaxKeUpTz4yT1DaJFq/omNpGfw==", + "version": "2.5.10", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.5.10.tgz", + "integrity": "sha512-IpkX0AasN44hgEad0gEF/V6EgR5n69VEqPEgnmoM8GsIGro3PowbWs4tR6IhxUTyPLpOn+fiGG6nrQhcmoCuIQ==", "requires": { "@types/node": "*", "form-data": "^3.0.0" } }, 
"@types/websocket": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/websocket/-/websocket-1.0.1.tgz", - "integrity": "sha512-f5WLMpezwVxCLm1xQe/kdPpQIOmL0TXYx2O15VYfYzc7hTIdxiOoOvez+McSIw3b7z/1zGovew9YSL7+h4h7/Q==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@types/websocket/-/websocket-1.0.2.tgz", + "integrity": "sha512-B5m9aq7cbbD/5/jThEr33nUY8WEfVi6A2YKCTOvw5Ldy7mtsOkqRvGjnzy6g7iMMDsgu7xREuCzqATLDLQVKcQ==", "requires": { "@types/node": "*" } @@ -317,6 +362,11 @@ "safe-buffer": "^5.0.1" } }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" + }, "binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", @@ -604,13 +654,6 @@ "es6-iterator": "~2.0.3", "es6-symbol": "~3.1.3", "next-tick": "~1.0.0" - }, - "dependencies": { - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" - } } }, "es6-iterator": { @@ -632,17 +675,6 @@ "ext": "^1.1.2" } }, - "es6-weak-map": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-2.0.3.tgz", - "integrity": "sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==", - "requires": { - "d": "1", - "es5-ext": "^0.10.46", - "es6-iterator": "^2.0.3", - "es6-symbol": "^3.1.1" - } - }, "escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", @@ -655,15 +687,6 @@ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true }, - "event-emitter": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", - 
"integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", - "requires": { - "d": "1", - "es5-ext": "~0.10.14" - } - }, "eventemitter3": { "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", @@ -678,9 +701,9 @@ }, "dependencies": { "type": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.3.0.tgz", - "integrity": "sha512-rgPIqOdfK/4J9FhiVrZ3cveAjRRo5rsQBAIhnylX874y1DX/kEKSVdLsnuHB6l1KTjHyU01VjiMBHgU2adejyg==" + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.5.0.tgz", + "integrity": "sha512-180WMDQaIMm3+7hGXWf12GtdniDEy7nYcyFMKJn/eZz/6tSLXrUN9V0wKSbMjej0I1WHWbpREDEKHtqPQa9NNw==" } } }, @@ -876,11 +899,6 @@ "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", "dev": true }, - "is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, "is-typedarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", @@ -924,14 +942,6 @@ "chalk": "^4.0.0" } }, - "lru-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", - "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", - "requires": { - "es5-ext": "~0.10.2" - } - }, "md5.js": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", @@ -942,32 +952,17 @@ "safe-buffer": "^5.1.2" } }, - "memoizee": { - "version": "0.4.15", - "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.4.15.tgz", - "integrity": "sha512-UBWmJpLZd5STPm7PMUlOw/TSy972M+z8gcyQ5veOnSDRREz/0bmpyTfKt3/51DhEBqCZQn1udM/5flcSPYhkdQ==", - "requires": { - "d": "^1.0.1", - "es5-ext": "^0.10.53", - "es6-weak-map": "^2.0.3", - "event-emitter": "^0.3.5", - "is-promise": "^2.2.2", - "lru-queue": 
"^0.1.0", - "next-tick": "^1.1.0", - "timers-ext": "^0.1.7" - } - }, "mime-db": { - "version": "1.46.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz", - "integrity": "sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ==" + "version": "1.47.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", + "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==" }, "mime-types": { - "version": "2.1.29", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.29.tgz", - "integrity": "sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ==", + "version": "2.1.30", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", + "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", "requires": { - "mime-db": "1.46.0" + "mime-db": "1.47.0" } }, "minimalistic-assert": { @@ -1059,9 +1054,9 @@ "dev": true }, "next-tick": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", - "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" }, "node-fetch": { "version": "2.6.1", @@ -1179,9 +1174,9 @@ } }, "rxjs": { - "version": "6.6.6", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.6.tgz", - "integrity": "sha512-/oTwee4N4iWzAMAL9xdGKjkEHmIwupR3oXbQjCKywF1BeFohswF3vZdogbmEF6pZkOsXTzWkrZszrWpQTByYVg==", + "version": "6.6.7", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", + "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", "requires": { "tslib": "^1.9.0" } @@ -1256,15 +1251,6 @@ "has-flag": 
"^4.0.0" } }, - "timers-ext": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", - "integrity": "sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", - "requires": { - "es5-ext": "~0.10.46", - "next-tick": "1" - } - }, "to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -1317,9 +1303,9 @@ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" }, "websocket": { - "version": "1.0.33", - "resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.33.tgz", - "integrity": "sha512-XwNqM2rN5eh3G2CUQE3OHZj+0xfdH42+OFK6LdC2yqiC0YU8e5UK0nYre220T0IyyN031V/XOvtHvXozvJYFWA==", + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.34.tgz", + "integrity": "sha512-PRDso2sGwF6kM75QykIesBijKSVceR6jL2G8NGYyq2XrItNC2P5/qL5XeR056GhA+Ly7JMFvJb9I312mJfmqnQ==", "requires": { "bufferutil": "^4.0.1", "debug": "^2.2.0", diff --git a/tests/polkadotjs_test/test_transaction.js b/tests/polkadotjs_test/test_transaction.js index 5e9373e036..1d2db7e699 100644 --- a/tests/polkadotjs_test/test_transaction.js +++ b/tests/polkadotjs_test/test_transaction.js @@ -19,11 +19,8 @@ async function main() { const ADDR_Bob = '0x90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22'; // bob 5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty - const transfer = await api.tx.balances.transfer(bobKey.address, 12345) - .signAndSend(aliceKey, {era: 0, blockHash: '0x64597c55a052d484d9ff357266be326f62573bb4fbdbb3cd49f219396fcebf78', blockNumber:0, genesisHash: '0x64597c55a052d484d9ff357266be326f62573bb4fbdbb3cd49f219396fcebf78', nonce: 1, tip: 0, transactionVersion: 1}); - + const transfer = await api.tx.balances.transfer(bobKey.address, 12345).signAndSend(aliceKey); console.log(`hxHash ${transfer}`); - } main().catch(console.error); diff --git a/tests/stress/errors.go b/tests/stress/errors.go index 
488e028ef8..f7d1fb3119 100644 --- a/tests/stress/errors.go +++ b/tests/stress/errors.go @@ -6,8 +6,8 @@ import ( //nolint var ( - errFinalizedBlockMismatch = errors.New("node finalized head hashes don't match") - errNoFinalizedBlock = errors.New("did not finalize block for round") + errFinalizedBlockMismatch = errors.New("node finalised head hashes don't match") + errNoFinalizedBlock = errors.New("did not finalise block for round") errNoBlockAtNumber = errors.New("no blocks found for given number") errBlocksAtNumberMismatch = errors.New("different blocks found for given number") errChainHeadMismatch = errors.New("node chain head hashes don't match") diff --git a/tests/stress/grandpa_test.go b/tests/stress/grandpa_test.go index e9be10c47b..737023a567 100644 --- a/tests/stress/grandpa_test.go +++ b/tests/stress/grandpa_test.go @@ -68,7 +68,7 @@ func TestStress_Grandpa_ThreeAuthorities(t *testing.T) { for i := 1; i < numRounds+1; i++ { fin, err := compareFinalizedHeadsWithRetry(t, nodes, uint64(i)) require.NoError(t, err) - t.Logf("finalized hash in round %d: %s", i, fin) + t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -90,7 +90,7 @@ func TestStress_Grandpa_SixAuthorities(t *testing.T) { for i := 1; i < numRounds+1; i++ { fin, err := compareFinalizedHeadsWithRetry(t, nodes, uint64(i)) require.NoError(t, err) - t.Logf("finalized hash in round %d: %s", i, fin) + t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -115,7 +115,7 @@ func TestStress_Grandpa_NineAuthorities(t *testing.T) { for i := 1; i < numRounds+1; i++ { fin, err := compareFinalizedHeadsWithRetry(t, nodes, uint64(i)) require.NoError(t, err) - t.Logf("finalized hash in round %d: %s", i, fin) + t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -137,7 +137,7 @@ func TestStress_Grandpa_CatchUp(t *testing.T) { }() time.Sleep(time.Second * 70) // let some rounds run - + //nolint node, err := utils.RunGossamer(t, numNodes-1, utils.TestDir(t, utils.KeyList[numNodes-1]), utils.GenesisSixAuths, 
utils.ConfigDefault, false) require.NoError(t, err) nodes = append(nodes, node) @@ -146,6 +146,6 @@ func TestStress_Grandpa_CatchUp(t *testing.T) { for i := 1; i < numRounds+1; i++ { fin, err := compareFinalizedHeadsWithRetry(t, nodes, uint64(i)) require.NoError(t, err) - t.Logf("finalized hash in round %d: %s", i, fin) + t.Logf("finalised hash in round %d: %s", i, fin) } } diff --git a/tests/stress/helpers.go b/tests/stress/helpers.go index 6751cb2223..32d6078ee2 100644 --- a/tests/stress/helpers.go +++ b/tests/stress/helpers.go @@ -148,12 +148,12 @@ doneBlockProduction: } // compareFinalizedHeads calls getFinalizedHeadByRound for each node in the array -// it returns a map of finalizedHead hashes to node key names, and an error if the hashes don't all match +// it returns a map of finalisedHead hashes to node key names, and an error if the hashes don't all match func compareFinalizedHeads(t *testing.T, nodes []*utils.Node) (map[common.Hash][]string, error) { hashes := make(map[common.Hash][]string) for _, node := range nodes { hash := utils.GetFinalizedHead(t, node) - logger.Info("got finalized head from node", "hash", hash, "node", node.Key) + logger.Info("got finalised head from node", "hash", hash, "node", node.Key) hashes[hash] = append(hashes[hash], node.Key) } @@ -170,7 +170,7 @@ func compareFinalizedHeads(t *testing.T, nodes []*utils.Node) (map[common.Hash][ } // compareFinalizedHeadsByRound calls getFinalizedHeadByRound for each node in the array -// it returns a map of finalizedHead hashes to node key names, and an error if the hashes don't all match +// it returns a map of finalisedHead hashes to node key names, and an error if the hashes don't all match func compareFinalizedHeadsByRound(t *testing.T, nodes []*utils.Node, round uint64) (map[common.Hash][]string, error) { hashes := make(map[common.Hash][]string) for _, node := range nodes { @@ -179,7 +179,7 @@ func compareFinalizedHeadsByRound(t *testing.T, nodes []*utils.Node, round uint6 return nil, 
err } - logger.Info("got finalized head from node", "hash", hash, "node", node.Key, "round", round) + logger.Info("got finalised head from node", "hash", hash, "node", node.Key, "round", round) hashes[hash] = append(hashes[hash], node.Key) } @@ -196,7 +196,7 @@ func compareFinalizedHeadsByRound(t *testing.T, nodes []*utils.Node, round uint6 } // compareFinalizedHeadsWithRetry calls compareFinalizedHeadsByRound, retrying up to maxRetries times if it errors. -// it returns the finalized hash if it succeeds +// it returns the finalised hash if it succeeds func compareFinalizedHeadsWithRetry(t *testing.T, nodes []*utils.Node, round uint64) (common.Hash, error) { var hashes map[common.Hash][]string var err error diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index 23c7ee27a8..2bf9be9d55 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go @@ -28,7 +28,6 @@ import ( gosstypes "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/scale" "github.com/ChainSafe/gossamer/tests/utils" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v2" "github.com/centrifuge/go-substrate-rpc-client/v2/signature" @@ -99,6 +98,7 @@ func TestSync_SingleBlockProducer(t *testing.T) { utils.SetLogLevel(log.LvlInfo) // start block producing node first + //nolint node, err := utils.RunGossamer(t, numNodes-1, utils.TestDir(t, utils.KeyList[numNodes-1]), utils.GenesisDefault, utils.ConfigBABEMaxThreshold, false) require.NoError(t, err) @@ -311,6 +311,7 @@ func TestSync_Restart(t *testing.T) { utils.SetLogLevel(log.LvlInfo) // start block producing node first + //nolint node, err := utils.RunGossamer(t, numNodes-1, utils.TestDir(t, utils.KeyList[numNodes-1]), utils.GenesisDefault, utils.ConfigBABEMaxThreshold, false) require.NoError(t, err) @@ -358,21 +359,30 @@ func TestSync_Restart(t *testing.T) { } func TestPendingExtrinsic(t *testing.T) { - // TODO: Fix this test and enable it. 
Node syncing takes time. - t.Skip("skipping TestPendingExtrinsic") - t.Log("starting gossamer...") utils.CreateConfigBabeMaxThreshold() numNodes := 3 // index of node to submit tx to - idx := numNodes - 1 // TODO: randomize this + idx := numNodes - 1 // TODO: randomise this // start block producing node first node, err := utils.RunGossamer(t, numNodes-1, utils.TestDir(t, utils.KeyList[numNodes-1]), utils.GenesisDefault, utils.ConfigBABEMaxThreshold, false) require.NoError(t, err) + // Start rest of nodes + nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDefault, utils.ConfigNoBABE) + require.NoError(t, err) + nodes = append(nodes, node) + + defer func() { + t.Log("going to tear down gossamer...") + os.Remove(utils.ConfigBABEMaxThreshold) + errList := utils.StopNodes(t, nodes) + require.Len(t, errList, 0) + }() + // send tx to non-authority node api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", node.RPCPort)) require.NoError(t, err) @@ -428,20 +438,6 @@ func TestPendingExtrinsic(t *testing.T) { require.NoError(t, err) require.NotEqual(t, hash, common.Hash{}) - // wait and start rest of nodes - // TODO: it seems like the non-authority nodes don't sync properly if started before submitting the tx - time.Sleep(time.Second * 20) - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDefault, utils.ConfigNoBABE) - require.NoError(t, err) - nodes = append(nodes, node) - - defer func() { - t.Log("going to tear down gossamer...") - os.Remove(utils.ConfigBABEMaxThreshold) - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() - time.Sleep(time.Second * 10) // wait until there's no more pending extrinsics @@ -490,20 +486,14 @@ func TestPendingExtrinsic(t *testing.T) { var included bool for _, ext := range resExts { - dec, err := scale.Decode(ext, []byte{}) //nolint - require.NoError(t, err) - decExt := dec.([]byte) - logger.Debug("comparing", "expected", extEnc, "in block", 
common.BytesToHex(decExt)) - if strings.Compare(extEnc, common.BytesToHex(decExt)) == 0 { + logger.Debug("comparing", "expected", extEnc, "in block", common.BytesToHex(ext)) + if strings.Compare(extEnc, common.BytesToHex(ext)) == 0 { included = true } } require.True(t, included) - // wait for nodes to sync - // TODO: seems like nodes don't sync properly :/ - time.Sleep(time.Second * 45) hashes, err := compareBlocksByNumberWithRetry(t, nodes, extInBlock.String()) require.NoError(t, err, hashes) } diff --git a/tests/sync/sync_test.go b/tests/sync/sync_test.go index 5f0a63a147..5b45bc7501 100644 --- a/tests/sync/sync_test.go +++ b/tests/sync/sync_test.go @@ -49,7 +49,7 @@ func TestMain(m *testing.M) { } fw, err := utils.InitFramework(3) if err != nil { - log.Fatal(fmt.Errorf("error initializing test framework")) + log.Fatal(fmt.Errorf("error initialising test framework")) } framework = *fw // Start all tests diff --git a/tests/utils/chain.go b/tests/utils/chain.go index 322c2d4947..d43b6529a2 100644 --- a/tests/utils/chain.go +++ b/tests/utils/chain.go @@ -81,7 +81,7 @@ func GetBlockHash(t *testing.T, node *Node, num string) (common.Hash, error) { return common.MustHexToHash(hash), nil } -// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalized head +// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalised head func GetFinalizedHead(t *testing.T, node *Node) common.Hash { respBody, err := PostRPC(ChainGetFinalizedHead, NewEndpoint(node.RPCPort), "[]") require.NoError(t, err) @@ -92,7 +92,7 @@ func GetFinalizedHead(t *testing.T, node *Node) common.Hash { return common.MustHexToHash(hash) } -// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalized head at a given round +// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalised head at a given round // TODO: add setID, hard-coded at 1 for now func GetFinalizedHeadByRound(t 
*testing.T, node *Node, round uint64) (common.Hash, error) { p := strconv.Itoa(int(round)) diff --git a/tests/utils/gossamer_utils.go b/tests/utils/gossamer_utils.go index 2511dfd029..0106f150a1 100644 --- a/tests/utils/gossamer_utils.go +++ b/tests/utils/gossamer_utils.go @@ -89,7 +89,7 @@ type Node struct { WSPort string } -// InitGossamer initializes given node number and returns node reference +// InitGossamer initialises given node number and returns node reference func InitGossamer(idx int, basePath, genesis, config string) (*Node, error) { //nolint cmdInit := exec.Command(gossamerCMD, "init", @@ -100,15 +100,15 @@ func InitGossamer(idx int, basePath, genesis, config string) (*Node, error) { ) //add step for init - logger.Info("initializing gossamer...", "cmd", cmdInit) + logger.Info("initialising gossamer...", "cmd", cmdInit) stdOutInit, err := cmdInit.CombinedOutput() if err != nil { fmt.Printf("%s", stdOutInit) return nil, err } - // TODO: get init exit code to see if node was successfully initialized - logger.Info("initialized gossamer!", "node", idx) + // TODO: get init exit code to see if node was successfully initialised + logger.Info("initialised gossamer!", "node", idx) return &Node{ Idx: idx, @@ -213,11 +213,11 @@ func StartGossamer(t *testing.T, node *Node, websocket bool) error { return nil } -// RunGossamer will initialize and start a gossamer instance +// RunGossamer will initialise and start a gossamer instance func RunGossamer(t *testing.T, idx int, basepath, genesis, config string, websocket bool) (*Node, error) { node, err := InitGossamer(idx, basepath, genesis, config) if err != nil { - logger.Crit("could not initialize gossamer", "error", err) + logger.Crit("could not initialise gossamer", "error", err) os.Exit(1) } @@ -261,7 +261,7 @@ func KillProcess(t *testing.T, cmd *exec.Cmd) error { return err } -// InitNodes initializes given number of nodes +// InitNodes initialises given number of nodes func InitNodes(num int, config string) 
([]*Node, error) { var nodes []*Node tempDir, err := ioutil.TempDir("", "gossamer-stress-")