diff --git a/docs/gno-infrastructure/setting-up-a-local-chain.md b/docs/gno-infrastructure/setting-up-a-local-chain.md index 088e5a01495..e4e3dced37e 100644 --- a/docs/gno-infrastructure/setting-up-a-local-chain.md +++ b/docs/gno-infrastructure/setting-up-a-local-chain.md @@ -10,10 +10,12 @@ In this tutorial, you will learn how to start a local Gno node (and chain!). Additionally, you will see the different options you can use to make your Gno instance unique. ## Prerequisites + - **Git** - **`make` (for running Makefiles)** - **Go 1.21+** -- **Go Environment Setup**: Ensure you have Go set up as outlined in the [Go official installation documentation](https://go.dev/doc/install) for your environment +- **Go Environment Setup**: Ensure you have Go set up as outlined in + the [Go official installation documentation](https://go.dev/doc/install) for your environment ## Installation @@ -153,7 +155,7 @@ A couple of things to note: - `gnoland config init` initializes a default configuration - `gnoland secrets init` initializes new node secrets (validator key, node p2p key) -Essentially, `gnoland start --lazy` is simply a combination of `gnoland secrets generate` and `gnoland config generate`, +Essentially, `gnoland start --lazy` is simply a combination of `gnoland secrets init` and `gnoland config init`, with the default options enabled. #### Changing the node configuration @@ -244,7 +246,7 @@ locally will be the validator node for the new Gno network. 
To display the generated node key data, run the following command: ```shell -gnoland secrets get ValidatorPrivateKey +gnoland secrets get validator_key ``` This will display the information we need for updating the `genesis.json`: diff --git a/gno.land/cmd/gnoland/config.go b/gno.land/cmd/gnoland/config.go index eed553901f4..fca72add21e 100644 --- a/gno.land/cmd/gnoland/config.go +++ b/gno.land/cmd/gnoland/config.go @@ -1,6 +1,7 @@ package main import ( + "encoding/json" "flag" "fmt" "path/filepath" @@ -58,6 +59,62 @@ func constructConfigPath(nodeDir string) string { ) } +// printKeyValue searches and prints the given key value in JSON +func printKeyValue[T *secrets | *config.Config]( + input T, + raw bool, + io commands.IO, + key ...string, +) error { + // prepareOutput prepares the JSON output, taking into account raw mode + prepareOutput := func(input any) (string, error) { + encoded, err := json.MarshalIndent(input, "", " ") + if err != nil { + return "", fmt.Errorf("unable to marshal JSON, %w", err) + } + + output := string(encoded) + + if raw { + if err := json.Unmarshal(encoded, &output); err != nil { + return "", fmt.Errorf("unable to unmarshal raw JSON, %w", err) + } + } + + return output, nil + } + + if len(key) == 0 { + // Print the entire input + output, err := prepareOutput(input) + if err != nil { + return err + } + + io.Println(output) + + return nil + } + + // Get the value using reflect + secretValue := reflect.ValueOf(input).Elem() + + // Get the value path, with sections separated out by a period + field, err := getFieldAtPath(secretValue, strings.Split(key[0], ".")) + if err != nil { + return err + } + + output, err := prepareOutput(field.Interface()) + if err != nil { + return err + } + + io.Println(output) + + return nil +} + // getFieldAtPath fetches the given field from the given path func getFieldAtPath(currentValue reflect.Value, path []string) (*reflect.Value, error) { // Look at the current section, and figure out if diff --git 
a/gno.land/cmd/gnoland/config_get.go b/gno.land/cmd/gnoland/config_get.go index 33a98608b85..1fd4027ec60 100644 --- a/gno.land/cmd/gnoland/config_get.go +++ b/gno.land/cmd/gnoland/config_get.go @@ -3,9 +3,8 @@ package main import ( "context" "errors" + "flag" "fmt" - "reflect" - "strings" "github.com/gnolang/gno/tm2/pkg/bft/config" "github.com/gnolang/gno/tm2/pkg/commands" @@ -13,14 +12,20 @@ import ( var errInvalidConfigGetArgs = errors.New("invalid number of config get arguments provided") +type configGetCfg struct { + configCfg + + raw bool +} + // newConfigGetCmd creates the config get command func newConfigGetCmd(io commands.IO) *commands.Command { - cfg := &configCfg{} + cfg := &configGetCfg{} cmd := commands.NewCommand( commands.Metadata{ Name: "get", - ShortUsage: "config get ", + ShortUsage: "config get [flags] []", ShortHelp: "shows the Gno node configuration", LongHelp: "Shows the Gno node configuration at the given path " + "by fetching the option specified at ", @@ -34,40 +39,33 @@ func newConfigGetCmd(io commands.IO) *commands.Command { return cmd } -func execConfigGet(cfg *configCfg, io commands.IO, args []string) error { +func (c *configGetCfg) RegisterFlags(fs *flag.FlagSet) { + c.configCfg.RegisterFlags(fs) + + fs.BoolVar( + &c.raw, + "raw", + false, + "output raw string values, rather than as JSON strings", + ) +} + +func execConfigGet(cfg *configGetCfg, io commands.IO, args []string) error { // Load the config loadedCfg, err := config.LoadConfigFile(cfg.configPath) if err != nil { return fmt.Errorf("%s, %w", tryConfigInit, err) } - // Make sure the edit arguments are valid - if len(args) != 1 { + // Make sure the get arguments are valid + if len(args) > 1 { return errInvalidConfigGetArgs } // Find and print the config field, if any - if err := printConfigField(loadedCfg, args[0], io); err != nil { - return fmt.Errorf("unable to update config field, %w", err) - } - - return nil -} - -// printConfigField prints the value of the field at the given 
path -func printConfigField(config *config.Config, key string, io commands.IO) error { - // Get the config value using reflect - configValue := reflect.ValueOf(config).Elem() - - // Get the value path, with sections separated out by a period - path := strings.Split(key, ".") - - field, err := getFieldAtPath(configValue, path) - if err != nil { - return err + if err := printKeyValue(loadedCfg, cfg.raw, io, args...); err != nil { + return fmt.Errorf("unable to get config field, %w", err) } - io.Printf("%v", field.Interface()) - return nil } diff --git a/gno.land/cmd/gnoland/config_get_test.go b/gno.land/cmd/gnoland/config_get_test.go index 0639e3be60f..f2ddc5ca6d0 100644 --- a/gno.land/cmd/gnoland/config_get_test.go +++ b/gno.land/cmd/gnoland/config_get_test.go @@ -3,11 +3,13 @@ package main import ( "bytes" "context" - "fmt" - "strconv" + "encoding/json" + "strings" "testing" + "time" "github.com/gnolang/gno/tm2/pkg/bft/config" + "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore/types" "github.com/gnolang/gno/tm2/pkg/commands" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,7 +36,8 @@ func TestConfig_Get_Invalid(t *testing.T) { type testGetCase struct { name string field string - verifyFn func(*config.Config, string) + verifyFn func(*config.Config, []byte) + isRaw bool } // verifyGetTestTableCommon is the common test table @@ -57,6 +60,10 @@ func verifyGetTestTableCommon(t *testing.T, testTable []testGetCase) { path, } + if testCase.isRaw { + args = append(args, "--raw") + } + // Create the command IO mockOut := new(bytes.Buffer) @@ -75,11 +82,25 @@ func verifyGetTestTableCommon(t *testing.T, testTable []testGetCase) { loadedCfg, err := config.LoadConfigFile(path) require.NoError(t, err) - testCase.verifyFn(loadedCfg, mockOut.String()) + testCase.verifyFn(loadedCfg, mockOut.Bytes()) }) } } +func unmarshalJSONCommon[T any](t *testing.T, input []byte) T { + t.Helper() + + var output T + + require.NoError(t, json.Unmarshal(input, 
&output)) + + return output +} + +func escapeNewline(value []byte) string { + return strings.ReplaceAll(string(value), "\n", "") +} + func TestConfig_Get_Base(t *testing.T) { t.Parallel() @@ -87,99 +108,194 @@ func TestConfig_Get_Base(t *testing.T) { { "root dir fetched", "home", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.RootDir, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RootDir, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "root dir fetched, raw", + "home", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RootDir, escapeNewline(value)) }, + true, }, { "proxy app fetched", "proxy_app", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.ProxyApp, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ProxyApp, unmarshalJSONCommon[string](t, value)) }, + false, + }, + { + "proxy app fetched, raw", + "proxy_app", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ProxyApp, escapeNewline(value)) + }, + true, }, { "moniker fetched", "moniker", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.Moniker, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Moniker, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "moniker fetched, raw", + "moniker", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Moniker, escapeNewline(value)) }, + true, }, { "fast sync mode fetched", "fast_sync", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, loadedCfg.FastSyncMode, boolVal) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.FastSyncMode, unmarshalJSONCommon[bool](t, value)) }, + false, }, { "db backend fetched", "db_backend", - func(loadedCfg *config.Config, value string) 
{ - assert.Equal(t, loadedCfg.DBBackend, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.DBBackend, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "db backend fetched, raw", + "db_backend", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.DBBackend, escapeNewline(value)) }, + true, }, { "db path fetched", "db_dir", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.DBPath, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.DBPath, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "db path fetched, raw", + "db_dir", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.DBPath, escapeNewline(value)) }, + true, }, { "validator key fetched", "priv_validator_key_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.PrivValidatorKey, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorKey, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "validator key fetched, raw", + "priv_validator_key_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorKey, escapeNewline(value)) }, + true, }, { "validator state file fetched", "priv_validator_state_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.PrivValidatorState, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorState, unmarshalJSONCommon[string](t, value)) }, + false, + }, + { + "validator state file fetched, raw", + "priv_validator_state_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorState, escapeNewline(value)) + }, + true, }, { "validator listen addr fetched", "priv_validator_laddr", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.PrivValidatorListenAddr, value) + 
func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorListenAddr, unmarshalJSONCommon[string](t, value)) }, + false, + }, + { + "validator listen addr fetched, raw", + "priv_validator_laddr", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.PrivValidatorListenAddr, escapeNewline(value)) + }, + true, }, { "node key path fetched", "node_key_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.NodeKey, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.NodeKey, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "node key path fetched, raw", + "node_key_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.NodeKey, escapeNewline(value)) }, + true, }, { "abci fetched", "abci", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.ABCI, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ABCI, unmarshalJSONCommon[string](t, value)) }, + false, + }, + { + "abci fetched, raw", + "abci", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ABCI, escapeNewline(value)) + }, + true, }, { "profiling listen address fetched", "prof_laddr", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.ProfListenAddress, value) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ProfListenAddress, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "profiling listen address fetched, raw", + "prof_laddr", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.ProfListenAddress, escapeNewline(value)) }, + true, }, { "filter peers flag fetched", "filter_peers", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, loadedCfg.FilterPeers, boolVal) + func(loadedCfg *config.Config, value 
[]byte) { + assert.Equal(t, loadedCfg.FilterPeers, unmarshalJSONCommon[bool](t, value)) }, + false, }, } @@ -193,105 +309,178 @@ func TestConfig_Get_Consensus(t *testing.T) { { "root dir updated", "consensus.home", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.RootDir) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Consensus.RootDir, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "root dir updated, raw", + "consensus.home", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Consensus.RootDir, escapeNewline(value)) }, + true, }, { "WAL path updated", "consensus.wal_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.WALPath) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Consensus.WALPath, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "WAL path updated, raw", + "consensus.wal_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Consensus.WALPath, escapeNewline(value)) }, + true, }, { "propose timeout updated", "consensus.timeout_propose", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutPropose.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutPropose, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "propose timeout delta updated", "consensus.timeout_propose_delta", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutProposeDelta.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutProposeDelta, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "prevote timeout updated", "consensus.timeout_prevote", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, 
value, loadedCfg.Consensus.TimeoutPrevote.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutPrevote, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "prevote timeout delta updated", "consensus.timeout_prevote_delta", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutPrevoteDelta.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutPrevoteDelta, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "precommit timeout updated", "consensus.timeout_precommit", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutPrecommit.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutPrecommit, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "precommit timeout delta updated", "consensus.timeout_precommit_delta", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutPrecommitDelta.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutPrecommitDelta, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "commit timeout updated", "consensus.timeout_commit", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.TimeoutCommit.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.TimeoutCommit, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "skip commit timeout toggle updated", "consensus.skip_timeout_commit", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.Consensus.SkipTimeoutCommit) + func(loadedCfg *config.Config, value []byte) { 
+ assert.Equal( + t, + loadedCfg.Consensus.SkipTimeoutCommit, + unmarshalJSONCommon[bool](t, value), + ) }, + false, }, { "create empty blocks toggle updated", "consensus.create_empty_blocks", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - assert.Equal(t, boolVal, loadedCfg.Consensus.CreateEmptyBlocks) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.CreateEmptyBlocks, + unmarshalJSONCommon[bool](t, value), + ) }, + false, }, { "create empty blocks interval updated", "consensus.create_empty_blocks_interval", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.CreateEmptyBlocksInterval.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.CreateEmptyBlocksInterval, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "peer gossip sleep duration updated", "consensus.peer_gossip_sleep_duration", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.PeerGossipSleepDuration.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.PeerGossipSleepDuration, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, { "peer query majority sleep duration updated", "consensus.peer_query_maj23_sleep_duration", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Consensus.PeerQueryMaj23SleepDuration.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.Consensus.PeerQueryMaj23SleepDuration, + unmarshalJSONCommon[time.Duration](t, value), + ) }, + false, }, } @@ -303,18 +492,40 @@ func TestConfig_Get_Events(t *testing.T) { testTable := []testGetCase{ { - "event store type updated", + "event store type", + "tx_event_store.event_store_type", + func(loadedCfg *config.Config, value []byte) { + 
assert.Equal( + t, + loadedCfg.TxEventStore.EventStoreType, + unmarshalJSONCommon[string](t, value), + ) + }, + false, + }, + { + "event store type, raw", "tx_event_store.event_store_type", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.TxEventStore.EventStoreType) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.TxEventStore.EventStoreType, + escapeNewline(value), + ) }, + true, }, { - "event store params updated", + "event store params", "tx_event_store.event_store_params", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%v", loadedCfg.TxEventStore.Params)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal( + t, + loadedCfg.TxEventStore.Params, + unmarshalJSONCommon[types.EventStoreParams](t, value), + ) }, + false, }, } @@ -326,142 +537,196 @@ func TestConfig_Get_P2P(t *testing.T) { testTable := []testGetCase{ { - "root dir updated", + "root dir", "p2p.home", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.RootDir) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.RootDir, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "listen address updated", + "root dir, raw", + "p2p.home", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.RootDir, escapeNewline(value)) + }, + true, + }, + { + "listen address", "p2p.laddr", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.ListenAddress) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.ListenAddress, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "external address updated", + "listen address, raw", + "p2p.laddr", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.ListenAddress, escapeNewline(value)) + }, + true, + }, + { + "external address", + "p2p.external_address", + func(loadedCfg 
*config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.ExternalAddress, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "external address, raw", "p2p.external_address", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.ExternalAddress) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.ExternalAddress, escapeNewline(value)) }, + true, }, { - "seeds updated", + "seeds", "p2p.seeds", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.Seeds) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.Seeds, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "persistent peers updated", + "seeds, raw", + "p2p.seeds", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.Seeds, escapeNewline(value)) + }, + true, + }, + { + "persistent peers", + "p2p.persistent_peers", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.PersistentPeers, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "persistent peers, raw", "p2p.persistent_peers", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.PersistentPeers) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.PersistentPeers, escapeNewline(value)) }, + true, }, { - "upnp toggle updated", + "upnp toggle", "p2p.upnp", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.P2P.UPNP) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.UPNP, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "max inbound peers updated", + "max inbound peers", "p2p.max_num_inbound_peers", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.P2P.MaxNumInboundPeers)) + func(loadedCfg 
*config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.MaxNumInboundPeers, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "max outbound peers updated", + "max outbound peers", "p2p.max_num_outbound_peers", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.P2P.MaxNumOutboundPeers)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.MaxNumOutboundPeers, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "flush throttle timeout updated", + "flush throttle timeout", "p2p.flush_throttle_timeout", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.FlushThrottleTimeout.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.FlushThrottleTimeout, unmarshalJSONCommon[time.Duration](t, value)) }, + false, }, { - "max package payload size updated", + "max package payload size", "p2p.max_packet_msg_payload_size", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.P2P.MaxPacketMsgPayloadSize)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.MaxPacketMsgPayloadSize, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "send rate updated", + "send rate", "p2p.send_rate", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.P2P.SendRate)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.SendRate, unmarshalJSONCommon[int64](t, value)) }, + false, }, { - "receive rate updated", + "receive rate", "p2p.recv_rate", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.P2P.RecvRate)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.RecvRate, unmarshalJSONCommon[int64](t, value)) }, + false, }, { - "pex reactor toggle updated", + "pex reactor toggle", "p2p.pex", - 
func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.P2P.PexReactor) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.PexReactor, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "seed mode updated", + "seed mode", "p2p.seed_mode", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.P2P.SeedMode) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.SeedMode, unmarshalJSONCommon[bool](t, value)) + }, + false, + }, + { + "private peer IDs", + "p2p.private_peer_ids", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.PrivatePeerIDs, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "private peer IDs updated", + "private peer IDs, raw", "p2p.private_peer_ids", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.PrivatePeerIDs) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.PrivatePeerIDs, escapeNewline(value)) }, + true, }, { - "allow duplicate IP updated", + "allow duplicate IP", "p2p.allow_duplicate_ip", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.P2P.AllowDuplicateIP) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.AllowDuplicateIP, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "handshake timeout updated", + "handshake timeout", "p2p.handshake_timeout", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.HandshakeTimeout.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.HandshakeTimeout, unmarshalJSONCommon[time.Duration](t, value)) }, + false, }, { - "dial 
timeout updated", + "dial timeout", "p2p.dial_timeout", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.P2P.DialTimeout.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.P2P.DialTimeout, unmarshalJSONCommon[time.Duration](t, value)) }, + false, }, } @@ -473,105 +738,156 @@ func TestConfig_Get_RPC(t *testing.T) { testTable := []testGetCase{ { - "root dir updated", + "root dir", "rpc.home", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.RootDir) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.RootDir, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "listen address updated", + "root dir, raw", + "rpc.home", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.RootDir, escapeNewline(value)) + }, + true, + }, + { + "listen address", "rpc.laddr", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.ListenAddress) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.ListenAddress, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "CORS Allowed Origins updated", + "listen address, raw", + "rpc.laddr", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.ListenAddress, escapeNewline(value)) + }, + true, + }, + { + "CORS Allowed Origins", "rpc.cors_allowed_origins", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%v", loadedCfg.RPC.CORSAllowedOrigins)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.CORSAllowedOrigins, unmarshalJSONCommon[[]string](t, value)) }, + false, }, { - "CORS Allowed Methods updated", + "CORS Allowed Methods", "rpc.cors_allowed_methods", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%v", loadedCfg.RPC.CORSAllowedMethods)) + func(loadedCfg *config.Config, value 
[]byte) { + assert.Equal(t, loadedCfg.RPC.CORSAllowedMethods, unmarshalJSONCommon[[]string](t, value)) }, + false, }, { - "CORS Allowed Headers updated", + "CORS Allowed Headers", "rpc.cors_allowed_headers", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%v", loadedCfg.RPC.CORSAllowedHeaders)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.CORSAllowedHeaders, unmarshalJSONCommon[[]string](t, value)) }, + false, }, { - "GRPC listen address updated", + "GRPC listen address", "rpc.grpc_laddr", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.GRPCListenAddress) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.GRPCListenAddress, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "GRPC max open connections updated", + "GRPC listen address, raw", + "rpc.grpc_laddr", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.GRPCListenAddress, escapeNewline(value)) + }, + true, + }, + { + "GRPC max open connections", "rpc.grpc_max_open_connections", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.RPC.GRPCMaxOpenConnections)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.GRPCMaxOpenConnections, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "unsafe value updated", + "unsafe value", "rpc.unsafe", - func(loadedCfg *config.Config, value string) { - boolVal, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVal, loadedCfg.RPC.Unsafe) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.Unsafe, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "rpc max open connections updated", + "rpc max open connections", "rpc.max_open_connections", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", 
loadedCfg.RPC.MaxOpenConnections)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.MaxOpenConnections, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "tx commit broadcast timeout updated", + "tx commit broadcast timeout", "rpc.timeout_broadcast_tx_commit", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.TimeoutBroadcastTxCommit.String()) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.TimeoutBroadcastTxCommit, unmarshalJSONCommon[time.Duration](t, value)) }, + false, }, { - "max body bytes updated", + "max body bytes", "rpc.max_body_bytes", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.RPC.MaxBodyBytes)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.MaxBodyBytes, unmarshalJSONCommon[int64](t, value)) }, + false, }, { - "max header bytes updated", + "max header bytes", "rpc.max_header_bytes", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.RPC.MaxHeaderBytes)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.MaxHeaderBytes, unmarshalJSONCommon[int](t, value)) + }, + false, + }, + { + "TLS cert file", + "rpc.tls_cert_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.TLSCertFile, unmarshalJSONCommon[string](t, value)) }, + false, }, { - "TLS cert file updated", + "TLS cert file, raw", "rpc.tls_cert_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.TLSCertFile) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.TLSCertFile, escapeNewline(value)) }, + true, }, { - "TLS key file updated", + "TLS key file", "rpc.tls_key_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.RPC.TLSKeyFile) + func(loadedCfg *config.Config, value []byte) { + 
assert.Equal(t, loadedCfg.RPC.TLSKeyFile, unmarshalJSONCommon[string](t, value)) }, + false, + }, + { + "TLS key file, raw", + "rpc.tls_key_file", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.RPC.TLSKeyFile, escapeNewline(value)) + }, + true, }, } @@ -583,59 +899,76 @@ func TestConfig_Get_Mempool(t *testing.T) { testTable := []testGetCase{ { - "root dir updated", + "root dir", + "mempool.home", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.RootDir, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "root dir, raw", "mempool.home", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Mempool.RootDir) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.RootDir, escapeNewline(value)) }, + true, }, { - "recheck flag updated", + "recheck flag", "mempool.recheck", - func(loadedCfg *config.Config, value string) { - boolVar, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVar, loadedCfg.Mempool.Recheck) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.Recheck, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "broadcast flag updated", + "broadcast flag", "mempool.broadcast", - func(loadedCfg *config.Config, value string) { - boolVar, err := strconv.ParseBool(value) - require.NoError(t, err) - - assert.Equal(t, boolVar, loadedCfg.Mempool.Broadcast) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.Broadcast, unmarshalJSONCommon[bool](t, value)) }, + false, }, { - "WAL path updated", + "WAL path", + "mempool.wal_dir", + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.WalPath, unmarshalJSONCommon[string](t, value)) + }, + false, + }, + { + "WAL path, raw", "mempool.wal_dir", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Mempool.WalPath) + 
func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.WalPath, escapeNewline(value)) }, + true, }, { - "size updated", + "size", "mempool.size", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.Mempool.Size)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.Size, unmarshalJSONCommon[int](t, value)) }, + false, }, { - "max pending txs bytes updated", + "max pending txs bytes", "mempool.max_pending_txs_bytes", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.Mempool.MaxPendingTxsBytes)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.MaxPendingTxsBytes, unmarshalJSONCommon[int64](t, value)) }, + false, }, { - "cache size updated", + "cache size", "mempool.cache_size", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, fmt.Sprintf("%d", loadedCfg.Mempool.CacheSize)) + func(loadedCfg *config.Config, value []byte) { + assert.Equal(t, loadedCfg.Mempool.CacheSize, unmarshalJSONCommon[int](t, value)) }, + false, }, } diff --git a/gno.land/cmd/gnoland/secrets.go b/gno.land/cmd/gnoland/secrets.go index f45c751a463..6fbce52c638 100644 --- a/gno.land/cmd/gnoland/secrets.go +++ b/gno.land/cmd/gnoland/secrets.go @@ -21,9 +21,9 @@ const ( ) const ( - nodeIDKey = "NodeID" - validatorPrivateKeyKey = "ValidatorPrivateKey" - validatorStateKey = "ValidatorState" + nodeIDKey = "node_id" + validatorPrivateKeyKey = "validator_key" + validatorStateKey = "validator_state" ) // newSecretsCmd creates the secrets root command @@ -72,3 +72,33 @@ func constructSecretsPath(nodeDir string) string { config.DefaultSecretsDir, ) } + +type ( + secrets struct { + ValidatorKeyInfo *validatorKeyInfo `json:"validator_key,omitempty" toml:"validator_key" comment:"the validator private key info"` + ValidatorStateInfo *validatorStateInfo `json:"validator_state,omitempty" 
toml:"validator_state" comment:"the last signed validator state info"` + NodeIDInfo *nodeIDInfo `json:"node_id,omitempty" toml:"node_id" comment:"the derived node ID info"` + } + + // NOTE: keep in sync with tm2/pkg/bft/privval/file.go + validatorKeyInfo struct { + Address string `json:"address" toml:"address" comment:"the validator address"` + PubKey string `json:"pub_key" toml:"pub_key" comment:"the validator public key"` + } + + // NOTE: keep in sync with tm2/pkg/bft/privval/file.go + validatorStateInfo struct { + Height int64 `json:"height" toml:"height" comment:"the height of the last sign"` + Round int `json:"round" toml:"round" comment:"the round of the last sign"` + Step int8 `json:"step" toml:"step" comment:"the step of the last sign"` + + Signature []byte `json:"signature,omitempty" toml:"signature,omitempty" comment:"the signature of the last sign"` + SignBytes []byte `json:"sign_bytes,omitempty" toml:"sign_bytes,omitempty" comment:"the raw signature bytes of the last sign"` + } + + // NOTE: keep in sync with tm2/pkg/p2p/key.go + nodeIDInfo struct { + ID string `json:"id" toml:"id" comment:"the node ID derived from the private key"` + P2PAddress string `json:"p2p_address" toml:"p2p_address" comment:"the node's constructed P2P address'"` + } +) diff --git a/gno.land/cmd/gnoland/secrets_get.go b/gno.land/cmd/gnoland/secrets_get.go index 92648314bd1..47de7a46283 100644 --- a/gno.land/cmd/gnoland/secrets_get.go +++ b/gno.land/cmd/gnoland/secrets_get.go @@ -2,11 +2,11 @@ package main import ( "context" + "errors" "flag" "fmt" "path/filepath" "strings" - "text/tabwriter" "github.com/gnolang/gno/tm2/pkg/bft/config" "github.com/gnolang/gno/tm2/pkg/bft/privval" @@ -15,8 +15,12 @@ import ( "github.com/gnolang/gno/tm2/pkg/p2p" ) +var errInvalidSecretsGetArgs = errors.New("invalid number of secrets get arguments provided") + type secretsGetCfg struct { commonAllCfg + + raw bool } // newSecretsGetCmd creates the secrets get command @@ -27,12 +31,9 @@ func 
newSecretsGetCmd(io commands.IO) *commands.Command { commands.Metadata{ Name: "get", ShortUsage: "secrets get [flags] []", - ShortHelp: "shows all Gno secrets present in a common directory", - LongHelp: fmt.Sprintf( - "shows the validator private key, the node p2p key and the validator's last sign state. "+ - "If a key is provided, it shows the specified key value. Available keys: %s", - getAvailableSecretsKeys(), - ), + ShortHelp: "shows the Gno secrets present in a common directory", + LongHelp: "shows the validator private key, the node p2p key and the validator's last sign state at the given path " + + "by fetching the option specified at ", }, cfg, func(_ context.Context, args []string) error { @@ -45,6 +46,13 @@ func newSecretsGetCmd(io commands.IO) *commands.Command { func (c *secretsGetCfg) RegisterFlags(fs *flag.FlagSet) { c.commonAllCfg.RegisterFlags(fs) + + fs.BoolVar( + &c.raw, + "raw", + false, + "output raw string values, rather than as JSON strings", + ) } func execSecretsGet(cfg *secretsGetCfg, args []string, io commands.IO) error { @@ -53,139 +61,105 @@ func execSecretsGet(cfg *secretsGetCfg, args []string, io commands.IO) error { return errInvalidDataDir } - // Verify the secrets key - if err := verifySecretsKey(args); err != nil { - return err + // Make sure the get arguments are valid + if len(args) > 1 { + return errInvalidSecretsGetArgs } - var key string + // Load the secrets from the dir + loadedSecrets, err := loadSecrets(cfg.dataDir) + if err != nil { + return err + } - if len(args) > 0 { - key = args[0] + // Find and print the secrets value, if any + if err := printKeyValue(loadedSecrets, cfg.raw, io, args...); err != nil { + return fmt.Errorf("unable to get secrets value, %w", err) } - // Construct the paths + return nil +} + +// loadSecrets loads the secrets from the specified data directory +func loadSecrets(dirPath string) (*secrets, error) { + // Construct the file paths var ( - validatorKeyPath = filepath.Join(cfg.dataDir, 
defaultValidatorKeyName) - validatorStatePath = filepath.Join(cfg.dataDir, defaultValidatorStateName) - nodeKeyPath = filepath.Join(cfg.dataDir, defaultNodeKeyName) + validatorKeyPath = filepath.Join(dirPath, defaultValidatorKeyName) + validatorStatePath = filepath.Join(dirPath, defaultValidatorStateName) + nodeKeyPath = filepath.Join(dirPath, defaultNodeKeyName) ) - switch key { - case validatorPrivateKeyKey: - // Show the validator's key info - return readAndShowValidatorKey(validatorKeyPath, io) - case validatorStateKey: - // Show the validator's last sign state - return readAndShowValidatorState(validatorStatePath, io) - case nodeIDKey: - // Show the node's p2p info - return readAndShowNodeKey(nodeKeyPath, io) - default: - // Show the node's p2p info - if err := readAndShowNodeKey(nodeKeyPath, io); err != nil { - return err - } + var ( + vkInfo *validatorKeyInfo + vsInfo *validatorStateInfo + niInfo *nodeIDInfo - // Show the validator's key info - if err := readAndShowValidatorKey(validatorKeyPath, io); err != nil { - return err - } + err error + ) - // Show the validator's last sign state - return readAndShowValidatorState(validatorStatePath, io) + // Load the secrets + if osm.FileExists(validatorKeyPath) { + vkInfo, err = readValidatorKey(validatorKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load secrets, %w", err) + } } -} -// readAndShowValidatorKey reads and shows the validator key from the given path -func readAndShowValidatorKey(path string, io commands.IO) error { - validatorKey, err := readSecretData[privval.FilePVKey](path) - if err != nil { - return fmt.Errorf("unable to read validator key, %w", err) + if osm.FileExists(validatorStatePath) { + vsInfo, err = readValidatorState(validatorStatePath) + if err != nil { + return nil, fmt.Errorf("unable to load secrets, %w", err) + } } - w := tabwriter.NewWriter(io.Out(), 0, 0, 2, ' ', 0) - - if _, err := fmt.Fprintf(w, "[Validator Key Info]\n\n"); err != nil { - return err + if 
osm.FileExists(nodeKeyPath) { + niInfo, err = readNodeID(nodeKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load secrets, %w", err) + } } - if _, err := fmt.Fprintf(w, "Address:\t%s\n", validatorKey.Address.String()); err != nil { - return err - } + return &secrets{ + ValidatorKeyInfo: vkInfo, + ValidatorStateInfo: vsInfo, + NodeIDInfo: niInfo, + }, nil +} - if _, err := fmt.Fprintf(w, "Public Key:\t%s\n", validatorKey.PubKey.String()); err != nil { - return err +// readValidatorKey reads the validator key from the given path +func readValidatorKey(path string) (*validatorKeyInfo, error) { + validatorKey, err := readSecretData[privval.FilePVKey](path) + if err != nil { + return nil, fmt.Errorf("unable to read validator key, %w", err) } - return w.Flush() + return &validatorKeyInfo{ + Address: validatorKey.Address.String(), + PubKey: validatorKey.PubKey.String(), + }, nil } -// readAndShowValidatorState reads and shows the validator state from the given path -func readAndShowValidatorState(path string, io commands.IO) error { +// readValidatorState reads the validator state from the given path +func readValidatorState(path string) (*validatorStateInfo, error) { validatorState, err := readSecretData[privval.FilePVLastSignState](path) if err != nil { - return fmt.Errorf("unable to read validator state, %w", err) - } - - w := tabwriter.NewWriter(io.Out(), 0, 0, 2, ' ', 0) - - if _, err := fmt.Fprintf(w, "[Last Validator Sign State Info]\n\n"); err != nil { - return err - } - - if _, err := fmt.Fprintf( - w, - "Height:\t%d\n", - validatorState.Height, - ); err != nil { - return err - } - - if _, err := fmt.Fprintf( - w, - "Round:\t%d\n", - validatorState.Round, - ); err != nil { - return err - } - - if _, err := fmt.Fprintf( - w, - "Step:\t%d\n", - validatorState.Step, - ); err != nil { - return err - } - - if validatorState.Signature != nil { - if _, err := fmt.Fprintf( - w, - "Signature:\t%X\n", - validatorState.Signature, - ); err != nil { - return 
err - } - } - - if validatorState.SignBytes != nil { - if _, err := fmt.Fprintf( - w, - "Sign Bytes:\t%X\n", - validatorState.SignBytes, - ); err != nil { - return err - } + return nil, fmt.Errorf("unable to read validator state, %w", err) } - return w.Flush() + return &validatorStateInfo{ + Height: validatorState.Height, + Round: validatorState.Round, + Step: validatorState.Step, + Signature: validatorState.Signature, + SignBytes: validatorState.SignBytes, + }, nil } -// readAndShowNodeKey reads and shows the node p2p key from the given path -func readAndShowNodeKey(path string, io commands.IO) error { +// readNodeID reads the node p2p info from the given path +func readNodeID(path string) (*nodeIDInfo, error) { nodeKey, err := readSecretData[p2p.NodeKey](path) if err != nil { - return fmt.Errorf("unable to read node key, %w", err) + return nil, fmt.Errorf("unable to read node key, %w", err) } // Construct the config path @@ -201,38 +175,14 @@ func readAndShowNodeKey(path string, io commands.IO) error { // Attempt to grab the config from disk cfg, err = config.LoadConfig(nodeDir) if err != nil { - return fmt.Errorf("unable to load config file, %w", err) + return nil, fmt.Errorf("unable to load config file, %w", err) } } - w := tabwriter.NewWriter(io.Out(), 0, 0, 2, ' ', 0) - - if _, err := fmt.Fprintf(w, "[Node P2P Info]\n\n"); err != nil { - return err - } - - // Print the ID info - if _, err := fmt.Fprintf( - w, - "Node ID:\t%s\n", - nodeKey.ID(), - ); err != nil { - return err - } - - // Print the P2P address info - if _, err := fmt.Fprintf( - w, - "P2P Address:\t%s\n", - constructP2PAddress( - nodeKey.ID(), - cfg.P2P.ListenAddress, - ), - ); err != nil { - return err - } - - return w.Flush() + return &nodeIDInfo{ + ID: nodeKey.ID().String(), + P2PAddress: constructP2PAddress(nodeKey.ID(), cfg.P2P.ListenAddress), + }, nil } // constructP2PAddress constructs the P2P address other nodes can use diff --git a/gno.land/cmd/gnoland/secrets_get_test.go 
b/gno.land/cmd/gnoland/secrets_get_test.go index 7d3951ded3e..66e6e3509fc 100644 --- a/gno.land/cmd/gnoland/secrets_get_test.go +++ b/gno.land/cmd/gnoland/secrets_get_test.go @@ -3,6 +3,7 @@ package main import ( "bytes" "context" + "encoding/json" "fmt" "os" "path/filepath" @@ -151,10 +152,10 @@ func TestSecrets_Get_All(t *testing.T) { }) } -func TestSecrets_Get_Single(t *testing.T) { +func TestSecrets_Get_ValidatorKeyInfo(t *testing.T) { t.Parallel() - t.Run("validator key shown", func(t *testing.T) { + t.Run("validator key info", func(t *testing.T) { t.Parallel() dirPath := t.TempDir() @@ -181,23 +182,175 @@ func TestSecrets_Get_Single(t *testing.T) { // Run the command require.NoError(t, cmd.ParseAndRun(context.Background(), args)) - output := mockOutput.String() + var vk validatorKeyInfo + + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &vk)) // Make sure the private key info is displayed - assert.Contains( + assert.Equal( t, - output, validKey.Address.String(), + vk.Address, ) - assert.Contains( + assert.Equal( t, - output, validKey.PubKey.String(), + vk.PubKey, ) }) - t.Run("validator state shown", func(t *testing.T) { + t.Run("validator key address", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + keyPath := filepath.Join(dirPath, defaultValidatorKeyName) + + validKey := generateValidatorPrivateKey() + + require.NoError(t, saveSecretData(validKey, keyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorPrivateKeyKey, "address"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + var address string + + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &address)) + + assert.Equal( + t, + validKey.Address.String(), + address, + ) + }) + + t.Run("validator key 
address, raw", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + keyPath := filepath.Join(dirPath, defaultValidatorKeyName) + + validKey := generateValidatorPrivateKey() + + require.NoError(t, saveSecretData(validKey, keyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorPrivateKeyKey, "address"), + "--raw", + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + assert.Equal( + t, + validKey.Address.String(), + escapeNewline(mockOutput.Bytes()), + ) + }) + + t.Run("validator key pubkey", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + keyPath := filepath.Join(dirPath, defaultValidatorKeyName) + + validKey := generateValidatorPrivateKey() + + require.NoError(t, saveSecretData(validKey, keyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorPrivateKeyKey, "pub_key"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + var address string + + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &address)) + + assert.Equal( + t, + validKey.PubKey.String(), + address, + ) + }) + + t.Run("validator key pubkey, raw", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + keyPath := filepath.Join(dirPath, defaultValidatorKeyName) + + validKey := generateValidatorPrivateKey() + + require.NoError(t, saveSecretData(validKey, keyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := 
newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorPrivateKeyKey, "pub_key"), + "--raw", + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + assert.Equal( + t, + validKey.PubKey.String(), + escapeNewline(mockOutput.Bytes()), + ) + }) +} + +func TestSecrets_Get_ValidatorStateInfo(t *testing.T) { + t.Parallel() + + t.Run("validator state info", func(t *testing.T) { t.Parallel() dirPath := t.TempDir() @@ -224,29 +377,137 @@ func TestSecrets_Get_Single(t *testing.T) { // Run the command require.NoError(t, cmd.ParseAndRun(context.Background(), args)) - output := mockOutput.String() + var vs validatorStateInfo + + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &vs)) // Make sure the state info is displayed - assert.Contains( + assert.Equal( t, - output, - fmt.Sprintf("%d", validState.Step), + validState.Step, + vs.Step, ) - assert.Contains( + assert.Equal( t, - output, - fmt.Sprintf("%d", validState.Height), + validState.Height, + vs.Height, ) - assert.Contains( + assert.Equal( t, - output, - strconv.Itoa(validState.Round), + validState.Round, + vs.Round, + ) + }) + + t.Run("validator state info height", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + statePath := filepath.Join(dirPath, defaultValidatorStateName) + + validState := generateLastSignValidatorState() + + require.NoError(t, saveSecretData(validState, statePath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorStateKey, "height"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + assert.Equal( + t, + fmt.Sprintf("%d\n", validState.Height), + mockOutput.String(), ) }) - t.Run("node key shown, default config", 
func(t *testing.T) { + t.Run("validator state info round", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + statePath := filepath.Join(dirPath, defaultValidatorStateName) + + validState := generateLastSignValidatorState() + + require.NoError(t, saveSecretData(validState, statePath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorStateKey, "round"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + assert.Equal( + t, + fmt.Sprintf("%d\n", validState.Round), + mockOutput.String(), + ) + }) + + t.Run("validator state info step", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + statePath := filepath.Join(dirPath, defaultValidatorStateName) + + validState := generateLastSignValidatorState() + + require.NoError(t, saveSecretData(validState, statePath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", validatorStateKey, "step"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + assert.Equal( + t, + fmt.Sprintf("%d\n", validState.Step), + mockOutput.String(), + ) + }) +} + +func TestSecrets_Get_NodeIDInfo(t *testing.T) { + t.Parallel() + + t.Run("node ID info, default config", func(t *testing.T) { t.Parallel() cfg := config.DefaultConfig() @@ -275,24 +536,25 @@ func TestSecrets_Get_Single(t *testing.T) { // Run the command require.NoError(t, cmd.ParseAndRun(context.Background(), args)) - output := mockOutput.String() + var ni nodeIDInfo + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &ni)) // Make sure the 
node p2p key is displayed - assert.Contains( + assert.Equal( t, - output, validNodeKey.ID().String(), + ni.ID, ) // Make sure the default node p2p address is displayed - assert.Contains( + assert.Equal( t, - output, constructP2PAddress(validNodeKey.ID(), cfg.P2P.ListenAddress), + ni.P2PAddress, ) }) - t.Run("node key shown, existing config", func(t *testing.T) { + t.Run("node ID info, existing config", func(t *testing.T) { t.Parallel() var ( @@ -332,20 +594,173 @@ func TestSecrets_Get_Single(t *testing.T) { // Run the command require.NoError(t, cmd.ParseAndRun(context.Background(), args)) - output := mockOutput.String() + var ni nodeIDInfo + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &ni)) // Make sure the node p2p key is displayed - assert.Contains( + assert.Equal( + t, + validNodeKey.ID().String(), + ni.ID, + ) + + // Make sure the custom node p2p address is displayed + assert.Equal( + t, + constructP2PAddress(validNodeKey.ID(), cfg.P2P.ListenAddress), + ni.P2PAddress, + ) + }) + + t.Run("ID", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + nodeKeyPath := filepath.Join(dirPath, defaultNodeKeyName) + + validNodeKey := generateNodeKey() + + require.NoError(t, saveSecretData(validNodeKey, nodeKeyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", nodeIDKey, "id"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + var output string + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &output)) + + // Make sure the node p2p key is displayed + assert.Equal( t, + validNodeKey.ID().String(), output, + ) + }) + + t.Run("ID, raw", func(t *testing.T) { + t.Parallel() + + dirPath := t.TempDir() + nodeKeyPath := filepath.Join(dirPath, defaultNodeKeyName) + + validNodeKey := 
generateNodeKey() + + require.NoError(t, saveSecretData(validNodeKey, nodeKeyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", nodeIDKey, "id"), + "--raw", + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + // Make sure the node p2p key is displayed + assert.Equal( + t, validNodeKey.ID().String(), + escapeNewline(mockOutput.Bytes()), ) + }) + + t.Run("P2P Address", func(t *testing.T) { + t.Parallel() + + cfg := config.DefaultConfig() + + dirPath := t.TempDir() + nodeKeyPath := filepath.Join(dirPath, defaultNodeKeyName) + + validNodeKey := generateNodeKey() + + require.NoError(t, saveSecretData(validNodeKey, nodeKeyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", nodeIDKey, "p2p_address"), + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + var output string + require.NoError(t, json.Unmarshal(mockOutput.Bytes(), &output)) // Make sure the custom node p2p address is displayed - assert.Contains( + assert.Equal( t, + constructP2PAddress(validNodeKey.ID(), cfg.P2P.ListenAddress), output, + ) + }) + + t.Run("P2P Address, raw", func(t *testing.T) { + t.Parallel() + + cfg := config.DefaultConfig() + + dirPath := t.TempDir() + nodeKeyPath := filepath.Join(dirPath, defaultNodeKeyName) + + validNodeKey := generateNodeKey() + + require.NoError(t, saveSecretData(validNodeKey, nodeKeyPath)) + + mockOutput := bytes.NewBufferString("") + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(mockOutput)) + + // Create the command + cmd := 
newRootCmd(io) + args := []string{ + "secrets", + "get", + "--data-dir", + dirPath, + fmt.Sprintf("%s.%s", nodeIDKey, "p2p_address"), + "--raw", + } + + // Run the command + require.NoError(t, cmd.ParseAndRun(context.Background(), args)) + + // Make sure the custom node p2p address is displayed + assert.Equal( + t, constructP2PAddress(validNodeKey.ID(), cfg.P2P.ListenAddress), + escapeNewline(mockOutput.Bytes()), ) }) } diff --git a/tm2/pkg/bft/config/config.go b/tm2/pkg/bft/config/config.go index 21e9c2fe646..f9e9a0cd899 100644 --- a/tm2/pkg/bft/config/config.go +++ b/tm2/pkg/bft/config/config.go @@ -48,12 +48,12 @@ type Config struct { BaseConfig `toml:",squash"` // Options for services - RPC *rpc.RPCConfig `toml:"rpc" comment:"##### rpc server configuration options #####"` - P2P *p2p.P2PConfig `toml:"p2p" comment:"##### peer to peer configuration options #####"` - Mempool *mem.MempoolConfig `toml:"mempool" comment:"##### mempool configuration options #####"` - Consensus *cns.ConsensusConfig `toml:"consensus" comment:"##### consensus configuration options #####"` - TxEventStore *eventstore.Config `toml:"tx_event_store" comment:"##### event store #####"` - Telemetry *telemetry.Config `toml:"telemetry" comment:"##### node telemetry #####"` + RPC *rpc.RPCConfig `json:"rpc" toml:"rpc" comment:"##### rpc server configuration options #####"` + P2P *p2p.P2PConfig `json:"p2p" toml:"p2p" comment:"##### peer to peer configuration options #####"` + Mempool *mem.MempoolConfig `json:"mempool" toml:"mempool" comment:"##### mempool configuration options #####"` + Consensus *cns.ConsensusConfig `json:"consensus" toml:"consensus" comment:"##### consensus configuration options #####"` + TxEventStore *eventstore.Config `json:"tx_event_store" toml:"tx_event_store" comment:"##### event store #####"` + Telemetry *telemetry.Config `json:"telemetry" toml:"telemetry" comment:"##### node telemetry #####"` } // DefaultConfig returns a default configuration for a Tendermint node diff --git 
a/tm2/pkg/bft/consensus/config/config.go b/tm2/pkg/bft/consensus/config/config.go index 4a350ff3976..c0b5661a0e5 100644 --- a/tm2/pkg/bft/consensus/config/config.go +++ b/tm2/pkg/bft/consensus/config/config.go @@ -16,29 +16,29 @@ const ( // ConsensusConfig defines the configuration for the Tendermint consensus service, // including timeouts and details about the WAL and the block structure. type ConsensusConfig struct { - RootDir string `toml:"home"` - WALPath string `toml:"wal_file"` - WALDisabled bool `toml:"-"` + RootDir string `json:"home" toml:"home"` + WALPath string `json:"wal_file" toml:"wal_file"` + WALDisabled bool `json:"-" toml:"-"` walFile string // overrides WalPath if set - TimeoutPropose time.Duration `toml:"timeout_propose"` - TimeoutProposeDelta time.Duration `toml:"timeout_propose_delta"` - TimeoutPrevote time.Duration `toml:"timeout_prevote"` - TimeoutPrevoteDelta time.Duration `toml:"timeout_prevote_delta"` - TimeoutPrecommit time.Duration `toml:"timeout_precommit"` - TimeoutPrecommitDelta time.Duration `toml:"timeout_precommit_delta"` - TimeoutCommit time.Duration `toml:"timeout_commit"` + TimeoutPropose time.Duration `json:"timeout_propose" toml:"timeout_propose"` + TimeoutProposeDelta time.Duration `json:"timeout_propose_delta" toml:"timeout_propose_delta"` + TimeoutPrevote time.Duration `json:"timeout_prevote" toml:"timeout_prevote"` + TimeoutPrevoteDelta time.Duration `json:"timeout_prevote_delta" toml:"timeout_prevote_delta"` + TimeoutPrecommit time.Duration `json:"timeout_precommit" toml:"timeout_precommit"` + TimeoutPrecommitDelta time.Duration `json:"timeout_precommit_delta" toml:"timeout_precommit_delta"` + TimeoutCommit time.Duration `json:"timeout_commit" toml:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) - SkipTimeoutCommit bool `toml:"skip_timeout_commit" comment:"Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)"` + SkipTimeoutCommit bool 
`json:"skip_timeout_commit" toml:"skip_timeout_commit" comment:"Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)"` // EmptyBlocks mode and possible interval between empty blocks - CreateEmptyBlocks bool `toml:"create_empty_blocks" comment:"EmptyBlocks mode and possible interval between empty blocks"` - CreateEmptyBlocksInterval time.Duration `toml:"create_empty_blocks_interval"` + CreateEmptyBlocks bool `json:"create_empty_blocks" toml:"create_empty_blocks" comment:"EmptyBlocks mode and possible interval between empty blocks"` + CreateEmptyBlocksInterval time.Duration `json:"create_empty_blocks_interval" toml:"create_empty_blocks_interval"` // Reactor sleep duration parameters - PeerGossipSleepDuration time.Duration `toml:"peer_gossip_sleep_duration" comment:"Reactor sleep duration parameters"` - PeerQueryMaj23SleepDuration time.Duration `toml:"peer_query_maj23_sleep_duration"` + PeerGossipSleepDuration time.Duration `json:"peer_gossip_sleep_duration" toml:"peer_gossip_sleep_duration" comment:"Reactor sleep duration parameters"` + PeerQueryMaj23SleepDuration time.Duration `json:"peer_query_maj_23_sleep_duration" toml:"peer_query_maj23_sleep_duration"` } // DefaultConsensusConfig returns a default configuration for the consensus service diff --git a/tm2/pkg/bft/mempool/config/config.go b/tm2/pkg/bft/mempool/config/config.go index 198b34291bc..47df01238e5 100644 --- a/tm2/pkg/bft/mempool/config/config.go +++ b/tm2/pkg/bft/mempool/config/config.go @@ -7,13 +7,13 @@ import "github.com/gnolang/gno/tm2/pkg/errors" // MempoolConfig defines the configuration options for the Tendermint mempool type MempoolConfig struct { - RootDir string `toml:"home"` - Recheck bool `toml:"recheck"` - Broadcast bool `toml:"broadcast"` - WalPath string `toml:"wal_dir"` - Size int `toml:"size" comment:"Maximum number of transactions in the mempool"` - MaxPendingTxsBytes int64 `toml:"max_pending_txs_bytes" comment:"Limit the total size of all txs in the mempool.\n 
This only accounts for raw transactions (e.g. given 1MB transactions and\n max_txs_bytes=5MB, mempool will only accept 5 transactions)."` - CacheSize int `toml:"cache_size" comment:"Size of the cache (used to filter transactions we saw earlier) in transactions"` + RootDir string `json:"home" toml:"home"` + Recheck bool `json:"recheck" toml:"recheck"` + Broadcast bool `json:"broadcast" toml:"broadcast"` + WalPath string `json:"wal_dir" toml:"wal_dir"` + Size int `json:"size" toml:"size" comment:"Maximum number of transactions in the mempool"` + MaxPendingTxsBytes int64 `json:"max_pending_txs_bytes" toml:"max_pending_txs_bytes" comment:"Limit the total size of all txs in the mempool.\n This only accounts for raw transactions (e.g. given 1MB transactions and\n max_txs_bytes=5MB, mempool will only accept 5 transactions)."` + CacheSize int `json:"cache_size" toml:"cache_size" comment:"Size of the cache (used to filter transactions we saw earlier) in transactions"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool diff --git a/tm2/pkg/bft/privval/file.go b/tm2/pkg/bft/privval/file.go index b1bac8416f7..7ed586b7c05 100644 --- a/tm2/pkg/bft/privval/file.go +++ b/tm2/pkg/bft/privval/file.go @@ -38,10 +38,11 @@ func voteToStep(vote *types.Vote) int8 { // ------------------------------------------------------------------------------- // FilePVKey stores the immutable part of PrivValidator. 
+// NOTE: keep in sync with gno.land/cmd/gnoland/secrets.go type FilePVKey struct { - Address types.Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - PrivKey crypto.PrivKey `json:"priv_key"` + Address types.Address `json:"address" comment:"the validator address"` + PubKey crypto.PubKey `json:"pub_key" comment:"the validator public key"` + PrivKey crypto.PrivKey `json:"priv_key" comment:"the validator private key"` filePath string } @@ -66,12 +67,13 @@ func (pvKey FilePVKey) Save() { // ------------------------------------------------------------------------------- // FilePVLastSignState stores the mutable part of PrivValidator. +// NOTE: keep in sync with gno.land/cmd/gnoland/secrets.go type FilePVLastSignState struct { - Height int64 `json:"height"` - Round int `json:"round"` - Step int8 `json:"step"` - Signature []byte `json:"signature,omitempty"` - SignBytes []byte `json:"signbytes,omitempty"` + Height int64 `json:"height" comment:"the height of the last sign"` + Round int `json:"round" comment:"the round of the last sign"` + Step int8 `json:"step" comment:"the step of the last sign"` + Signature []byte `json:"signature,omitempty" comment:"the signature of the last sign"` + SignBytes []byte `json:"signbytes,omitempty" comment:"the raw signature bytes of the last sign"` filePath string } diff --git a/tm2/pkg/bft/rpc/config/config.go b/tm2/pkg/bft/rpc/config/config.go index de576ddb402..fe527450178 100644 --- a/tm2/pkg/bft/rpc/config/config.go +++ b/tm2/pkg/bft/rpc/config/config.go @@ -16,36 +16,36 @@ const ( // RPCConfig defines the configuration options for the Tendermint RPC server type RPCConfig struct { - RootDir string `toml:"home"` + RootDir string `json:"home" toml:"home"` // TCP or UNIX socket address for the RPC server to listen on - ListenAddress string `toml:"laddr" comment:"TCP or UNIX socket address for the RPC server to listen on"` + ListenAddress string `json:"laddr" toml:"laddr" comment:"TCP or UNIX socket address for the RPC 
server to listen on"` // A list of origins a cross-domain request can be executed from. // If the special '*' value is present in the list, all origins will be allowed. // An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com). // Only one wildcard can be used per origin. - CORSAllowedOrigins []string `toml:"cors_allowed_origins" comment:"A list of origins a cross-domain request can be executed from\n Default value '[]' disables cors support\n Use '[\"*\"]' to allow any origin"` + CORSAllowedOrigins []string `json:"cors_allowed_origins" toml:"cors_allowed_origins" comment:"A list of origins a cross-domain request can be executed from\n Default value '[]' disables cors support\n Use '[\"*\"]' to allow any origin"` // A list of methods the client is allowed to use with cross-domain requests. - CORSAllowedMethods []string `toml:"cors_allowed_methods" comment:"A list of methods the client is allowed to use with cross-domain requests"` + CORSAllowedMethods []string `json:"cors_allowed_methods" toml:"cors_allowed_methods" comment:"A list of methods the client is allowed to use with cross-domain requests"` // A list of non simple headers the client is allowed to use with cross-domain requests. 
- CORSAllowedHeaders []string `toml:"cors_allowed_headers" comment:"A list of non simple headers the client is allowed to use with cross-domain requests"` + CORSAllowedHeaders []string `json:"cors_allowed_headers" toml:"cors_allowed_headers" comment:"A list of non simple headers the client is allowed to use with cross-domain requests"` // TCP or UNIX socket address for the gRPC server to listen on // NOTE: This server only supports /broadcast_tx_commit - GRPCListenAddress string `toml:"grpc_laddr" comment:"TCP or UNIX socket address for the gRPC server to listen on\n NOTE: This server only supports /broadcast_tx_commit"` + GRPCListenAddress string `json:"grpc_laddr" toml:"grpc_laddr" comment:"TCP or UNIX socket address for the gRPC server to listen on\n NOTE: This server only supports /broadcast_tx_commit"` // Maximum number of simultaneous connections. // Does not include RPC (HTTP&WebSocket) connections. See max_open_connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. - GRPCMaxOpenConnections int `toml:"grpc_max_open_connections" comment:"Maximum number of simultaneous connections.\n Does not include RPC (HTTP&WebSocket) connections. See max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` + GRPCMaxOpenConnections int `json:"grpc_max_open_connections" toml:"grpc_max_open_connections" comment:"Maximum number of simultaneous connections.\n Does not include RPC (HTTP&WebSocket) connections. 
See max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool - Unsafe bool `toml:"unsafe" comment:"Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool"` + Unsafe bool `json:"unsafe" toml:"unsafe" comment:"Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool"` // Maximum number of simultaneous connections (including WebSocket). // Does not include gRPC connections. See grpc_max_open_connections @@ -54,19 +54,19 @@ type RPCConfig struct { // 0 - unlimited. // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} // 1024 - 40 - 10 - 50 = 924 = ~900 - MaxOpenConnections int `toml:"max_open_connections" comment:"Maximum number of simultaneous connections (including WebSocket).\n Does not include gRPC connections. See grpc_max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` + MaxOpenConnections int `json:"max_open_connections" toml:"max_open_connections" comment:"Maximum number of simultaneous connections (including WebSocket).\n Does not include gRPC connections. 
See grpc_max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` // How long to wait for a tx to be committed during /broadcast_tx_commit // WARNING: Using a value larger than 10s will result in increasing the // global HTTP write timeout, which applies to all connections and endpoints. // See https://github.com/gnolang/gno/tm2/pkg/bft/issues/3435 - TimeoutBroadcastTxCommit time.Duration `toml:"timeout_broadcast_tx_commit" comment:"How long to wait for a tx to be committed during /broadcast_tx_commit.\n WARNING: Using a value larger than 10s will result in increasing the\n global HTTP write timeout, which applies to all connections and endpoints.\n See https://github.com/tendermint/classic/issues/3435"` + TimeoutBroadcastTxCommit time.Duration `json:"timeout_broadcast_tx_commit" toml:"timeout_broadcast_tx_commit" comment:"How long to wait for a tx to be committed during /broadcast_tx_commit.\n WARNING: Using a value larger than 10s will result in increasing the\n global HTTP write timeout, which applies to all connections and endpoints.\n See https://github.com/tendermint/classic/issues/3435"` // Maximum size of request body, in bytes - MaxBodyBytes int64 `toml:"max_body_bytes" comment:"Maximum size of request body, in bytes"` + MaxBodyBytes int64 `json:"max_body_bytes" toml:"max_body_bytes" comment:"Maximum size of request body, in bytes"` // Maximum size of request header, in bytes - MaxHeaderBytes int `toml:"max_header_bytes" comment:"Maximum size of request header, in bytes"` + MaxHeaderBytes int `json:"max_header_bytes" toml:"max_header_bytes" comment:"Maximum size of request header, in bytes"` // The path to a file containing certificate that is used to create the HTTPS server. 
// Might be either absolute path or path related to tendermint's config directory. @@ -76,13 +76,13 @@ type RPCConfig struct { // and the CA's certificate. // // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. - TLSCertFile string `toml:"tls_cert_file" comment:"The path to a file containing certificate that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n If the certificate is signed by a certificate authority,\n the certFile should be the concatenation of the server's certificate, any intermediates,\n and the CA's certificate.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run."` + TLSCertFile string `json:"tls_cert_file" toml:"tls_cert_file" comment:"The path to a file containing certificate that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n If the certificate is signed by a certificate authority,\n the certFile should be the concatenation of the server's certificate, any intermediates,\n and the CA's certificate.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run."` // The path to a file containing matching private key that is used to create the HTTPS server. // Might be either absolute path or path related to tendermint's config directory. // // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. - TLSKeyFile string `toml:"tls_key_file" comment:"The path to a file containing matching private key that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. 
Otherwise, HTTP server is run."` + TLSKeyFile string `json:"tls_key_file" toml:"tls_key_file" comment:"The path to a file containing matching private key that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run."` } // DefaultRPCConfig returns a default configuration for the RPC server diff --git a/tm2/pkg/bft/state/eventstore/types/config.go b/tm2/pkg/bft/state/eventstore/types/config.go index 5b152f254fd..a74d6c9d2ec 100644 --- a/tm2/pkg/bft/state/eventstore/types/config.go +++ b/tm2/pkg/bft/state/eventstore/types/config.go @@ -7,8 +7,8 @@ type EventStoreParams map[string]any // Config defines the specific event store configuration type Config struct { - EventStoreType string `toml:"event_store_type" comment:"Type of event store"` - Params EventStoreParams `toml:"event_store_params" comment:"Event store parameters"` + EventStoreType string `json:"event_store_type" toml:"event_store_type" comment:"Type of event store"` + Params EventStoreParams `json:"event_store_params" toml:"event_store_params" comment:"Event store parameters"` } // GetParam fetches the specific config param, if any. 
diff --git a/tm2/pkg/p2p/config/config.go b/tm2/pkg/p2p/config/config.go index 48aae35e10a..07692145fee 100644 --- a/tm2/pkg/p2p/config/config.go +++ b/tm2/pkg/p2p/config/config.go @@ -18,67 +18,67 @@ const ( // P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer type P2PConfig struct { - RootDir string `toml:"home"` + RootDir string `json:"home" toml:"home"` // Address to listen for incoming connections - ListenAddress string `toml:"laddr" comment:"Address to listen for incoming connections"` + ListenAddress string `json:"laddr" toml:"laddr" comment:"Address to listen for incoming connections"` // Address to advertise to peers for them to dial - ExternalAddress string `toml:"external_address" comment:"Address to advertise to peers for them to dial\n If empty, will use the same port as the laddr,\n and will introspect on the listener or use UPnP\n to figure out the address."` + ExternalAddress string `json:"external_address" toml:"external_address" comment:"Address to advertise to peers for them to dial\n If empty, will use the same port as the laddr,\n and will introspect on the listener or use UPnP\n to figure out the address."` // Comma separated list of seed nodes to connect to - Seeds string `toml:"seeds" comment:"Comma separated list of seed nodes to connect to"` + Seeds string `json:"seeds" toml:"seeds" comment:"Comma separated list of seed nodes to connect to"` // Comma separated list of nodes to keep persistent connections to - PersistentPeers string `toml:"persistent_peers" comment:"Comma separated list of nodes to keep persistent connections to"` + PersistentPeers string `json:"persistent_peers" toml:"persistent_peers" comment:"Comma separated list of nodes to keep persistent connections to"` // UPNP port forwarding - UPNP bool `toml:"upnp" comment:"UPNP port forwarding"` + UPNP bool `json:"upnp" toml:"upnp" comment:"UPNP port forwarding"` // Maximum number of inbound peers - MaxNumInboundPeers int 
`toml:"max_num_inbound_peers" comment:"Maximum number of inbound peers"` + MaxNumInboundPeers int `json:"max_num_inbound_peers" toml:"max_num_inbound_peers" comment:"Maximum number of inbound peers"` // Maximum number of outbound peers to connect to, excluding persistent peers - MaxNumOutboundPeers int `toml:"max_num_outbound_peers" comment:"Maximum number of outbound peers to connect to, excluding persistent peers"` + MaxNumOutboundPeers int `json:"max_num_outbound_peers" toml:"max_num_outbound_peers" comment:"Maximum number of outbound peers to connect to, excluding persistent peers"` // Time to wait before flushing messages out on the connection - FlushThrottleTimeout time.Duration `toml:"flush_throttle_timeout" comment:"Time to wait before flushing messages out on the connection"` + FlushThrottleTimeout time.Duration `json:"flush_throttle_timeout" toml:"flush_throttle_timeout" comment:"Time to wait before flushing messages out on the connection"` // Maximum size of a message packet payload, in bytes - MaxPacketMsgPayloadSize int `toml:"max_packet_msg_payload_size" comment:"Maximum size of a message packet payload, in bytes"` + MaxPacketMsgPayloadSize int `json:"max_packet_msg_payload_size" toml:"max_packet_msg_payload_size" comment:"Maximum size of a message packet payload, in bytes"` // Rate at which packets can be sent, in bytes/second - SendRate int64 `toml:"send_rate" comment:"Rate at which packets can be sent, in bytes/second"` + SendRate int64 `json:"send_rate" toml:"send_rate" comment:"Rate at which packets can be sent, in bytes/second"` // Rate at which packets can be received, in bytes/second - RecvRate int64 `toml:"recv_rate" comment:"Rate at which packets can be received, in bytes/second"` + RecvRate int64 `json:"recv_rate" toml:"recv_rate" comment:"Rate at which packets can be received, in bytes/second"` // Set true to enable the peer-exchange reactor - PexReactor bool `toml:"pex" comment:"Set true to enable the peer-exchange reactor"` + PexReactor 
bool `json:"pex" toml:"pex" comment:"Set true to enable the peer-exchange reactor"` // Seed mode, in which node constantly crawls the network and looks for // peers. If another node asks it for addresses, it responds and disconnects. // // Does not work if the peer-exchange reactor is disabled. - SeedMode bool `toml:"seed_mode" comment:"Seed mode, in which node constantly crawls the network and looks for\n peers. If another node asks it for addresses, it responds and disconnects.\n\n Does not work if the peer-exchange reactor is disabled."` + SeedMode bool `json:"seed_mode" toml:"seed_mode" comment:"Seed mode, in which node constantly crawls the network and looks for\n peers. If another node asks it for addresses, it responds and disconnects.\n\n Does not work if the peer-exchange reactor is disabled."` // Comma separated list of peer IDs to keep private (will not be gossiped to // other peers) - PrivatePeerIDs string `toml:"private_peer_ids" comment:"Comma separated list of peer IDs to keep private (will not be gossiped to other peers)"` + PrivatePeerIDs string `json:"private_peer_ids" toml:"private_peer_ids" comment:"Comma separated list of peer IDs to keep private (will not be gossiped to other peers)"` // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `toml:"allow_duplicate_ip" comment:"Toggle to disable guard against peers connecting from the same ip."` + AllowDuplicateIP bool `json:"allow_duplicate_ip" toml:"allow_duplicate_ip" comment:"Toggle to disable guard against peers connecting from the same ip."` // Peer connection configuration. - HandshakeTimeout time.Duration `toml:"handshake_timeout" comment:"Peer connection configuration."` - DialTimeout time.Duration `toml:"dial_timeout"` + HandshakeTimeout time.Duration `json:"handshake_timeout" toml:"handshake_timeout" comment:"Peer connection configuration."` + DialTimeout time.Duration `json:"dial_timeout" toml:"dial_timeout"` // Testing params. 
// Force dial to fail - TestDialFail bool `toml:"test_dial_fail"` + TestDialFail bool `json:"test_dial_fail" toml:"test_dial_fail"` // FUzz connection - TestFuzz bool `toml:"test_fuzz"` - TestFuzzConfig *FuzzConnConfig `toml:"test_fuzz_config"` + TestFuzz bool `json:"test_fuzz" toml:"test_fuzz"` + TestFuzzConfig *FuzzConnConfig `json:"test_fuzz_config" toml:"test_fuzz_config"` } // DefaultP2PConfig returns a default configuration for the peer-to-peer layer diff --git a/tm2/pkg/p2p/key.go b/tm2/pkg/p2p/key.go index 71e3459f418..a41edeb07f8 100644 --- a/tm2/pkg/p2p/key.go +++ b/tm2/pkg/p2p/key.go @@ -17,6 +17,7 @@ import ( // NodeKey is the persistent peer key. // It contains the nodes private key for authentication. +// NOTE: keep in sync with gno.land/cmd/gnoland/secrets.go type NodeKey struct { crypto.PrivKey `json:"priv_key"` // our priv key } diff --git a/tm2/pkg/telemetry/config/config.go b/tm2/pkg/telemetry/config/config.go index a5e991fbc89..c496a52d078 100644 --- a/tm2/pkg/telemetry/config/config.go +++ b/tm2/pkg/telemetry/config/config.go @@ -8,10 +8,10 @@ var errEndpointNotSet = errors.New("telemetry exporter endpoint not set") // Config is the configuration struct for the tm2 telemetry package type Config struct { - MetricsEnabled bool `toml:"enabled"` - MeterName string `toml:"meter_name"` - ServiceName string `toml:"service_name"` - ExporterEndpoint string `toml:"exporter_endpoint" comment:"the endpoint to export metrics to, like a local OpenTelemetry collector"` + MetricsEnabled bool `json:"enabled" toml:"enabled"` + MeterName string `json:"meter_name" toml:"meter_name"` + ServiceName string `json:"service_name" toml:"service_name"` + ExporterEndpoint string `json:"exporter_endpoint" toml:"exporter_endpoint" comment:"the endpoint to export metrics to, like a local OpenTelemetry collector"` } // DefaultTelemetryConfig is the default configuration used for the node