Skip to content

Commit

Permalink
cmd: refactor create-cluster flags (#385)
Browse files Browse the repository at this point in the history
Refactors `create-cluster` command flags to improve UX when used in combination with docker-compose and k8s.

The main purpose of `create-cluster` is to create a manifest and keys. Creating **config run scripts (run.sh)** is only applicable to local non-docker clusters (which will not be the most common use case), so creating config run scripts is now opt-in via `--config=true`. This also makes it more explicit that `--config-simnet` only applies to the config run scripts. The same goes for `--config-port-start` and `--config-binary`.

When used in charon-docker-compose or charon-k8s, this config is not applicable and is now no longer generated. This is less confusing, and the config no longer needs to be deleted manually.

Also rename the keys directory flag to `--split-keys-dir` so it shares a prefix with `--split-existing-keys`, indicating that the two flags are related.

category: refactor 
ticket: none
  • Loading branch information
corverroos authored Apr 7, 2022
1 parent 18d615e commit 6344414
Show file tree
Hide file tree
Showing 7 changed files with 122 additions and 113 deletions.
12 changes: 4 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,18 +27,14 @@ If however, you want to build from source with this repo directly, you can get s
brew install go

# Build the charon binary
go build
go build -o charon

# Run the charon command to generate a local simnet.
# Use charon's create-cluster command to generate a local simnet cluster.
./charon --help
./charon create-cluster --simnet
/tmp/charon/run_cluster.sh
./charon create-cluster --cluster-dir=/tmp/charon-simnet --config=true --config-simnet
/tmp/charon-simnet/run_cluster.sh
```

### Better simnet output

If you install [tmux](https://github.com/tmux/tmux/wiki) and [teamocil](https://github.com/remi/teamocil), you will get the output of the nodes in different tmux panes when you run `run_cluster.sh`. Otherwise the output from all the nodes will be merged as the script output.

## Documentation

The [Obol Docs](https://docs.obol.tech/) website is the best place to get started.
Expand Down
165 changes: 97 additions & 68 deletions cmd/createcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ if (type -P tmux >/dev/null && type -P teamocil >/dev/null); then
echo "Commands tmux and teamocil are installed"
tmux new-session 'teamocil --layout teamocil.yml'
else
echo "Commands tmux and teamocil are not installed, output will be merged"
echo "⚠️ Commands tmux and teamocil are not installed, output will be merged"
trap "exit" INT TERM ERR
trap "kill 0" EXIT
Expand All @@ -82,16 +82,17 @@ windows:

type clusterConfig struct {
ClusterDir string
Clean bool
NumNodes int
Threshold int
PortStart int
Simnet bool
Clean bool
SplitKeys bool
KeysDir string

// TestBinary overrides the charon binary for testing.
TestBinary string
SplitKeys bool
SplitKeysDir string

ConfigEnabled bool
ConfigSimnet bool
ConfigPortStart int
ConfigBinary string
}

func newCreateClusterCmd(runFunc func(io.Writer, clusterConfig) error) *cobra.Command {
Expand All @@ -116,11 +117,15 @@ func bindClusterFlags(flags *pflag.FlagSet, config *clusterConfig) {
flags.StringVar(&config.ClusterDir, "cluster-dir", "./charon/cluster", "The target folder to create the cluster in.")
flags.IntVarP(&config.NumNodes, "nodes", "n", 4, "The number of charon nodes in the cluster.")
flags.IntVarP(&config.Threshold, "threshold", "t", 3, "The threshold required for signature reconstruction. Minimum is n-(ceil(n/3)-1).")
flags.IntVar(&config.PortStart, "port-start", 16000, "Starting port number for nodes in cluster.")
flags.BoolVar(&config.Clean, "clean", false, "Delete the cluster directory before generating it.")
flags.BoolVar(&config.Simnet, "simnet", true, "Configures a simulated network cluster with mock beacon node and mock validator clients. It showcases a running charon in isolation.")

flags.BoolVar(&config.SplitKeys, "split-existing-keys", false, "Enables splitting of existing non-dvt validator keys into distributed threshold private shares (instead of creating new random keys).")
flags.StringVar(&config.KeysDir, "keys-dir", "", "Directory containing keys to split. Expects keys in keystore-*.json and passwords in keystore-*.txt. Requires --split-validator-keys.")
flags.StringVar(&config.SplitKeysDir, "split-keys-dir", "", "Directory containing keys to split. Expects keys in keystore-*.json and passwords in keystore-*.txt. Requires --split-validator-keys.")

flags.BoolVar(&config.ConfigEnabled, "config", false, "Enables creation of local non-docker config files.")
flags.BoolVar(&config.ConfigSimnet, "config-simnet", true, "Configures a simulated network cluster with mock beacon node and mock validator clients. It showcases a running charon in isolation. Requires --config.")
flags.StringVar(&config.ConfigBinary, "config-binary", "", "Path of the charon binary to use in the config files. Defaults to this binary if empty. Requires --config.")
flags.IntVar(&config.ConfigPortStart, "config-port-start", 16000, "Starting port number used in config files. Requires --config.")
}

func runCreateCluster(w io.Writer, conf clusterConfig) error {
Expand All @@ -129,19 +134,22 @@ func runCreateCluster(w io.Writer, conf clusterConfig) error {
if err := os.RemoveAll(conf.ClusterDir); err != nil {
return errors.Wrap(err, "remove cluster dir")
}
} else if _, err := os.Stat(path.Join(conf.ClusterDir, "manifest.json")); err == nil {
return errors.New("existing cluster found. Try again with --clean")
}

// Create cluster directory at given location
if err := os.MkdirAll(conf.ClusterDir, 0o755); err != nil {
return errors.Wrap(err, "mkdir")
}

// Get charon binary to include in run scripts
charonBin, err := os.Executable()
if err != nil {
return errors.Wrap(err, "get charon binary")
} else if conf.TestBinary != "" {
charonBin = conf.TestBinary
if conf.ConfigBinary == "" {
// Get charon binary to include in run scripts
var err error
conf.ConfigBinary, err = os.Executable()
if err != nil {
return errors.Wrap(err, "get charon binary")
}
}

// Get root bls key
Expand All @@ -151,42 +159,27 @@ func runCreateCluster(w io.Writer, conf clusterConfig) error {
}

// Get function to create sequential ports
nextPort := nextPortFunc(conf.PortStart)
nextPort := nextPortFunc(conf.ConfigPortStart)

// Generate threshold bls key shares
var (
dvs []tbls.TSS
splits [][]*bls_sig.SecretKeyShare
)
for _, secret := range secrets {
shares, verifier, err := tbls.SplitSecret(secret, conf.Threshold, conf.NumNodes, rand.Reader)
if err != nil {
return err
}

splits = append(splits, shares)

tss, err := tbls.NewTSS(verifier, len(shares))
if err != nil {
return err
}

dvs = append(dvs, tss)
dvs, shareSets, err2 := getTSSShares(secrets, conf)
if err2 != nil {
return err2
}

// Create p2p peers
var peers []p2p.Peer
for i := 0; i < conf.NumNodes; i++ {
peer, err := newPeer(conf.ClusterDir, nodeDir(conf.ClusterDir, i), charonBin, i, nextPort, conf.Simnet)
peer, err := newPeer(conf, i, nextPort)
if err != nil {
return err
}

peers = append(peers, peer)

var secrets []*bls_sig.SecretKey
for _, split := range splits {
secret, err := tblsconv.ShareToSecret(split[i])
for _, shares := range shareSets {
secret, err := tblsconv.ShareToSecret(shares[i])
if err != nil {
return err
}
Expand All @@ -204,25 +197,51 @@ func runCreateCluster(w io.Writer, conf clusterConfig) error {
return err
}

err = writeClusterScript(conf.ClusterDir, conf.NumNodes)
if err != nil {
return errors.Wrap(err, "write cluster script")
}
if conf.ConfigEnabled {
err = writeClusterScript(conf.ClusterDir, conf.NumNodes)
if err != nil {
return errors.Wrap(err, "write cluster script")
}

err = writeTeamocilYML(conf.ClusterDir, conf.NumNodes)
if err != nil {
return errors.Wrap(err, "write teamocil.yml")
err = writeTeamocilYML(conf.ClusterDir, conf.NumNodes)
if err != nil {
return errors.Wrap(err, "write teamocil.yml")
}
}

if conf.SplitKeys {
writeWarning(w)
}

writeOutput(w, conf, charonBin)
writeOutput(w, conf)

return nil
}

// getTSSShares splits each root secret key into threshold BLS secret key shares
// and returns one threshold signature scheme (TSS) per distributed validator
// along with the matching per-validator share sets. The i'th element of each
// returned slice corresponds to the i'th input secret.
func getTSSShares(secrets []*bls_sig.SecretKey, conf clusterConfig) ([]tbls.TSS, [][]*bls_sig.SecretKeyShare, error) {
	// Pre-size both results: exactly one entry is produced per input secret.
	var (
		dvs    = make([]tbls.TSS, 0, len(secrets))
		splits = make([][]*bls_sig.SecretKeyShare, 0, len(secrets))
	)
	for _, secret := range secrets {
		// Split the secret into NumNodes shares requiring Threshold to reconstruct.
		shares, verifier, err := tbls.SplitSecret(secret, conf.Threshold, conf.NumNodes, rand.Reader)
		if err != nil {
			return nil, nil, err
		}

		splits = append(splits, shares)

		tss, err := tbls.NewTSS(verifier, len(shares))
		if err != nil {
			return nil, nil, err
		}

		dvs = append(dvs, tss)
	}

	return dvs, splits, nil
}

func writeWarning(w io.Writer) {
var sb strings.Builder
_, _ = sb.WriteString("\n")
Expand All @@ -238,11 +257,11 @@ func writeWarning(w io.Writer) {

func getKeys(conf clusterConfig) ([]*bls_sig.SecretKey, error) {
if conf.SplitKeys {
if conf.KeysDir == "" {
return nil, errors.New("--keys-dir required when splitting keys")
if conf.SplitKeysDir == "" {
return nil, errors.New("--split-keys-dir required when splitting keys")
}

return keystore.LoadKeys(conf.KeysDir)
return keystore.LoadKeys(conf.SplitKeysDir)
}

// TODO(corver): Add flag to generate more distributed-validators than 1
Expand Down Expand Up @@ -274,7 +293,7 @@ func writeManifest(config clusterConfig, tss []tbls.TSS, peers []p2p.Peer) error
}

// newPeer returns a new peer, generating a p2pkey and ENR and node directory and run script in the process.
func newPeer(clusterDir, nodeDir, charonBin string, peerIdx int, nextPort func() int, simnet bool) (p2p.Peer, error) {
func newPeer(conf clusterConfig, peerIdx int, nextPort func() int) (p2p.Peer, error) {
tcp := net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: nextPort(),
Expand All @@ -285,7 +304,9 @@ func newPeer(clusterDir, nodeDir, charonBin string, peerIdx int, nextPort func()
Port: nextPort(),
}

p2pKey, err := p2p.NewSavedPrivKey(nodeDir)
dir := nodeDir(conf.ClusterDir, peerIdx)

p2pKey, err := p2p.NewSavedPrivKey(dir)
if err != nil {
return p2p.Peer{}, errors.Wrap(err, "create p2p key")
}
Expand All @@ -306,38 +327,46 @@ func newPeer(clusterDir, nodeDir, charonBin string, peerIdx int, nextPort func()
return p2p.Peer{}, errors.Wrap(err, "new peer")
}

if err := writeRunScript(clusterDir, nodeDir, charonBin, nextPort(),
tcp.String(), udp.String(), nextPort(), simnet); err != nil {
return p2p.Peer{}, errors.Wrap(err, "write run script")
if conf.ConfigEnabled {
if err := writeRunScript(conf, dir, nextPort(), tcp.String(), udp.String(), nextPort()); err != nil {
return p2p.Peer{}, errors.Wrap(err, "write run script")
}
}

return peer, nil
}

// writeOutput writes the gen_cluster output.
func writeOutput(out io.Writer, config clusterConfig, charonBin string) {
func writeOutput(out io.Writer, conf clusterConfig) {
var sb strings.Builder
_, _ = sb.WriteString(fmt.Sprintf("Referencing charon binary in scripts: %s\n", charonBin))
_, _ = sb.WriteString("Created charon cluster:\n")
_, _ = sb.WriteString(fmt.Sprintf(" --simnet=%v\n", config.Simnet))
_, _ = sb.WriteString(fmt.Sprintf(" --split-existing-keys=%v\n", config.SplitKeys))
_, _ = sb.WriteString(fmt.Sprintf(" --split-existing-keys=%v\n", conf.SplitKeys))
_, _ = sb.WriteString(fmt.Sprintf(" --config=%v\n", conf.ConfigEnabled))
if conf.ConfigEnabled {
_, _ = sb.WriteString(fmt.Sprintf(" --config-simnet=%v\n", conf.ConfigSimnet))
_, _ = sb.WriteString(fmt.Sprintf(" --config-binary=%v\n", conf.ConfigBinary))
}
_, _ = sb.WriteString("\n")
_, _ = sb.WriteString(strings.TrimSuffix(config.ClusterDir, "/") + "/\n")
_, _ = sb.WriteString(strings.TrimSuffix(conf.ClusterDir, "/") + "/\n")
_, _ = sb.WriteString("├─ manifest.json\tCluster manifest defines the cluster; used by all nodes\n")
_, _ = sb.WriteString("├─ run_cluster.sh\tConvenience script to run all nodes\n")
_, _ = sb.WriteString("├─ teamocil.yml\t\tTeamocil config for splitting logs in tmux panes\n")
if conf.ConfigEnabled {
_, _ = sb.WriteString("├─ run_cluster.sh\tConvenience script to run all nodes\n")
_, _ = sb.WriteString("├─ teamocil.yml\t\tTeamocil config for splitting logs in tmux panes\n")
}
_, _ = sb.WriteString("├─ node[0-3]/\t\tDirectory for each node\n")
_, _ = sb.WriteString("│ ├─ p2pkey\t\tP2P networking private key for node authentication\n")
_, _ = sb.WriteString("│ ├─ keystore-*.json\tValidator private share key for duty signing\n")
_, _ = sb.WriteString("│ ├─ keystore-*.txt\tKeystore password files for keystore-*.json\n")
_, _ = sb.WriteString("│ ├─ run.sh\t\tScript to run the node\n")
if conf.ConfigEnabled {
_, _ = sb.WriteString("│ ├─ run.sh\t\tConfig script to run the node\n")
}

_, _ = fmt.Fprint(out, sb.String())
}

// writeRunScript creates run script for a node.
func writeRunScript(clusterDir string, nodeDir string, charonBin string, monitoringPort int,
tcpAddr string, udpAddr string, validatorAPIPort int, simnet bool,
func writeRunScript(conf clusterConfig, nodeDir string, monitoringPort int,
tcpAddr string, udpAddr string, validatorAPIPort int,
) error {
f, err := os.Create(nodeDir + "/run.sh")
if err != nil {
Expand All @@ -348,12 +377,12 @@ func writeRunScript(clusterDir string, nodeDir string, charonBin string, monitor
// Flags for running a node
var flags []string
flags = append(flags, fmt.Sprintf("--data-dir=\"%s\"", nodeDir))
flags = append(flags, fmt.Sprintf("--manifest-file=\"%s/manifest.json\"", clusterDir))
flags = append(flags, fmt.Sprintf("--manifest-file=\"%s/manifest.json\"", conf.ClusterDir))
flags = append(flags, fmt.Sprintf("--monitoring-address=\"127.0.0.1:%d\"", monitoringPort))
flags = append(flags, fmt.Sprintf("--validator-api-address=\"127.0.0.1:%d\"", validatorAPIPort))
flags = append(flags, fmt.Sprintf("--p2p-tcp-address=%s", tcpAddr))
flags = append(flags, fmt.Sprintf("--p2p-udp-address=%s", udpAddr))
if simnet {
if conf.ConfigSimnet {
flags = append(flags, "--simnet-beacon-mock")
flags = append(flags, "--simnet-validator-mock")
}
Expand All @@ -366,7 +395,7 @@ func writeRunScript(clusterDir string, nodeDir string, charonBin string, monitor
err = tmpl.Execute(f, struct {
CharonBin string
Flags []string
}{CharonBin: charonBin, Flags: flags})
}{CharonBin: conf.ConfigBinary, Flags: flags})
if err != nil {
return errors.Wrap(err, "execute template")
}
Expand Down
25 changes: 15 additions & 10 deletions cmd/createcluster_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,18 +44,19 @@ func TestCreateCluster(t *testing.T) {
{
Name: "simnet",
Config: clusterConfig{
NumNodes: 4,
Threshold: 3,
PortStart: 8000,
Simnet: true,
NumNodes: 4,
Threshold: 3,
ConfigEnabled: true,
ConfigPortStart: 8000,
ConfigSimnet: true,
},
}, {
Name: "splitkeys",
Config: clusterConfig{
NumNodes: 4,
Threshold: 3,
PortStart: 8000,
SplitKeys: true,
NumNodes: 4,
Threshold: 3,
ConfigEnabled: false,
SplitKeys: true,
},
Prep: func(t *testing.T, config clusterConfig) clusterConfig {
t.Helper()
Expand All @@ -71,7 +72,7 @@ func TestCreateCluster(t *testing.T) {
err = keystore.StoreKeys([]*bls_sig.SecretKey{secret1, secret2}, keyDir)
require.NoError(t, err)

config.KeysDir = keyDir
config.SplitKeysDir = keyDir

return config
},
Expand All @@ -90,7 +91,7 @@ func TestCreateCluster(t *testing.T) {
func testCreateCluster(t *testing.T, conf clusterConfig) {
t.Helper()

conf.TestBinary = "charon"
conf.ConfigBinary = "charon"

dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
Expand Down Expand Up @@ -124,6 +125,10 @@ func testCreateCluster(t *testing.T, conf clusterConfig) {

t.Run("runsh", func(t *testing.T) {
b, err := os.ReadFile(path.Join(dir, "node0", "run.sh"))
if !conf.ConfigSimnet {
require.Error(t, err)
return
}
require.NoError(t, err)

b = bytes.ReplaceAll(b, []byte(dir), []byte("charon"))
Expand Down
Loading

0 comments on commit 6344414

Please sign in to comment.