From cfda0264a5e88f821f2b42822e12965fef886ef0 Mon Sep 17 00:00:00 2001 From: Warren He Date: Fri, 3 Jan 2020 16:34:02 -0800 Subject: [PATCH 01/10] byzantine: correct diagnostic message in mergeReceiveCommitment --- go/oasis-node/cmd/debug/byzantine/merge.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/oasis-node/cmd/debug/byzantine/merge.go b/go/oasis-node/cmd/debug/byzantine/merge.go index 88ff1675628..3e93d53b84a 100644 --- a/go/oasis-node/cmd/debug/byzantine/merge.go +++ b/go/oasis-node/cmd/debug/byzantine/merge.go @@ -43,7 +43,7 @@ func mergeReceiveCommitment(ph *p2pHandle) (*commitment.OpenComputeCommitment, e req.responseCh <- nil if req.msg.ComputeWorkerFinished == nil { - return nil, errors.Errorf("expecting signed transaction scheduler batch dispatch message, got %+v", req.msg) + return nil, errors.Errorf("expecting compute worker finished message, got %+v", req.msg) } openCom, err := req.msg.ComputeWorkerFinished.Commitment.Open() From 1e86fd9a2d13faf2f6477a3f4a57a052d03b06f2 Mon Sep 17 00:00:00 2001 From: Warren He Date: Fri, 3 Jan 2020 11:19:14 -0800 Subject: [PATCH 02/10] go registry: unify compute role --- go/common/node/node.go | 14 ++--------- .../tendermint/apps/scheduler/scheduler.go | 4 +-- .../cmd/debug/byzantine/byzantine.go | 6 ++--- go/oasis-node/cmd/node/node.go | 10 ++++---- go/oasis-node/cmd/registry/node/node.go | 16 ++++-------- go/oasis-node/node_test.go | 7 ++---- go/oasis-test-runner/oasis/args.go | 15 ++--------- go/oasis-test-runner/oasis/compute.go | 2 -- go/registry/api/api.go | 19 +++----------- go/registry/tests/tester.go | 2 +- go/worker/compute/init.go | 25 ++----------------- go/worker/computeenable/init.go | 25 +++++++++++++++++++ go/worker/merge/init.go | 25 ++----------------- go/worker/merge/worker.go | 7 ------ go/worker/txnscheduler/init.go | 11 ++------ go/worker/txnscheduler/worker.go | 20 +-------------- 16 files changed, 57 insertions(+), 151 deletions(-) create mode 100644 
go/worker/computeenable/init.go diff --git a/go/common/node/node.go b/go/common/node/node.go index aaa94826c36..e92e594f689 100644 --- a/go/common/node/node.go +++ b/go/common/node/node.go @@ -72,14 +72,10 @@ const ( RoleComputeWorker RolesMask = 1 << 0 // RoleStorageWorker is Oasis storage worker role. RoleStorageWorker RolesMask = 1 << 1 - // RoleTransactionScheduler is Oasis transaction scheduler role. - RoleTransactionScheduler RolesMask = 1 << 2 // RoleKeyManager is the Oasis key manager role. - RoleKeyManager RolesMask = 1 << 3 - // RoleMergeWorker is the Oasis merge worker role. - RoleMergeWorker RolesMask = 1 << 4 + RoleKeyManager RolesMask = 1 << 2 // RoleValidator is the Oasis validator role. - RoleValidator RolesMask = 1 << 5 + RoleValidator RolesMask = 1 << 3 // RoleReserved are all the bits of the Oasis node roles bitmask // that are reserved and must not be used. @@ -104,15 +100,9 @@ func (m RolesMask) String() string { if m&RoleStorageWorker != 0 { ret = append(ret, "storage") } - if m&RoleTransactionScheduler != 0 { - ret = append(ret, "txn_scheduler") - } if m&RoleKeyManager != 0 { ret = append(ret, "key_manager") } - if m&RoleMergeWorker != 0 { - ret = append(ret, "merge") - } if m&RoleValidator != 0 { ret = append(ret, "validator") } diff --git a/go/consensus/tendermint/apps/scheduler/scheduler.go b/go/consensus/tendermint/apps/scheduler/scheduler.go index 9c3665c0b00..a56b0ec0ae2 100644 --- a/go/consensus/tendermint/apps/scheduler/scheduler.go +++ b/go/consensus/tendermint/apps/scheduler/scheduler.go @@ -380,7 +380,7 @@ func (app *schedulerApplication) isSuitableStorageWorker(n *node.Node, rt *regis } func (app *schedulerApplication) isSuitableTransactionScheduler(n *node.Node, rt *registry.Runtime, ts time.Time) bool { - if !n.HasRoles(node.RoleTransactionScheduler) { + if !n.HasRoles(node.RoleComputeWorker) { return false } for _, nrt := range n.Runtimes { @@ -393,7 +393,7 @@ func (app *schedulerApplication) isSuitableTransactionScheduler(n 
*node.Node, rt } func (app *schedulerApplication) isSuitableMergeWorker(n *node.Node, rt *registry.Runtime, ts time.Time) bool { - return n.HasRoles(node.RoleMergeWorker) + return n.HasRoles(node.RoleComputeWorker) } // Operates on consensus connection. diff --git a/go/oasis-node/cmd/debug/byzantine/byzantine.go b/go/oasis-node/cmd/debug/byzantine/byzantine.go index 32ae6bc56eb..d952cee89cb 100644 --- a/go/oasis-node/cmd/debug/byzantine/byzantine.go +++ b/go/oasis-node/cmd/debug/byzantine/byzantine.go @@ -424,7 +424,7 @@ func doMergeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) } - if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleComputeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } @@ -518,7 +518,7 @@ func doMergeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) } - if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleComputeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } @@ -636,7 +636,7 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) } - if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, 
ph.service.Addresses(), defaultRuntimeID, nil, node.RoleComputeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } diff --git a/go/oasis-node/cmd/node/node.go b/go/oasis-node/cmd/node/node.go index 5bb1ddf0f16..70c2b96cc65 100644 --- a/go/oasis-node/cmd/node/node.go +++ b/go/oasis-node/cmd/node/node.go @@ -59,6 +59,7 @@ import ( workerCommon "github.com/oasislabs/oasis-core/go/worker/common" "github.com/oasislabs/oasis-core/go/worker/common/p2p" "github.com/oasislabs/oasis-core/go/worker/compute" + "github.com/oasislabs/oasis-core/go/worker/computeenable" workerKeymanager "github.com/oasislabs/oasis-core/go/worker/keymanager" "github.com/oasislabs/oasis-core/go/worker/merge" "github.com/oasislabs/oasis-core/go/worker/registration" @@ -203,14 +204,14 @@ func (n *Node) initWorkers(logger *logging.Logger) error { return err } - // Initialize the P2P worker if any workers are enabled. Since the P2P + // Initialize the P2P worker if the compute worker is enabled. Since the P2P // layer does not have a separate Start method and starts listening // immediately when created, make sure that we don't start it if it is not // needed. // // Currently, only compute, txn scheduler and merge workers need P2P // transport. - if compute.Enabled() || txnscheduler.Enabled() || merge.Enabled() { + if computeenable.Enabled() { p2pCtx, p2pSvc := service.NewContextCleanup(context.Background()) if genesisDoc.Registry.Parameters.DebugAllowUnroutableAddresses { p2p.DebugForceAllowUnroutableAddresses() @@ -225,7 +226,7 @@ func (n *Node) initWorkers(logger *logging.Logger) error { // Initialize the common worker. 
n.CommonWorker, err = workerCommon.New( dataDir, - compute.Enabled() || workerStorage.Enabled() || txnscheduler.Enabled() || merge.Enabled() || workerKeymanager.Enabled(), + computeenable.Enabled() || workerStorage.Enabled() || workerKeymanager.Enabled(), n.Identity, n.RootHash, n.Registry, @@ -790,13 +791,12 @@ func init() { ias.Flags, workerKeymanager.Flags, runtimeRegistry.Flags, - compute.Flags, + computeenable.Flags, p2p.Flags, registration.Flags, txnscheduler.Flags, workerCommon.Flags, workerStorage.Flags, - merge.Flags, workerSentry.Flags, crash.InitFlags(), } { diff --git a/go/oasis-node/cmd/registry/node/node.go b/go/oasis-node/cmd/registry/node/node.go index a6dcc9c7755..a8faa4ed7c0 100644 --- a/go/oasis-node/cmd/registry/node/node.go +++ b/go/oasis-node/cmd/registry/node/node.go @@ -39,16 +39,14 @@ const ( CfgSelfSigned = "node.is_self_signed" CfgNodeRuntimeID = "node.runtime.id" - optRoleComputeWorker = "compute-worker" - optRoleStorageWorker = "storage-worker" - optRoleTransactionScheduler = "transaction-scheduler" - optRoleKeyManager = "key-manager" - optRoleMergeWorker = "merge-worker" - optRoleValidator = "validator" + optRoleComputeWorker = "compute-worker" + optRoleStorageWorker = "storage-worker" + optRoleKeyManager = "key-manager" + optRoleValidator = "validator" NodeGenesisFilename = "node_genesis.json" - maskCommitteeMember = node.RoleComputeWorker | node.RoleStorageWorker | node.RoleTransactionScheduler | node.RoleKeyManager | node.RoleMergeWorker + maskCommitteeMember = node.RoleComputeWorker | node.RoleStorageWorker | node.RoleKeyManager ) var ( @@ -276,12 +274,8 @@ func argsToRolesMask() (node.RolesMask, error) { rolesMask |= node.RoleComputeWorker case optRoleStorageWorker: rolesMask |= node.RoleStorageWorker - case optRoleTransactionScheduler: - rolesMask |= node.RoleTransactionScheduler case optRoleKeyManager: rolesMask |= node.RoleKeyManager - case optRoleMergeWorker: - rolesMask |= node.RoleMergeWorker case optRoleValidator: rolesMask 
|= node.RoleValidator default: diff --git a/go/oasis-node/node_test.go b/go/oasis-node/node_test.go index a0c20c443a4..f4853d5d511 100644 --- a/go/oasis-node/node_test.go +++ b/go/oasis-node/node_test.go @@ -41,10 +41,9 @@ import ( storageClientTests "github.com/oasislabs/oasis-core/go/storage/client/tests" storageTests "github.com/oasislabs/oasis-core/go/storage/tests" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" - computeWorker "github.com/oasislabs/oasis-core/go/worker/compute" computeCommittee "github.com/oasislabs/oasis-core/go/worker/compute/committee" computeWorkerTests "github.com/oasislabs/oasis-core/go/worker/compute/tests" - mergeWorker "github.com/oasislabs/oasis-core/go/worker/merge" + "github.com/oasislabs/oasis-core/go/worker/computeenable" storageWorker "github.com/oasislabs/oasis-core/go/worker/storage" storageWorkerTests "github.com/oasislabs/oasis-core/go/worker/storage/tests" "github.com/oasislabs/oasis-core/go/worker/txnscheduler" @@ -67,14 +66,12 @@ var ( {cmdCommonFlags.CfgConsensusValidator, true}, {cmdCommonFlags.CfgDebugDontBlameOasis, true}, {storage.CfgBackend, "badger"}, - {computeWorker.CfgWorkerEnabled, true}, + {computeenable.CfgWorkerEnabled, true}, {workerCommon.CfgRuntimeBackend, "mock"}, {workerCommon.CfgRuntimeLoader, "mock-runtime"}, {workerCommon.CfgClientPort, workerClientPort}, {storageWorker.CfgWorkerEnabled, true}, - {txnscheduler.CfgWorkerEnabled, true}, {txnscheduler.CfgCheckTxEnabled, false}, - {mergeWorker.CfgWorkerEnabled, true}, {supplementarysanity.CfgEnabled, true}, {supplementarysanity.CfgInterval, 1}, {cmdCommon.CfgDebugAllowTestKeys, true}, diff --git a/go/oasis-test-runner/oasis/args.go b/go/oasis-test-runner/oasis/args.go index 5108304e8cb..29c7c0bd5a4 100644 --- a/go/oasis-test-runner/oasis/args.go +++ b/go/oasis-test-runner/oasis/args.go @@ -22,9 +22,8 @@ import ( "github.com/oasislabs/oasis-core/go/storage" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" 
"github.com/oasislabs/oasis-core/go/worker/common/p2p" - "github.com/oasislabs/oasis-core/go/worker/compute" + "github.com/oasislabs/oasis-core/go/worker/computeenable" "github.com/oasislabs/oasis-core/go/worker/keymanager" - "github.com/oasislabs/oasis-core/go/worker/merge" "github.com/oasislabs/oasis-core/go/worker/registration" workerSentry "github.com/oasislabs/oasis-core/go/worker/sentry" workerStorage "github.com/oasislabs/oasis-core/go/worker/storage" @@ -208,7 +207,7 @@ func (args *argBuilder) workerRuntimeBinary(id common.Namespace, fn string) *arg } func (args *argBuilder) workerComputeEnabled() *argBuilder { - args.vec = append(args.vec, "--"+compute.CfgWorkerEnabled) + args.vec = append(args.vec, "--"+computeenable.CfgWorkerEnabled) return args } @@ -250,11 +249,6 @@ func (args *argBuilder) workerKeymanagerMayGenerate() *argBuilder { return args } -func (args *argBuilder) workerMergeEnabled() *argBuilder { - args.vec = append(args.vec, "--"+merge.CfgWorkerEnabled) - return args -} - func (args *argBuilder) workerSentryEnabled() *argBuilder { args.vec = append(args.vec, []string{ "--" + workerSentry.CfgEnabled, @@ -281,11 +275,6 @@ func (args *argBuilder) workerStorageDebugIgnoreApplies(ignore bool) *argBuilder return args } -func (args *argBuilder) workerTxnschedulerEnabled() *argBuilder { - args.vec = append(args.vec, "--"+txnscheduler.CfgWorkerEnabled) - return args -} - func (args *argBuilder) workerTxnschedulerCheckTxEnabled() *argBuilder { args.vec = append(args.vec, "--"+txnscheduler.CfgCheckTxEnabled) return args diff --git a/go/oasis-test-runner/oasis/compute.go b/go/oasis-test-runner/oasis/compute.go index 10d2110512a..b3dba36f76f 100644 --- a/go/oasis-test-runner/oasis/compute.go +++ b/go/oasis-test-runner/oasis/compute.go @@ -80,8 +80,6 @@ func (worker *Compute) startNode() error { workerComputeEnabled(). workerRuntimeBackend(worker.runtimeBackend). workerRuntimeLoader(worker.net.cfg.RuntimeLoaderBinary). - workerMergeEnabled(). 
- workerTxnschedulerEnabled(). workerTxnschedulerCheckTxEnabled(). appendNetwork(worker.net). appendEntity(worker.entity) diff --git a/go/registry/api/api.go b/go/registry/api/api.go index c6c591b298f..80483500cbb 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -147,8 +147,7 @@ var ( // RuntimesRequiredRoles are the Node roles that require runtimes. RuntimesRequiredRoles = node.RoleComputeWorker | - node.RoleKeyManager | - node.RoleTransactionScheduler + node.RoleKeyManager // ConsensusAddressRequiredRoles are the Node roles that require Consensus Address. ConsensusAddressRequiredRoles = node.RoleValidator @@ -156,14 +155,10 @@ var ( // CommitteeAddressRequiredRoles are the Node roles that require Committee Address. CommitteeAddressRequiredRoles = (node.RoleComputeWorker | node.RoleStorageWorker | - node.RoleTransactionScheduler | - node.RoleKeyManager | - node.RoleMergeWorker) + node.RoleKeyManager) // P2PAddressRequiredRoles are the Node roles that require P2P Address. - P2PAddressRequiredRoles = (node.RoleComputeWorker | - node.RoleTransactionScheduler | - node.RoleMergeWorker) + P2PAddressRequiredRoles = node.RoleComputeWorker ) // Backend is a registry implementation. 
@@ -1294,18 +1289,10 @@ func SanityCheckNodes(nodes []*node.SignedNode, seenEntities map[signature.Publi return fmt.Errorf("registry: sanity check failed: key manager node must have runtime(s)") } - if n.HasRoles(node.RoleTransactionScheduler) && len(n.Runtimes) == 0 { - return fmt.Errorf("registry: sanity check failed: transaction scheduler node must have runtime(s)") - } - if n.HasRoles(node.RoleStorageWorker) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { return fmt.Errorf("registry: sanity check failed: storage worker node shouldn't have any runtimes") } - if n.HasRoles(node.RoleMergeWorker) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { - return fmt.Errorf("registry: sanity check failed: merge worker node shouldn't have any runtimes") - } - if n.HasRoles(node.RoleValidator) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { return fmt.Errorf("registry: sanity check failed: validator node shouldn't have any runtimes") } diff --git a/go/registry/tests/tester.go b/go/registry/tests/tester.go index 2de577a624d..bd570bd3cf9 100644 --- a/go/registry/tests/tester.go +++ b/go/registry/tests/tester.go @@ -629,7 +629,7 @@ func (ent *TestEntity) NewTestNodes(nCompute int, nStorage int, runtimes []*node var thisNodeRuntimes []*node.Runtime var role node.RolesMask if i < nCompute { - role = node.RoleComputeWorker | node.RoleTransactionScheduler | node.RoleMergeWorker + role = node.RoleComputeWorker thisNodeRuntimes = runtimes } else { role = node.RoleStorageWorker diff --git a/go/worker/compute/init.go b/go/worker/compute/init.go index 3da089a588d..437aa6034b0 100644 --- a/go/worker/compute/init.go +++ b/go/worker/compute/init.go @@ -1,27 +1,12 @@ package compute import ( - flag "github.com/spf13/pflag" - "github.com/spf13/viper" - workerCommon "github.com/oasislabs/oasis-core/go/worker/common" + 
"github.com/oasislabs/oasis-core/go/worker/computeenable" "github.com/oasislabs/oasis-core/go/worker/merge" "github.com/oasislabs/oasis-core/go/worker/registration" ) -const ( - // CfgWorkerEnabled enables the compute worker. - CfgWorkerEnabled = "worker.compute.enabled" -) - -// Flags has the configuration flags. -var Flags = flag.NewFlagSet("", flag.ContinueOnError) - -// Enabled reads our enabled flag from viper. -func Enabled() bool { - return viper.GetBool(CfgWorkerEnabled) -} - // New creates a new compute worker. func New( dataDir string, @@ -29,11 +14,5 @@ func New( mergeWorker *merge.Worker, registration *registration.Worker, ) (*Worker, error) { - return newWorker(dataDir, Enabled(), commonWorker, mergeWorker, registration) -} - -func init() { - Flags.Bool(CfgWorkerEnabled, false, "Enable compute worker process") - - _ = viper.BindPFlags(Flags) + return newWorker(dataDir, computeenable.Enabled(), commonWorker, mergeWorker, registration) } diff --git a/go/worker/computeenable/init.go b/go/worker/computeenable/init.go new file mode 100644 index 00000000000..585921116bd --- /dev/null +++ b/go/worker/computeenable/init.go @@ -0,0 +1,25 @@ +package computeenable + +import ( + flag "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +const ( + // CfgWorkerEnabled enables the compute worker, tx scheduler worker, and merge worker. + CfgWorkerEnabled = "worker.compute.enabled" +) + +// Flags has the configuration flags. +var Flags = flag.NewFlagSet("", flag.ContinueOnError) + +// Enabled reads our enabled flag from viper. 
+func Enabled() bool { + return viper.GetBool(CfgWorkerEnabled) +} + +func init() { + Flags.Bool(CfgWorkerEnabled, false, "Enable compute worker processes") + + _ = viper.BindPFlags(Flags) +} diff --git a/go/worker/merge/init.go b/go/worker/merge/init.go index ca4d634bf5c..35b3c37882b 100644 --- a/go/worker/merge/init.go +++ b/go/worker/merge/init.go @@ -1,33 +1,12 @@ package merge import ( - flag "github.com/spf13/pflag" - "github.com/spf13/viper" - workerCommon "github.com/oasislabs/oasis-core/go/worker/common" + "github.com/oasislabs/oasis-core/go/worker/computeenable" "github.com/oasislabs/oasis-core/go/worker/registration" ) -const ( - // CfgWorkerEnabled enables the merge worker. - CfgWorkerEnabled = "worker.merge.enabled" -) - -// Flags has the configuration flags. -var Flags = flag.NewFlagSet("", flag.ContinueOnError) - -// Enabled reads our enabled flag from viper. -func Enabled() bool { - return viper.GetBool(CfgWorkerEnabled) -} - // New creates a new worker. func New(commonWorker *workerCommon.Worker, registration *registration.Worker) (*Worker, error) { - return newWorker(Enabled(), commonWorker, registration) -} - -func init() { - Flags.Bool(CfgWorkerEnabled, false, "Enable merge worker process") - - _ = viper.BindPFlags(Flags) + return newWorker(computeenable.Enabled(), commonWorker, registration) } diff --git a/go/worker/merge/worker.go b/go/worker/merge/worker.go index 622c54f0738..f87c26f9f4c 100644 --- a/go/worker/merge/worker.go +++ b/go/worker/merge/worker.go @@ -5,7 +5,6 @@ import ( "github.com/oasislabs/oasis-core/go/common" "github.com/oasislabs/oasis-core/go/common/logging" - "github.com/oasislabs/oasis-core/go/common/node" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" committeeCommon "github.com/oasislabs/oasis-core/go/worker/common/committee" "github.com/oasislabs/oasis-core/go/worker/merge/committee" @@ -178,12 +177,6 @@ func newWorker(enabled bool, commonWorker *workerCommon.Worker, registrationWork return nil, err } } 
- - // Register merge worker role. - if err := w.registrationWorker.RegisterRole(node.RoleMergeWorker, - func(n *node.Node) error { return nil }); err != nil { - return nil, err - } } return w, nil diff --git a/go/worker/txnscheduler/init.go b/go/worker/txnscheduler/init.go index ce77ef14ab4..b621077bf29 100644 --- a/go/worker/txnscheduler/init.go +++ b/go/worker/txnscheduler/init.go @@ -6,13 +6,12 @@ import ( workerCommon "github.com/oasislabs/oasis-core/go/worker/common" "github.com/oasislabs/oasis-core/go/worker/compute" + "github.com/oasislabs/oasis-core/go/worker/computeenable" "github.com/oasislabs/oasis-core/go/worker/registration" txnSchedulerAlgorithm "github.com/oasislabs/oasis-core/go/worker/txnscheduler/algorithm" ) const ( - // CfgWorkerEnabled enables the tx scheduler worker. - CfgWorkerEnabled = "worker.txn_scheduler.enabled" // CfgCheckTxEnabled enables checking each transaction before scheduling it. CfgCheckTxEnabled = "worker.txn_scheduler.check_tx.enabled" ) @@ -20,11 +19,6 @@ const ( // Flags has the configuration flags. var Flags = flag.NewFlagSet("", flag.ContinueOnError) -// Enabled reads our enabled flag from viper. -func Enabled() bool { - return viper.GetBool(CfgWorkerEnabled) -} - // CheckTxEnabled reads our CheckTx enabled flag from viper. 
func CheckTxEnabled() bool { return viper.GetBool(CfgCheckTxEnabled) @@ -36,11 +30,10 @@ func New( compute *compute.Worker, registration *registration.Worker, ) (*Worker, error) { - return newWorker(Enabled(), commonWorker, compute, registration, CheckTxEnabled()) + return newWorker(computeenable.Enabled(), commonWorker, compute, registration, CheckTxEnabled()) } func init() { - Flags.Bool(CfgWorkerEnabled, false, "Enable transaction scheduler process") Flags.Bool(CfgCheckTxEnabled, false, "Enable checking transactions before scheduling them") _ = viper.BindPFlags(Flags) diff --git a/go/worker/txnscheduler/worker.go b/go/worker/txnscheduler/worker.go index b609bce50c4..fcc1de16e3e 100644 --- a/go/worker/txnscheduler/worker.go +++ b/go/worker/txnscheduler/worker.go @@ -146,7 +146,7 @@ func (w *Worker) registerRuntime(commonNode *committeeCommon.Node) error { computeNode := w.compute.GetRuntime(id) // Create worker host for the given runtime. - workerHostFactory, err := w.NewRuntimeWorkerHostFactory(node.RoleTransactionScheduler, id) + workerHostFactory, err := w.NewRuntimeWorkerHostFactory(node.RoleComputeWorker, id) if err != nil { return err } @@ -211,24 +211,6 @@ func newWorker( return nil, err } } - - // Register transaction scheduler worker role. - if err = w.registration.RegisterRole(node.RoleTransactionScheduler, func(n *node.Node) error { - if w.checkTxEnabled { - // Wait until all the runtimes are initialized. 
- for _, rt := range w.runtimes { - select { - case <-rt.Initialized(): - case <-w.ctx.Done(): - return w.ctx.Err() - } - } - } - - return nil - }); err != nil { - return nil, err - } } return w, nil From 9ac18aaf1b0abb2c358ce8da6ae39de3c0b8f854 Mon Sep 17 00:00:00 2001 From: Warren He Date: Fri, 3 Jan 2020 14:13:03 -0800 Subject: [PATCH 03/10] add changelog --- .changelog/2107.breaking.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changelog/2107.breaking.md diff --git a/.changelog/2107.breaking.md b/.changelog/2107.breaking.md new file mode 100644 index 00000000000..aaae6ac92d7 --- /dev/null +++ b/.changelog/2107.breaking.md @@ -0,0 +1,5 @@ +go node: Unite compute, merge, and transaction scheduler roles. + +We're removing the separation among registering nodes for the compute, merge, and transaction scheduler roles. +You now have to register for and enable all or none of these roles, under a new, broadened, and confusing--you're +welcome--term "compute." From 8f465f04385fbb7b024e5f762ba6795b49131c59 Mon Sep 17 00:00:00 2001 From: Warren He Date: Mon, 6 Jan 2020 14:39:16 -0800 Subject: [PATCH 04/10] byzantine: check that we're not scheduled for other roles --- .../cmd/debug/byzantine/byzantine.go | 76 +++++++++++++++++++ .../cmd/debug/byzantine/scheduler.go | 13 ++++ 2 files changed, 89 insertions(+) diff --git a/go/oasis-node/cmd/debug/byzantine/byzantine.go b/go/oasis-node/cmd/debug/byzantine/byzantine.go index d952cee89cb..c4166e1ecaf 100644 --- a/go/oasis-node/cmd/debug/byzantine/byzantine.go +++ b/go/oasis-node/cmd/debug/byzantine/byzantine.go @@ -140,10 +140,20 @@ func doComputeHonest(cmd *cobra.Command, args []string) { if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) } + transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", 
scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } mergeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindMerge, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindMerge, err)) } + if err = schedulerCheckNotScheduled(mergeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled merge failed: %+v", err)) + } logger.Debug("compute honest: connecting to storage committee") hnss, err := storageConnectToCommittee(ht, electionHeight, storageCommittee, scheduler.Worker, defaultIdentity) @@ -262,10 +272,20 @@ func doComputeWrong(cmd *cobra.Command, args []string) { if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) } + transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } mergeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindMerge, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindMerge, err)) } + if err = schedulerCheckNotScheduled(mergeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled merge failed: %+v", err)) + } logger.Debug("compute honest: connecting to storage committee") hnss, err := storageConnectToCommittee(ht, electionHeight, 
storageCommittee, scheduler.Worker, defaultIdentity) @@ -379,6 +399,20 @@ func doComputeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("compute straggler: compute schedule ok") + transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } + mergeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindMerge, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindMerge, err)) + } + if err = schedulerCheckNotScheduled(mergeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled merge failed: %+v", err)) + } cbc := newComputeBatchContext() @@ -440,10 +474,24 @@ func doMergeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge honest: merge schedule ok") + computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + } + if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + } storageCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) } + 
transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } logger.Debug("merge honest: connecting to storage committee") hnss, err := storageConnectToCommittee(ht, electionHeight, storageCommittee, scheduler.Worker, defaultIdentity) @@ -534,10 +582,24 @@ func doMergeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge wrong: merge schedule ok") + computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + } + if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + } storageCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) } + transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } logger.Debug("merge wrong: connecting to storage committee") hnss, 
err := storageConnectToCommittee(ht, electionHeight, storageCommittee, scheduler.Worker, defaultIdentity) @@ -652,6 +714,20 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge straggler: merge schedule ok") + computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + } + if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + } + transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) + if err != nil { + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) + } + if err = schedulerCheckNotScheduled(transactionSchedulerCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled txnscheduler failed: %+v", err)) + } mbc := newMergeBatchContext() diff --git a/go/oasis-node/cmd/debug/byzantine/scheduler.go b/go/oasis-node/cmd/debug/byzantine/scheduler.go index 8d51fbfa7d4..6468d4d8a8a 100644 --- a/go/oasis-node/cmd/debug/byzantine/scheduler.go +++ b/go/oasis-node/cmd/debug/byzantine/scheduler.go @@ -88,6 +88,19 @@ func schedulerCheckScheduled(committee *scheduler.Committee, nodeID signature.Pu return fmt.Errorf("we're not scheduled") } +func schedulerCheckNotScheduled(committee *scheduler.Committee, nodeID signature.PublicKey) error { + for _, member := range committee.Members { + if !member.PublicKey.Equal(nodeID) { + continue + } + + return fmt.Errorf("we're scheduled as %s", member.Role) + } + + // All good. 
+ return nil +} + func schedulerForRoleInCommittee(ht *honestTendermint, height int64, committee *scheduler.Committee, role scheduler.Role, fn func(*node.Node) error) error { for _, member := range committee.Members { if member.Role != role { From eed039e128c98c1bc2e5df8d5e14c435709a9b07 Mon Sep 17 00:00:00 2001 From: Warren He Date: Mon, 6 Jan 2020 17:26:46 -0800 Subject: [PATCH 05/10] go oasis-test-runner: new identity seeds for byzantine nodes --- go/oasis-test-runner/oasis/compute.go | 9 +++- go/oasis-test-runner/oasis/oasis_test.go | 46 +++++++++++++++++++ .../scenario/e2e/byzantine.go | 18 ++++---- 3 files changed, 62 insertions(+), 11 deletions(-) create mode 100644 go/oasis-test-runner/oasis/oasis_test.go diff --git a/go/oasis-test-runner/oasis/compute.go b/go/oasis-test-runner/oasis/compute.go index b3dba36f76f..5fd1f90d519 100644 --- a/go/oasis-test-runner/oasis/compute.go +++ b/go/oasis-test-runner/oasis/compute.go @@ -10,7 +10,14 @@ import ( workerHost "github.com/oasislabs/oasis-core/go/worker/common/host" ) -const computeIdentitySeedTemplate = "ekiden node worker %d" +const ( + computeIdentitySeedTemplate = "ekiden node worker %d" + + ByzantineDefaultIdentitySeed = "ekiden byzantine node worker" // index 0 + ByzantineIndex1IdentitySeed = "ekiden byzantine node worker, luck=1" + ByzantineIndex2IdentitySeed = "ekiden byzantine node worker, luck=11" + ByzantineIndex3IdentitySeed = "ekiden byzantine node worker, luck=6" +) // Compute is an Oasis compute node. 
type Compute struct { // nolint: maligned diff --git a/go/oasis-test-runner/oasis/oasis_test.go b/go/oasis-test-runner/oasis/oasis_test.go new file mode 100644 index 00000000000..106e0876010 --- /dev/null +++ b/go/oasis-test-runner/oasis/oasis_test.go @@ -0,0 +1,46 @@ +package oasis + +import ( + "bytes" + "crypto" + "fmt" + "testing" + + "github.com/oasislabs/ed25519" + "github.com/stretchr/testify/require" + + "github.com/oasislabs/oasis-core/go/common/crypto/drbg" +) + +func generateDeterministicNodeKeys(t *testing.T, rawSeed string) (ed25519.PublicKey, ed25519.PrivateKey) { + h := crypto.SHA512.New() + n, err := h.Write([]byte(rawSeed)) + require.Equal(t, len(rawSeed), n, "SHA512 Write bytes") + require.NoError(t, err, "SHA512 Write") + seed := h.Sum(nil) + + rng, err := drbg.New(crypto.SHA512, seed, nil, []byte("deterministic node identities test")) + require.NoError(t, err, "drbg New") + pub, priv, err := ed25519.GenerateKey(rng) + require.NoError(t, err, "ed25519 GenerateKey") + return pub, priv +} + +func TestNodeIdentity(t *testing.T) { + c0, _ := generateDeterministicNodeKeys(t, fmt.Sprintf(computeIdentitySeedTemplate, 0)) // Dbeo + c1, _ := generateDeterministicNodeKeys(t, fmt.Sprintf(computeIdentitySeedTemplate, 1)) // oWk0 + c2, _ := generateDeterministicNodeKeys(t, fmt.Sprintf(computeIdentitySeedTemplate, 2)) // hcWV + require.Equal(t, 1, bytes.Compare(c2, c0)) + require.Equal(t, 1, bytes.Compare(c1, c2)) + + b0, _ := generateDeterministicNodeKeys(t, ByzantineDefaultIdentitySeed) + b1, _ := generateDeterministicNodeKeys(t, ByzantineIndex1IdentitySeed) + b2, _ := generateDeterministicNodeKeys(t, ByzantineIndex2IdentitySeed) + b3, _ := generateDeterministicNodeKeys(t, ByzantineIndex3IdentitySeed) + require.Equal(t, 1, bytes.Compare(c0, b0)) + require.Equal(t, 1, bytes.Compare(b1, c0)) + require.Equal(t, 1, bytes.Compare(c2, b1)) + require.Equal(t, 1, bytes.Compare(b2, c2)) + require.Equal(t, 1, bytes.Compare(c1, b2)) + require.Equal(t, 1, 
bytes.Compare(b3, c1)) +} diff --git a/go/oasis-test-runner/scenario/e2e/byzantine.go b/go/oasis-test-runner/scenario/e2e/byzantine.go index 0075f153e7b..22fa586ee17 100644 --- a/go/oasis-test-runner/scenario/e2e/byzantine.go +++ b/go/oasis-test-runner/scenario/e2e/byzantine.go @@ -9,42 +9,40 @@ import ( // TODO: Consider referencing script names directly from the Byzantine node. -const byzantineDefaultIdentitySeed = "ekiden byzantine node worker, luck=1" - var ( // ByzantineComputeHonest is the byzantine compute honest scenario. - ByzantineComputeHonest scenario.Scenario = newByzantineImpl("compute-honest", nil) + ByzantineComputeHonest scenario.Scenario = newByzantineImpl("compute-honest", nil, oasis.ByzantineDefaultIdentitySeed) // ByzantineComputeWrong is the byzantine compute wrong scenario. ByzantineComputeWrong scenario.Scenario = newByzantineImpl("compute-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertComputeDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), - }) + }, oasis.ByzantineDefaultIdentitySeed) // ByzantineComputeStraggler is the byzantine compute straggler scenario. ByzantineComputeStraggler scenario.Scenario = newByzantineImpl("compute-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertComputeDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), - }) + }, oasis.ByzantineDefaultIdentitySeed) // ByzantineMergeHonest is the byzantine merge honest scenario. - ByzantineMergeHonest scenario.Scenario = newByzantineImpl("merge-honest", nil) + ByzantineMergeHonest scenario.Scenario = newByzantineImpl("merge-honest", nil, oasis.ByzantineDefaultIdentitySeed) // ByzantineMergeWrong is the byzantine merge wrong scenario. 
ByzantineMergeWrong scenario.Scenario = newByzantineImpl("merge-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoComputeDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), - }) + }, oasis.ByzantineDefaultIdentitySeed) // ByzantineMergeStraggler is the byzantine merge straggler scenario. ByzantineMergeStraggler scenario.Scenario = newByzantineImpl("merge-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoComputeDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), - }) + }, oasis.ByzantineDefaultIdentitySeed) ) type byzantineImpl struct { @@ -55,7 +53,7 @@ type byzantineImpl struct { logWatcherHandlerFactories []log.WatcherHandlerFactory } -func newByzantineImpl(script string, logWatcherHandlerFactories []log.WatcherHandlerFactory) scenario.Scenario { +func newByzantineImpl(script string, logWatcherHandlerFactories []log.WatcherHandlerFactory, identitySeed string) scenario.Scenario { return &byzantineImpl{ basicImpl: *newBasicImpl( "byzantine/"+script, @@ -63,7 +61,7 @@ func newByzantineImpl(script string, logWatcherHandlerFactories []log.WatcherHan []string{"set", "hello_key", "hello_value"}, ), script: script, - identitySeed: byzantineDefaultIdentitySeed, + identitySeed: identitySeed, logWatcherHandlerFactories: logWatcherHandlerFactories, } } From 6096d85baf080a7c9ecfd3493f85502b937c8e11 Mon Sep 17 00:00:00 2001 From: Warren He Date: Tue, 7 Jan 2020 12:54:55 -0800 Subject: [PATCH 06/10] go scheduler: merge and storage eligibility by runtime --- .../tendermint/apps/scheduler/scheduler.go | 22 +++++++++++++++++-- go/registry/api/api.go | 11 +++++----- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/go/consensus/tendermint/apps/scheduler/scheduler.go b/go/consensus/tendermint/apps/scheduler/scheduler.go index a56b0ec0ae2..c3025cb9709 100644 --- 
a/go/consensus/tendermint/apps/scheduler/scheduler.go +++ b/go/consensus/tendermint/apps/scheduler/scheduler.go @@ -376,7 +376,16 @@ func (app *schedulerApplication) isSuitableComputeWorker(n *node.Node, rt *regis } func (app *schedulerApplication) isSuitableStorageWorker(n *node.Node, rt *registry.Runtime, ts time.Time) bool { - return n.HasRoles(node.RoleStorageWorker) + if !n.HasRoles(node.RoleStorageWorker) { + return false + } + for _, nrt := range n.Runtimes { + if !nrt.ID.Equal(&rt.ID) { + continue + } + return true + } + return false } func (app *schedulerApplication) isSuitableTransactionScheduler(n *node.Node, rt *registry.Runtime, ts time.Time) bool { @@ -393,7 +402,16 @@ func (app *schedulerApplication) isSuitableTransactionScheduler(n *node.Node, rt } func (app *schedulerApplication) isSuitableMergeWorker(n *node.Node, rt *registry.Runtime, ts time.Time) bool { - return n.HasRoles(node.RoleComputeWorker) + if !n.HasRoles(node.RoleComputeWorker) { + return false + } + for _, nrt := range n.Runtimes { + if !nrt.ID.Equal(&rt.ID) { + continue + } + return true + } + return false } // Operates on consensus connection. diff --git a/go/registry/api/api.go b/go/registry/api/api.go index 80483500cbb..4f9cbddcad9 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -147,6 +147,7 @@ var ( // RuntimesRequiredRoles are the Node roles that require runtimes. RuntimesRequiredRoles = node.RoleComputeWorker | + node.RoleStorageWorker | node.RoleKeyManager // ConsensusAddressRequiredRoles are the Node roles that require Consensus Address. 
@@ -1285,15 +1286,15 @@ func SanityCheckNodes(nodes []*node.SignedNode, seenEntities map[signature.Publi return fmt.Errorf("registry: sanity check failed: compute worker node must have runtime(s)") } - if n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) == 0 { - return fmt.Errorf("registry: sanity check failed: key manager node must have runtime(s)") + if n.HasRoles(node.RoleStorageWorker) && len(n.Runtimes) == 0 { + return fmt.Errorf("registry: sanity check failed: storage worker node must have runtime(s)") } - if n.HasRoles(node.RoleStorageWorker) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { - return fmt.Errorf("registry: sanity check failed: storage worker node shouldn't have any runtimes") + if n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) == 0 { + return fmt.Errorf("registry: sanity check failed: key manager node must have runtime(s)") } - if n.HasRoles(node.RoleValidator) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { + if n.HasRoles(node.RoleValidator) && !n.HasRoles(node.RoleComputeWorker) && !n.HasRoles(node.RoleStorageWorker) && !n.HasRoles(node.RoleKeyManager) && len(n.Runtimes) > 0 { return fmt.Errorf("registry: sanity check failed: validator node shouldn't have any runtimes") } From 94c6c08748cd633cbd4a985ea5f28af12c0a2c1b Mon Sep 17 00:00:00 2001 From: Warren He Date: Tue, 7 Jan 2020 14:01:41 -0800 Subject: [PATCH 07/10] go registry: update tests for per-runtime storage --- go/registry/tests/tester.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/go/registry/tests/tester.go b/go/registry/tests/tester.go index bd570bd3cf9..ddeb43f386e 100644 --- a/go/registry/tests/tester.go +++ b/go/registry/tests/tester.go @@ -626,21 +626,18 @@ func (ent *TestEntity) NewTestNodes(nCompute int, nStorage int, runtimes []*node } nod.Entity = ent - var thisNodeRuntimes []*node.Runtime var role node.RolesMask if i < nCompute { 
role = node.RoleComputeWorker - thisNodeRuntimes = runtimes } else { role = node.RoleStorageWorker - thisNodeRuntimes = nil } nod.Node = &node.Node{ ID: nod.Signer.Public(), EntityID: ent.Entity.ID, Expiration: uint64(expiration), - Runtimes: thisNodeRuntimes, + Runtimes: runtimes, Roles: role, } addr := node.Address{ @@ -795,7 +792,7 @@ func (ent *TestEntity) NewTestNodes(nCompute int, nStorage int, runtimes []*node ID: nod.Signer.Public(), EntityID: ent.Entity.ID, Expiration: uint64(expiration), - Runtimes: thisNodeRuntimes, + Runtimes: runtimes, Roles: role, } addr = node.Address{ @@ -816,7 +813,7 @@ func (ent *TestEntity) NewTestNodes(nCompute int, nStorage int, runtimes []*node // Add invalid Re-Registration with changed Runtimes field. testRuntimeSigner := memorySigner.NewTestSigner("invalid-registration-runtime-seed") - newRuntimes := append([]*node.Runtime(nil), thisNodeRuntimes...) + newRuntimes := append([]*node.Runtime(nil), runtimes...) newRuntimes = append(newRuntimes, &node.Runtime{ID: publicKeyToNamespace(testRuntimeSigner.Public(), false)}) newNode := &node.Node{ ID: nod.Signer.Public(), From 0a9eaa85cd466418d26403fd2879a5b1f02db035 Mon Sep 17 00:00:00 2001 From: Warren He Date: Thu, 16 Jan 2020 15:56:46 -0800 Subject: [PATCH 08/10] go oasis-test-runner: update early registration wait --- go/oasis-test-runner/scenario/e2e/runtime_dynamic.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go b/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go index d0fc958910b..aebe9bc4d6b 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go +++ b/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go @@ -91,9 +91,9 @@ func (sc *runtimeDynamicImpl) Run(childEnv *env.Env) error { return err } - // NOTE: We also wait for storage workers as they can currently register even before the - // runtime is registered in the registry. If this changes, node count needs update. 
- numNodes := len(sc.net.Validators()) + len(sc.net.StorageWorkers()) + // NOTE: Storage workers need to wait until the runtime is registered in the registry. + // If this changes, node count needs update. + numNodes := len(sc.net.Validators()) if sc.net.Keymanager() != nil { numNodes++ } From bdbe9591d92f40526652b707a43b16b1cce4fcc9 Mon Sep 17 00:00:00 2001 From: Warren He Date: Tue, 14 Jan 2020 17:44:33 -0800 Subject: [PATCH 09/10] byzantine: beacon seed brute force tool --- go/consensus/tendermint/apps/beacon/beacon.go | 8 +- .../tendermint/apps/scheduler/scheduler.go | 41 ++++---- .../cmd/debug/byzantine/scheduler_test.go | 95 +++++++++++++++++++ 3 files changed, 124 insertions(+), 20 deletions(-) create mode 100644 go/oasis-node/cmd/debug/byzantine/scheduler_test.go diff --git a/go/consensus/tendermint/apps/beacon/beacon.go b/go/consensus/tendermint/apps/beacon/beacon.go index ec59822406d..c13a872f2d1 100644 --- a/go/consensus/tendermint/apps/beacon/beacon.go +++ b/go/consensus/tendermint/apps/beacon/beacon.go @@ -22,7 +22,7 @@ var ( errUnexpectedTimer = errors.New("beacon: unexpected timer") prodEntropyCtx = []byte("EkB-tmnt") - debugEntropyCtx = []byte("Ekb-Dumm") + DebugEntropyCtx = []byte("Ekb-Dumm") _ abci.Application = (*beaconApplication)(nil) ) @@ -121,11 +121,11 @@ func (app *beaconApplication) onBeaconEpochChange(ctx *abci.Context, epoch epoch } case true: // UNSAFE/DEBUG - Deterministic beacon.
- entropyCtx = debugEntropyCtx + entropyCtx = DebugEntropyCtx entropy = []byte("If you change this, you will fuck up the byzantine tests!!!") } - b := getBeacon(epoch, entropyCtx, entropy) + b := GetBeacon(epoch, entropyCtx, entropy) app.logger.Debug("onBeaconEpochChange: generated beacon", "epoch", epoch, @@ -161,7 +161,7 @@ func New() abci.Application { return app } -func getBeacon(beaconEpoch epochtime.EpochTime, entropyCtx []byte, entropy []byte) []byte { +func GetBeacon(beaconEpoch epochtime.EpochTime, entropyCtx []byte, entropy []byte) []byte { var tmp [8]byte binary.LittleEndian.PutUint64(tmp[:], uint64(beaconEpoch)) diff --git a/go/consensus/tendermint/apps/scheduler/scheduler.go b/go/consensus/tendermint/apps/scheduler/scheduler.go index c3025cb9709..986e117b3e4 100644 --- a/go/consensus/tendermint/apps/scheduler/scheduler.go +++ b/go/consensus/tendermint/apps/scheduler/scheduler.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "github.com/tendermint/tendermint/abci/types" + "github.com/oasislabs/oasis-core/go/common" "github.com/oasislabs/oasis-core/go/common/cbor" "github.com/oasislabs/oasis-core/go/common/crypto/drbg" "github.com/oasislabs/oasis-core/go/common/crypto/mathrand" @@ -37,12 +38,12 @@ import ( var ( _ abci.Application = (*schedulerApplication)(nil) - rngContextCompute = []byte("EkS-ABCI-Compute") - rngContextStorage = []byte("EkS-ABCI-Storage") - rngContextTransactionScheduler = []byte("EkS-ABCI-TransactionScheduler") - rngContextMerge = []byte("EkS-ABCI-Merge") - rngContextValidators = []byte("EkS-ABCI-Validators") - rngContextEntities = []byte("EkS-ABCI-Entities") + RNGContextCompute = []byte("EkS-ABCI-Compute") + RNGContextStorage = []byte("EkS-ABCI-Storage") + RNGContextTransactionScheduler = []byte("EkS-ABCI-TransactionScheduler") + RNGContextMerge = []byte("EkS-ABCI-Merge") + RNGContextValidators = []byte("EkS-ABCI-Validators") + RNGContextEntities = []byte("EkS-ABCI-Entities") errUnexpectedTransaction = 
errors.New("tendermint/scheduler: unexpected transaction") ) @@ -414,6 +415,16 @@ func (app *schedulerApplication) isSuitableMergeWorker(n *node.Node, rt *registr return false } +// GetPerm generates a permutation that we use to choose nodes from a list of eligible nodes to elect. +func GetPerm(beacon []byte, runtimeID common.Namespace, rngCtx []byte, nrNodes int) ([]int, error) { + drbg, err := drbg.New(crypto.SHA512, beacon, runtimeID[:], rngCtx) + if err != nil { + return nil, errors.Wrap(err, "tendermint/scheduler: couldn't instantiate DRBG") + } + rng := rand.New(mathrand.New(drbg)) + return rng.Perm(nrNodes), nil +} + // Operates on consensus connection. // Return error if node should crash. // For non-fatal problems, save a problem condition to the state and return successfully. @@ -437,24 +448,24 @@ func (app *schedulerApplication) electCommittee(ctx *abci.Context, request types switch kind { case scheduler.KindCompute: - rngCtx = rngContextCompute + rngCtx = RNGContextCompute threshold = staking.KindCompute isSuitableFn = app.isSuitableComputeWorker workerSize = int(rt.Compute.GroupSize) backupSize = int(rt.Compute.GroupBackupSize) case scheduler.KindMerge: - rngCtx = rngContextMerge + rngCtx = RNGContextMerge threshold = staking.KindCompute isSuitableFn = app.isSuitableMergeWorker workerSize = int(rt.Merge.GroupSize) backupSize = int(rt.Merge.GroupBackupSize) case scheduler.KindTransactionScheduler: - rngCtx = rngContextTransactionScheduler + rngCtx = RNGContextTransactionScheduler threshold = staking.KindCompute isSuitableFn = app.isSuitableTransactionScheduler workerSize = int(rt.TxnScheduler.GroupSize) case scheduler.KindStorage: - rngCtx = rngContextStorage + rngCtx = RNGContextStorage threshold = staking.KindStorage isSuitableFn = app.isSuitableStorageWorker workerSize = int(rt.Storage.GroupSize) @@ -499,12 +510,10 @@ func (app *schedulerApplication) electCommittee(ctx *abci.Context, request types } // Do the actual election. 
- drbg, err := drbg.New(crypto.SHA512, beacon, rt.ID[:], rngCtx) + idxs, err := GetPerm(beacon, rt.ID, rngCtx, nrNodes) if err != nil { - return errors.Wrap(err, "tendermint/scheduler: couldn't instantiate DRBG") + return err } - rng := rand.New(mathrand.New(drbg)) - idxs := rng.Perm(nrNodes) var members []*scheduler.CommitteeNode for i := 0; i < len(idxs); i++ { @@ -585,7 +594,7 @@ func (app *schedulerApplication) electValidators(ctx *abci.Context, beacon []byt } // Shuffle the node list. - drbg, err := drbg.New(crypto.SHA512, beacon, nil, rngContextValidators) + drbg, err := drbg.New(crypto.SHA512, beacon, nil, RNGContextValidators) if err != nil { return errors.Wrap(err, "tendermint/scheduler: couldn't instantiate DRBG") } @@ -662,7 +671,7 @@ func publicKeyMapToSliceByStake(entMap map[signature.PublicKey]bool, entityStake entities := publicKeyMapToSortedSlice(entMap) // Shuffle the sorted slice to make tie-breaks "random". - drbg, err := drbg.New(crypto.SHA512, beacon, nil, rngContextEntities) + drbg, err := drbg.New(crypto.SHA512, beacon, nil, RNGContextEntities) if err != nil { return nil, errors.Wrap(err, "tendermint/scheduler: couldn't instantiate DRBG") } diff --git a/go/oasis-node/cmd/debug/byzantine/scheduler_test.go b/go/oasis-node/cmd/debug/byzantine/scheduler_test.go new file mode 100644 index 00000000000..95379d4d607 --- /dev/null +++ b/go/oasis-node/cmd/debug/byzantine/scheduler_test.go @@ -0,0 +1,95 @@ +package byzantine + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/oasislabs/oasis-core/go/common" + "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/beacon" + schedulerapp "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/scheduler" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" + scheduler "github.com/oasislabs/oasis-core/go/scheduler/api" +) + +func hasSuitablePermutations(t *testing.T, beacon []byte, runtimeID common.Namespace) bool { + 
numComputeNodes := 4 + computeIdxs, err := schedulerapp.GetPerm(beacon, runtimeID, schedulerapp.RNGContextCompute, numComputeNodes) + require.NoError(t, err, "schedulerapp.GetPerm compute") + transactionSchedulerIdxs, err := schedulerapp.GetPerm(beacon, runtimeID, schedulerapp.RNGContextTransactionScheduler, numComputeNodes) + require.NoError(t, err, "schedulerapp.GetPerm transaction scheduler") + mergeIdxs, err := schedulerapp.GetPerm(beacon, runtimeID, schedulerapp.RNGContextMerge, numComputeNodes) + require.NoError(t, err, "schedulerapp.GetPerm merge") + + fmt.Printf("%20s schedule %v\n", scheduler.KindCompute, computeIdxs) + fmt.Printf("%20s schedule %v\n", scheduler.KindTransactionScheduler, transactionSchedulerIdxs) + fmt.Printf("%20s schedule %v\n", scheduler.KindMerge, mergeIdxs) + + committees := map[scheduler.CommitteeKind]struct { + workers int + backupWorkers int + perm []int + }{ + scheduler.KindCompute: {workers: 2, backupWorkers: 1, perm: computeIdxs}, + scheduler.KindTransactionScheduler: {workers: 1, backupWorkers: 0, perm: transactionSchedulerIdxs}, + scheduler.KindMerge: {workers: 2, backupWorkers: 1, perm: mergeIdxs}, + } + + for _, c1Kind := range []scheduler.CommitteeKind{ + scheduler.KindCompute, + scheduler.KindMerge, + } { + c1 := committees[c1Kind] + maxWorker := c1.workers + foundSuitable := false + for c1Pos := 0; c1Pos < maxWorker; c1Pos++ { + c1Slot := c1.perm[c1Pos] + conflict := false + CheckConflicts: + for c2Kind, c2 := range committees { + if c2Kind == c1Kind { + continue + } + totalScheduled := c2.workers + c2.backupWorkers + for c2Pos := 0; c2Pos < totalScheduled; c2Pos++ { + c2Slot := c2.perm[c2Pos] + if c1Slot == c2Slot { + conflict = true + break CheckConflicts + } + } + } + if !conflict { + fmt.Printf("suitable %s slot %d\n", c1Kind, c1Slot) + foundSuitable = true + break + } + } + if !foundSuitable { + fmt.Printf("no suitable %s slot\n", c1Kind) + return false + } + } + return true +} + +func TestDebugSchedule(t *testing.T) 
{ + var epoch epochtime.EpochTime = 1 + var runtimeID common.Namespace + require.NoError(t, runtimeID.UnmarshalHex("8000000000000000000000000000000000000000000000000000000000000000"), "runtimeID.UnmarshalHex") + deterministicBeaconEntropy := []byte("If you change this, you will fuck up the byzantine tests!!!") + for { + fmt.Printf("assessing seed %s\n", deterministicBeaconEntropy) + + b := beacon.GetBeacon(epoch, beacon.DebugEntropyCtx, deterministicBeaconEntropy) + fmt.Printf("beacon %s\n", base64.StdEncoding.EncodeToString(b)) + + if hasSuitablePermutations(t, b, runtimeID) { + break + } + + deterministicBeaconEntropy = append(deterministicBeaconEntropy, '!') + } +} From db985d36e965c8f436f1ecba6e5d09deef7c60d7 Mon Sep 17 00:00:00 2001 From: Warren He Date: Wed, 8 Jan 2020 14:15:12 -0800 Subject: [PATCH 10/10] go beacon: change dummy entropy This is so that we have more usable testing schedules with a compute-only role and a merge-only role. --- go/consensus/tendermint/apps/beacon/beacon.go | 7 +++++- .../cmd/debug/byzantine/scheduler_test.go | 4 ++-- go/oasis-test-runner/oasis/compute.go | 8 +++---- go/oasis-test-runner/oasis/oasis_test.go | 6 ++--- .../scenario/e2e/byzantine.go | 22 ++++++++++++++----- 5 files changed, 31 insertions(+), 16 deletions(-) diff --git a/go/consensus/tendermint/apps/beacon/beacon.go b/go/consensus/tendermint/apps/beacon/beacon.go index c13a872f2d1..947d282ac02 100644 --- a/go/consensus/tendermint/apps/beacon/beacon.go +++ b/go/consensus/tendermint/apps/beacon/beacon.go @@ -122,7 +122,12 @@ func (app *beaconApplication) onBeaconEpochChange(ctx *abci.Context, epoch epoch case true: // UNSAFE/DEBUG - Deterministic beacon. entropyCtx = DebugEntropyCtx - entropy = []byte("If you change this, you will fuck up the byzantine tests!!!") + // We're setting this random seed so that we have suitable committee schedules for Byzantine E2E scenarios, + // where we want nodes to be scheduled for only one committee. 
The permutations derived from this on the first + // epoch need to have (i) an index that's compute worker only and (ii) an index that's merge worker only. See + // /go/oasis-test-runner/scenario/e2e/byzantine.go for the permutations generated from this seed. These + // permutations are generated independently of the deterministic node IDs. + entropy = []byte("If you change this, you will fuck up the byzantine tests!!") } b := GetBeacon(epoch, entropyCtx, entropy) diff --git a/go/oasis-node/cmd/debug/byzantine/scheduler_test.go b/go/oasis-node/cmd/debug/byzantine/scheduler_test.go index 95379d4d607..b0f92f5979a 100644 --- a/go/oasis-node/cmd/debug/byzantine/scheduler_test.go +++ b/go/oasis-node/cmd/debug/byzantine/scheduler_test.go @@ -76,10 +76,10 @@ func hasSuitablePermutations(t *testing.T, beacon []byte, runtimeID common.Names } func TestDebugSchedule(t *testing.T) { - var epoch epochtime.EpochTime = 1 + var epoch epochtime.EpochTime = 2 var runtimeID common.Namespace require.NoError(t, runtimeID.UnmarshalHex("8000000000000000000000000000000000000000000000000000000000000000"), "runtimeID.UnmarshalHex") - deterministicBeaconEntropy := []byte("If you change this, you will fuck up the byzantine tests!!!") + deterministicBeaconEntropy := []byte("If you change this, you will fuck up the byzantine tests!!") for { fmt.Printf("assessing seed %s\n", deterministicBeaconEntropy) diff --git a/go/oasis-test-runner/oasis/compute.go b/go/oasis-test-runner/oasis/compute.go index 5fd1f90d519..b87e65ab067 100644 --- a/go/oasis-test-runner/oasis/compute.go +++ b/go/oasis-test-runner/oasis/compute.go @@ -13,10 +13,10 @@ import ( const ( computeIdentitySeedTemplate = "ekiden node worker %d" - ByzantineDefaultIdentitySeed = "ekiden byzantine node worker" // index 0 - ByzantineIndex1IdentitySeed = "ekiden byzantine node worker, luck=1" - ByzantineIndex2IdentitySeed = "ekiden byzantine node worker, luck=11" - ByzantineIndex3IdentitySeed = "ekiden byzantine node worker, luck=6" + 
ByzantineDefaultIdentitySeed = "ekiden byzantine node worker" // slot 0 + ByzantineSlot1IdentitySeed = "ekiden byzantine node worker, luck=1" + ByzantineSlot2IdentitySeed = "ekiden byzantine node worker, luck=11" + ByzantineSlot3IdentitySeed = "ekiden byzantine node worker, luck=6" ) // Compute is an Oasis compute node. diff --git a/go/oasis-test-runner/oasis/oasis_test.go b/go/oasis-test-runner/oasis/oasis_test.go index 106e0876010..4476df1311c 100644 --- a/go/oasis-test-runner/oasis/oasis_test.go +++ b/go/oasis-test-runner/oasis/oasis_test.go @@ -34,9 +34,9 @@ func TestNodeIdentity(t *testing.T) { require.Equal(t, 1, bytes.Compare(c1, c2)) b0, _ := generateDeterministicNodeKeys(t, ByzantineDefaultIdentitySeed) - b1, _ := generateDeterministicNodeKeys(t, ByzantineIndex1IdentitySeed) - b2, _ := generateDeterministicNodeKeys(t, ByzantineIndex2IdentitySeed) - b3, _ := generateDeterministicNodeKeys(t, ByzantineIndex3IdentitySeed) + b1, _ := generateDeterministicNodeKeys(t, ByzantineSlot1IdentitySeed) + b2, _ := generateDeterministicNodeKeys(t, ByzantineSlot2IdentitySeed) + b3, _ := generateDeterministicNodeKeys(t, ByzantineSlot3IdentitySeed) require.Equal(t, 1, bytes.Compare(c0, b0)) require.Equal(t, 1, bytes.Compare(b1, c0)) require.Equal(t, 1, bytes.Compare(c2, b1)) diff --git a/go/oasis-test-runner/scenario/e2e/byzantine.go b/go/oasis-test-runner/scenario/e2e/byzantine.go index 22fa586ee17..25abd7a5fc0 100644 --- a/go/oasis-test-runner/scenario/e2e/byzantine.go +++ b/go/oasis-test-runner/scenario/e2e/byzantine.go @@ -10,39 +10,49 @@ import ( // TODO: Consider referencing script names directly from the Byzantine node. var ( + // Permutations generated in the epoch 2 election are + // compute: 3 (w), 0 (w), 2 (b), 1 (i) + // transaction scheduler: 0 (w), 3 (i), 1 (i), 2 (i) + // merge: 1 (w), 2 (w), 0 (b), 3 (i) + // w = worker; b = backup; i = invalid + // For compute scripts, it suffices to be index 3. + // For merge scripts, it suffices to be index 1. 
+ // No index is transaction scheduler only. + // Indices are by order of node ID. + // ByzantineComputeHonest is the byzantine compute honest scenario. - ByzantineComputeHonest scenario.Scenario = newByzantineImpl("compute-honest", nil, oasis.ByzantineDefaultIdentitySeed) + ByzantineComputeHonest scenario.Scenario = newByzantineImpl("compute-honest", nil, oasis.ByzantineSlot3IdentitySeed) // ByzantineComputeWrong is the byzantine compute wrong scenario. ByzantineComputeWrong scenario.Scenario = newByzantineImpl("compute-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertComputeDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), - }, oasis.ByzantineDefaultIdentitySeed) + }, oasis.ByzantineSlot3IdentitySeed) // ByzantineComputeStraggler is the byzantine compute straggler scenario. ByzantineComputeStraggler scenario.Scenario = newByzantineImpl("compute-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertComputeDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), - }, oasis.ByzantineDefaultIdentitySeed) + }, oasis.ByzantineSlot3IdentitySeed) // ByzantineMergeHonest is the byzantine merge honest scenario. - ByzantineMergeHonest scenario.Scenario = newByzantineImpl("merge-honest", nil, oasis.ByzantineDefaultIdentitySeed) + ByzantineMergeHonest scenario.Scenario = newByzantineImpl("merge-honest", nil, oasis.ByzantineSlot1IdentitySeed) // ByzantineMergeWrong is the byzantine merge wrong scenario. ByzantineMergeWrong scenario.Scenario = newByzantineImpl("merge-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoComputeDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), - }, oasis.ByzantineDefaultIdentitySeed) + }, oasis.ByzantineSlot1IdentitySeed) // ByzantineMergeStraggler is the byzantine merge straggler scenario. 
ByzantineMergeStraggler scenario.Scenario = newByzantineImpl("merge-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoComputeDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), - }, oasis.ByzantineDefaultIdentitySeed) + }, oasis.ByzantineSlot1IdentitySeed) ) type byzantineImpl struct {