diff --git a/go/consensus/tendermint/apps/roothash/api.go b/go/consensus/tendermint/apps/roothash/api.go index 473ec18222d..04360196e8b 100644 --- a/go/consensus/tendermint/apps/roothash/api.go +++ b/go/consensus/tendermint/apps/roothash/api.go @@ -26,10 +26,10 @@ var ( // merge discrepancy detected events (value is a CBOR serialized // ValueMergeDiscrepancyDetected). KeyMergeDiscrepancyDetected = []byte("merge-discrepancy") - // KeyComputeDiscrepancyDetected is an ABCI event attribute key for + // KeyExecutionDiscrepancyDetected is an ABCI event attribute key for // merge discrepancy detected events (value is a CBOR serialized - // ValueComputeDiscrepancyDetected). - KeyComputeDiscrepancyDetected = []byte("compute-discrepancy") + // ValueExecutionDiscrepancyDetected). + KeyExecutionDiscrepancyDetected = []byte("execution-discrepancy") // KeyFinalized is an ABCI event attribute key for finalized blocks // (value is a CBOR serialized ValueFinalized). KeyFinalized = []byte("finalized") @@ -48,9 +48,9 @@ type ValueMergeDiscrepancyDetected struct { ID common.Namespace `json:"id"` } -// ValueComputeDiscrepancyDetected is the value component of a +// ValueExecutionDiscrepancyDetected is the value component of a // TagMergeDiscrepancyDetected. -type ValueComputeDiscrepancyDetected struct { - ID common.Namespace `json:"id"` - Event roothash.ComputeDiscrepancyDetectedEvent `json:"event"` +type ValueExecutionDiscrepancyDetected struct { + ID common.Namespace `json:"id"` + Event roothash.ExecutionDiscrepancyDetectedEvent `json:"event"` } diff --git a/go/consensus/tendermint/apps/roothash/roothash.go b/go/consensus/tendermint/apps/roothash/roothash.go index 1b05ea56fa9..391c4fb511a 100644 --- a/go/consensus/tendermint/apps/roothash/roothash.go +++ b/go/consensus/tendermint/apps/roothash/roothash.go @@ -122,15 +122,15 @@ func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epoc // of all committees. We need this to be able to quickly see if any // committee members have changed. // - // We first include the current epoch, then all compute committee member + // We first include the current epoch, then all executor committee member // hashes and then the merge committee member hash: // // [little-endian epoch] - // "compute committees follow" - // [compute committe 1 members hash] - // [compute committe 2 members hash] + // "executor committees follow" + // [executor committee 1 members hash] + // [executor committee 2 members hash] // ... - // [compute committe n members hash] + // [executor committee n members hash] // "merge committee follows" // [merge committee members hash] // @@ -138,33 +138,33 @@ func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epoc var rawEpoch [8]byte binary.LittleEndian.PutUint64(rawEpoch[:], uint64(epoch)) committeeIDParts = append(committeeIDParts, rawEpoch[:]) - committeeIDParts = append(committeeIDParts, []byte("compute committees follow")) + committeeIDParts = append(committeeIDParts, []byte("executor committees follow")) - // NOTE: There will later be multiple compute committees. - var computeCommittees []*scheduler.Committee - cc1, err := schedState.Committee(scheduler.KindCompute, rtID) + // NOTE: There will later be multiple executor committees. 
+ var executorCommittees []*scheduler.Committee + xc1, err := schedState.Committee(scheduler.KindExecutor, rtID) if err != nil { - app.logger.Error("checkCommittees: failed to get compute committee from scheduler", + app.logger.Error("checkCommittees: failed to get executor committee from scheduler", "err", err, "runtime", rtID, ) continue } - if cc1 != nil { - computeCommittees = append(computeCommittees, cc1) + if xc1 != nil { + executorCommittees = append(executorCommittees, xc1) } - computePool := &commitment.MultiPool{ + executorPool := &commitment.MultiPool{ Committees: make(map[hash.Hash]*commitment.Pool), } - if len(computeCommittees) == 0 { - app.logger.Warn("checkCommittees: no compute committees", + if len(executorCommittees) == 0 { + app.logger.Warn("checkCommittees: no executor committees", "runtime", rtID, ) } - for _, computeCommittee := range computeCommittees { - computeNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) - for idx, n := range computeCommittee.Members { + for _, executorCommittee := range executorCommittees { + executorNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) + for idx, n := range executorCommittee.Members { var nodeRuntime *node.Runtime node, err1 := regState.Node(n.PublicKey) if err1 != nil { @@ -185,18 +185,18 @@ func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epoc ) continue } - computeNodeInfo[n.PublicKey] = commitment.NodeInfo{ + executorNodeInfo[n.PublicKey] = commitment.NodeInfo{ CommitteeNode: idx, Runtime: nodeRuntime, } } - computeCommitteeID := computeCommittee.EncodedMembersHash() - committeeIDParts = append(committeeIDParts, computeCommitteeID[:]) + executorCommitteeID := executorCommittee.EncodedMembersHash() + committeeIDParts = append(committeeIDParts, executorCommitteeID[:]) - computePool.Committees[computeCommitteeID] = &commitment.Pool{ + executorPool.Committees[executorCommitteeID] = &commitment.Pool{ Runtime: rtState.Runtime, - Committee: computeCommittee, - NodeInfo: computeNodeInfo, + Committee: executorCommittee, + NodeInfo: executorNodeInfo, } } @@ -259,7 +259,7 @@ func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epoc ) rtState.Timer.Stop(ctx) - rtState.Round = roothashState.NewRound(committeeID, computePool, &mergePool, blk) + rtState.Round = roothashState.NewRound(committeeID, executorPool, &mergePool, blk) // Emit an empty epoch transition block in the new round. This is required so that // the clients can be sure what state is final when an epoch transition occurs. 
@@ -309,13 +309,13 @@ func (app *rootHashApplication) ExecuteTx(ctx *abci.Context, tx *transaction.Tra state := roothashState.NewMutableState(ctx.State()) switch tx.Method { - case roothash.MethodComputeCommit: - var cc roothash.ComputeCommit - if err := cbor.Unmarshal(tx.Body, &cc); err != nil { + case roothash.MethodExecutorCommit: + var xc roothash.ExecutorCommit + if err := cbor.Unmarshal(tx.Body, &xc); err != nil { return err } - return app.commit(ctx, state, cc.ID, &cc) + return app.commit(ctx, state, xc.ID, &xc) case roothash.MethodMergeCommit: var mc roothash.MergeCommit if err := cbor.Unmarshal(tx.Body, &mc); err != nil { @@ -476,8 +476,8 @@ func (app *rootHashApplication) FireTimer(ctx *abci.Context, timer *abci.Timer) panic(err) } } - for _, pool := range rtState.Round.ComputePool.GetTimeoutCommittees(ctx.Now()) { - app.tryFinalizeCompute(ctx, runtime, rtState, pool, true) + for _, pool := range rtState.Round.ExecutorPool.GetTimeoutCommittees(ctx.Now()) { + app.tryFinalizeExecute(ctx, runtime, rtState, pool, true) } return nil @@ -583,12 +583,12 @@ func (app *rootHashApplication) commit( return err } } - case *roothash.ComputeCommit: + case *roothash.ExecutorCommit: pools := make(map[*commitment.Pool]bool) for _, commit := range c.Commits { var pool *commitment.Pool - if pool, err = rtState.Round.AddComputeCommitment(&commit, sv); err != nil { - logger.Error("failed to add compute commitment to round", + if pool, err = rtState.Round.AddExecutorCommitment(&commit, sv); err != nil { + logger.Error("failed to add executor commitment to round", "err", err, "round", blockNr, ) @@ -598,10 +598,10 @@ func (app *rootHashApplication) commit( pools[pool] = true } - // Try to finalize compute rounds. + // Try to finalize execute rounds. if !ctx.IsCheckOnly() { for pool := range pools { - app.tryFinalizeCompute(ctx, runtime, rtState, pool, false) + app.tryFinalizeExecute(ctx, runtime, rtState, pool, false) } } default: @@ -639,7 +639,7 @@ func (app *rootHashApplication) updateTimer( } } -func (app *rootHashApplication) tryFinalizeCompute( +func (app *rootHashApplication) tryFinalizeExecute( ctx *abci.Context, runtime *registry.Runtime, rtState *roothashState.RuntimeState, @@ -654,22 +654,22 @@ func (app *rootHashApplication) tryFinalizeCompute( defer app.updateTimer(ctx, runtime, rtState, blockNr) if rtState.Round.Finalized { - app.logger.Error("attempted to finalize compute when block already finalized", + app.logger.Error("attempted to finalize execute when block already finalized", "round", blockNr, "committee_id", committeeID, ) return } - _, err := pool.TryFinalize(ctx.Now(), runtime.Compute.RoundTimeout, forced, true) + _, err := pool.TryFinalize(ctx.Now(), runtime.Executor.RoundTimeout, forced, true) switch err { case nil: // No error -- there is no discrepancy. But only the merge committee - // can make progress even if we have all compute commitments. + // can make progress even if we have all executor commitments. // TODO: Check if we need to punish the merge committee. - app.logger.Warn("no compute discrepancy, but only merge committee can make progress", + app.logger.Warn("no execution discrepancy, but only merge committee can make progress", "round", blockNr, "committee_id", committeeID, ) @@ -684,20 +684,20 @@ func (app *rootHashApplication) tryFinalizeCompute( return case commitment.ErrDiscrepancyDetected: // Discrepancy has been detected. 
- app.logger.Warn("compute discrepancy detected", + app.logger.Warn("execution discrepancy detected", "round", blockNr, "committee_id", committeeID, - logging.LogEvent, roothash.LogEventComputeDiscrepancyDetected, + logging.LogEvent, roothash.LogEventExecutionDiscrepancyDetected, ) - tagV := ValueComputeDiscrepancyDetected{ + tagV := ValueExecutionDiscrepancyDetected{ ID: runtime.ID, - Event: roothash.ComputeDiscrepancyDetectedEvent{ + Event: roothash.ExecutionDiscrepancyDetectedEvent{ CommitteeID: pool.GetCommitteeID(), Timeout: forced, }, } - ctx.EmitEvent(tmapi.NewEventBuilder(app.Name()).Attribute(KeyComputeDiscrepancyDetected, cbor.Marshal(tagV))) + ctx.EmitEvent(tmapi.NewEventBuilder(app.Name()).Attribute(KeyExecutionDiscrepancyDetected, cbor.Marshal(tagV))) return default: } @@ -747,7 +747,7 @@ func (app *rootHashApplication) tryFinalizeMerge( blk.Header.Timestamp = uint64(ctx.Now().Unix()) rtState.Round.MergePool.ResetCommitments() - rtState.Round.ComputePool.ResetCommitments() + rtState.Round.ExecutorPool.ResetCommitments() rtState.Round.Finalized = true return blk diff --git a/go/consensus/tendermint/apps/roothash/state/round.go b/go/consensus/tendermint/apps/roothash/state/round.go index ed1684d7bd2..e8194ca595e 100644 --- a/go/consensus/tendermint/apps/roothash/state/round.go +++ b/go/consensus/tendermint/apps/roothash/state/round.go @@ -11,40 +11,40 @@ import ( // Round is a roothash round. type Round struct { - CommitteeID hash.Hash `json:"committee_id"` - ComputePool *commitment.MultiPool `json:"compute_pool"` - MergePool *commitment.Pool `json:"merge_pool"` + CommitteeID hash.Hash `json:"committee_id"` + ExecutorPool *commitment.MultiPool `json:"executor_pool"` + MergePool *commitment.Pool `json:"merge_pool"` CurrentBlock *block.Block `json:"current_block"` Finalized bool `json:"finalized"` } func (r *Round) Reset() { - r.ComputePool.ResetCommitments() + r.ExecutorPool.ResetCommitments() r.MergePool.ResetCommitments() r.Finalized = false } func (r *Round) GetNextTimeout() (timeout time.Time) { - timeout = r.ComputePool.GetNextTimeout() + timeout = r.ExecutorPool.GetNextTimeout() if timeout.IsZero() || (!r.MergePool.NextTimeout.IsZero() && r.MergePool.NextTimeout.Before(timeout)) { timeout = r.MergePool.NextTimeout } return } -func (r *Round) AddComputeCommitment(commitment *commitment.ComputeCommitment, sv commitment.SignatureVerifier) (*commitment.Pool, error) { +func (r *Round) AddExecutorCommitment(commitment *commitment.ExecutorCommitment, sv commitment.SignatureVerifier) (*commitment.Pool, error) { if r.Finalized { return nil, errors.New("tendermint/roothash: round is already finalized, can't commit") } - return r.ComputePool.AddComputeCommitment(r.CurrentBlock, sv, commitment) + return r.ExecutorPool.AddExecutorCommitment(r.CurrentBlock, sv, commitment) } func (r *Round) AddMergeCommitment(commitment *commitment.MergeCommitment, sv commitment.SignatureVerifier) error { if r.Finalized { return errors.New("tendermint/roothash: round is already finalized, can't commit") } - return r.MergePool.AddMergeCommitment(r.CurrentBlock, sv, commitment, r.ComputePool) + return r.MergePool.AddMergeCommitment(r.CurrentBlock, sv, commitment, r.ExecutorPool) } func (r *Round) Transition(blk *block.Block) { @@ -54,14 +54,14 @@ func (r *Round) Transition(blk *block.Block) { func NewRound( committeeID hash.Hash, - computePool *commitment.MultiPool, + executorPool *commitment.MultiPool, mergePool *commitment.Pool, blk *block.Block, ) *Round { r := &Round{ CommitteeID: committeeID, 
CurrentBlock: blk, - ComputePool: computePool, + ExecutorPool: executorPool, MergePool: mergePool, } r.Reset() diff --git a/go/consensus/tendermint/apps/scheduler/scheduler.go b/go/consensus/tendermint/apps/scheduler/scheduler.go index f41bbc608b7..2ebd08f2915 100644 --- a/go/consensus/tendermint/apps/scheduler/scheduler.go +++ b/go/consensus/tendermint/apps/scheduler/scheduler.go @@ -37,7 +37,7 @@ import ( var ( _ abci.Application = (*schedulerApplication)(nil) - rngContextCompute = []byte("EkS-ABCI-Compute") + rngContextExecutor = []byte("EkS-ABCI-Compute") rngContextStorage = []byte("EkS-ABCI-Storage") rngContextTransactionScheduler = []byte("EkS-ABCI-TransactionScheduler") rngContextMerge = []byte("EkS-ABCI-Merge") @@ -215,7 +215,7 @@ func (app *schedulerApplication) BeginBlock(ctx *abci.Context, request types.Req } kinds := []scheduler.CommitteeKind{ - scheduler.KindCompute, + scheduler.KindExecutor, scheduler.KindStorage, scheduler.KindTransactionScheduler, scheduler.KindMerge, @@ -339,7 +339,7 @@ func (app *schedulerApplication) FireTimer(ctx *abci.Context, t *abci.Timer) err return errors.New("tendermint/scheduler: unexpected timer") } -func (app *schedulerApplication) isSuitableComputeWorker(n *node.Node, rt *registry.Runtime, ts time.Time) bool { +func (app *schedulerApplication) isSuitableExecutorWorker(n *node.Node, rt *registry.Runtime, ts time.Time) bool { if !n.HasRoles(node.RoleComputeWorker) { return false } @@ -419,7 +419,7 @@ func (app *schedulerApplication) isSuitableMergeWorker(n *node.Node, rt *registr // For non-fatal problems, save a problem condition to the state and return successfully. func (app *schedulerApplication) electCommittee(ctx *abci.Context, request types.RequestBeginBlock, epoch epochtime.EpochTime, beacon []byte, entityStake *stakeAccumulator, entitiesEligibleForReward map[signature.PublicKey]bool, rt *registry.Runtime, nodes []*node.Node, kind scheduler.CommitteeKind) error { // Only generic compute runtimes need to elect all the committees. 
- if !rt.IsCompute() && kind != scheduler.KindCompute { + if !rt.IsCompute() && kind != scheduler.KindExecutor { return nil } @@ -436,12 +436,12 @@ func (app *schedulerApplication) electCommittee(ctx *abci.Context, request types ) switch kind { - case scheduler.KindCompute: - rngCtx = rngContextCompute + case scheduler.KindExecutor: + rngCtx = rngContextExecutor threshold = staking.KindCompute - isSuitableFn = app.isSuitableComputeWorker - workerSize = int(rt.Compute.GroupSize) - backupSize = int(rt.Compute.GroupBackupSize) + isSuitableFn = app.isSuitableExecutorWorker + workerSize = int(rt.Executor.GroupSize) + backupSize = int(rt.Executor.GroupBackupSize) case scheduler.KindMerge: rngCtx = rngContextMerge threshold = staking.KindCompute diff --git a/go/consensus/tendermint/roothash/roothash.go b/go/consensus/tendermint/roothash/roothash.go index bffd0c7b7bb..9571d994d6b 100644 --- a/go/consensus/tendermint/roothash/roothash.go +++ b/go/consensus/tendermint/roothash/roothash.go @@ -432,8 +432,8 @@ func (tb *tendermintBackend) worker(ctx context.Context) { // nolint: gocyclo notifiers := tb.getRuntimeNotifiers(value.ID) notifiers.eventNotifier.Broadcast(&api.Event{MergeDiscrepancyDetected: &value.Event}) - } else if bytes.Equal(pair.GetKey(), app.KeyComputeDiscrepancyDetected) { - var value app.ValueComputeDiscrepancyDetected + } else if bytes.Equal(pair.GetKey(), app.KeyExecutionDiscrepancyDetected) { + var value app.ValueExecutionDiscrepancyDetected if err := cbor.Unmarshal(pair.GetValue(), &value); err != nil { tb.logger.Error("worker: failed to get discrepancy from tag", "err", err, @@ -442,7 +442,7 @@ func (tb *tendermintBackend) worker(ctx context.Context) { // nolint: gocyclo } notifiers := tb.getRuntimeNotifiers(value.ID) - notifiers.eventNotifier.Broadcast(&api.Event{ComputeDiscrepancyDetected: &value.Event}) + notifiers.eventNotifier.Broadcast(&api.Event{ExecutionDiscrepancyDetected: &value.Event}) } } } diff --git a/go/genesis/tests/tester.go b/go/genesis/tests/tester.go index 1a93070be32..aeb463c4779 100644 --- a/go/genesis/tests/tester.go +++ b/go/genesis/tests/tester.go @@ -156,7 +156,7 @@ func TestGenesisSanityCheck(t *testing.T) { ID: testRuntimeID, Kind: registry.KindCompute, KeyManager: &testKMRuntime.ID, - Compute: registry.ComputeParameters{ + Executor: registry.ExecutorParameters{ GroupSize: 1, RoundTimeout: 1 * time.Second, }, @@ -378,7 +378,7 @@ func TestGenesisSanityCheck(t *testing.T) { d.Registry.Runtimes = []*registry.SignedRuntime{signedTestKMRuntime, signedTestRuntime, signedTestRuntime} require.Error(d.SanityCheck(), "duplicate runtime IDs should be rejected") - // TODO: fiddle with compute/merge/txnsched parameters. + // TODO: fiddle with executor/merge/txnsched parameters. 
d = *testDoc te = *testEntity diff --git a/go/oasis-net-runner/fixtures/default.go b/go/oasis-net-runner/fixtures/default.go index 19980d988c0..edaaa550f20 100644 --- a/go/oasis-net-runner/fixtures/default.go +++ b/go/oasis-net-runner/fixtures/default.go @@ -79,7 +79,7 @@ func NewDefaultFixture() (*oasis.NetworkFixture, error) { Entity: 0, Keymanager: 0, Binary: viper.GetString(cfgRuntimeBinary), - Compute: registry.ComputeParameters{ + Executor: registry.ExecutorParameters{ GroupSize: 2, GroupBackupSize: 1, RoundTimeout: 20 * time.Second, diff --git a/go/oasis-node/cmd/debug/byzantine/byzantine.go b/go/oasis-node/cmd/debug/byzantine/byzantine.go index 77f68768715..0b7ff4787de 100644 --- a/go/oasis-node/cmd/debug/byzantine/byzantine.go +++ b/go/oasis-node/cmd/debug/byzantine/byzantine.go @@ -37,20 +37,20 @@ var ( Short: "run some node behaviors for testing, often not honest", PersistentPreRun: activateCommonConfig, } - computeHonestCmd = &cobra.Command{ - Use: "compute-honest", - Short: "act as an honest compute worker", - Run: doComputeHonest, + executorHonestCmd = &cobra.Command{ + Use: "executor-honest", + Short: "act as an honest executor worker", + Run: doExecutorHonest, } - computeWrongCmd = &cobra.Command{ - Use: "compute-wrong", - Short: "act as a compute worker that sends wrong output", - Run: doComputeWrong, + executorWrongCmd = &cobra.Command{ + Use: "executor-wrong", + Short: "act as an executor worker that sends wrong output", + Run: doExecutorWrong, } - computeStragglerCmd = &cobra.Command{ - Use: "compute-straggler", - Short: "act as a compute worker that registers and doesn't do any work", - Run: doComputeStraggler, + executorStragglerCmd = &cobra.Command{ + Use: "executor-straggler", + Short: "act as an executor worker that registers and doesn't do any work", + Run: doExecutorStraggler, } mergeHonestCmd = &cobra.Command{ Use: "merge-honest", @@ -76,7 +76,7 @@ func activateCommonConfig(cmd *cobra.Command, args []string) { ias.SetAllowDebugEnclaves() } -func doComputeHonest(cmd *cobra.Command, args []string) { +func doExecutorHonest(cmd *cobra.Command, args []string) { if err := common.Init(); err != nil { common.EarlyLogAndExit(err) } @@ -120,18 +120,18 @@ func doComputeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckScheduled(computeCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { + if err = schedulerCheckScheduled(executorCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } - logger.Debug("compute honest: compute schedule ok") + logger.Debug("executor honest: executor schedule ok") storageCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != 
nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) @@ -151,7 +151,7 @@ func doComputeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check not scheduled merge failed: %+v", err)) } - logger.Debug("compute honest: connecting to storage committee") + logger.Debug("executor honest: connecting to storage committee") hnss, err := storageConnectToCommittee(ht, electionHeight, storageCommittee, scheduler.Worker, defaultIdentity) if err != nil { panic(fmt.Sprintf("storage connect to committee failed: %+v", err)) @@ -163,7 +163,7 @@ func doComputeHonest(cmd *cobra.Command, args []string) { if err = cbc.receiveBatch(ph); err != nil { panic(fmt.Sprintf("compute receive batch failed: %+v", err)) } - logger.Debug("compute honest: received batch", "bd", cbc.bd) + logger.Debug("executor honest: received batch", "bd", cbc.bd) ctx := context.Background() @@ -186,7 +186,7 @@ func doComputeHonest(cmd *cobra.Command, args []string) { if err = cbc.commitTrees(ctx); err != nil { panic(fmt.Sprintf("compute commit trees failed: %+v", err)) } - logger.Debug("compute honest: committed storage trees", + logger.Debug("executor honest: committed storage trees", "io_write_log", cbc.ioWriteLog, "new_io_root", cbc.newIORoot, "state_write_log", cbc.stateWriteLog, @@ -197,17 +197,17 @@ func doComputeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("compute upload batch failed: %+v", err)) } - if err = cbc.createCommitment(defaultIdentity, rak, computeCommittee.EncodedMembersHash()); err != nil { + if err = cbc.createCommitment(defaultIdentity, rak, executorCommittee.EncodedMembersHash()); err != nil { panic(fmt.Sprintf("compute create commitment failed: %+v", err)) } if err = cbc.publishToCommittee(ht, electionHeight, mergeCommittee, scheduler.Worker, ph, defaultRuntimeID, electionHeight); err != nil { panic(fmt.Sprintf("compute publish to committee merge worker failed: %+v", err)) } - logger.Debug("compute honest: commitment sent") + logger.Debug("executor honest: commitment sent") } -func doComputeWrong(cmd *cobra.Command, args []string) { +func doExecutorWrong(cmd *cobra.Command, args []string) { if err := common.Init(); err != nil { common.EarlyLogAndExit(err) } @@ -248,18 +248,18 @@ func doComputeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckScheduled(computeCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { + if err = schedulerCheckScheduled(executorCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } - logger.Debug("compute wrong: compute schedule ok") + logger.Debug("executor wrong: executor schedule ok") storageCommittee, err := schedulerGetCommittee(ht, 
electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindStorage, err)) @@ -279,7 +279,7 @@ func doComputeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check not scheduled merge failed: %+v", err)) } - logger.Debug("compute honest: connecting to storage committee") + logger.Debug("executor wrong: connecting to storage committee") hnss, err := storageConnectToCommittee(ht, electionHeight, storageCommittee, scheduler.Worker, defaultIdentity) if err != nil { panic(fmt.Sprintf("storage connect to committee failed: %+v", err)) } @@ -291,7 +291,7 @@ func doComputeWrong(cmd *cobra.Command, args []string) { if err = cbc.receiveBatch(ph); err != nil { panic(fmt.Sprintf("compute receive batch failed: %+v", err)) } - logger.Debug("compute wrong: received batch", "bd", cbc.bd) + logger.Debug("executor wrong: received batch", "bd", cbc.bd) ctx := context.Background() @@ -314,7 +314,7 @@ func doComputeWrong(cmd *cobra.Command, args []string) { if err = cbc.commitTrees(ctx); err != nil { panic(fmt.Sprintf("compute commit trees failed: %+v", err)) } - logger.Debug("compute wrong: committed storage trees", + logger.Debug("executor wrong: committed storage trees", "io_write_log", cbc.ioWriteLog, "new_io_root", cbc.newIORoot, "state_write_log", cbc.stateWriteLog, @@ -325,17 +325,17 @@ func doComputeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("compute upload batch failed: %+v", err)) } - if err = cbc.createCommitment(defaultIdentity, rak, computeCommittee.EncodedMembersHash()); err != nil { + if err = cbc.createCommitment(defaultIdentity, rak, executorCommittee.EncodedMembersHash()); err != nil { panic(fmt.Sprintf("compute create commitment failed: %+v", err)) } if err = cbc.publishToCommittee(ht, electionHeight, mergeCommittee, scheduler.Worker, ph, defaultRuntimeID, electionHeight); err != nil { panic(fmt.Sprintf("compute publish to committee merge worker failed: %+v", err)) } - logger.Debug("compute wrong: commitment sent") + logger.Debug("executor wrong: commitment sent") } -func doComputeStraggler(cmd *cobra.Command, args []string) { +func doExecutorStraggler(cmd *cobra.Command, args []string) { if err := common.Init(); err != nil { common.EarlyLogAndExit(err) } @@ -375,18 +375,18 @@ func doComputeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckScheduled(computeCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { + if err = schedulerCheckScheduled(executorCommittee, defaultIdentity.NodeSigner.Public(), scheduler.Worker); err != nil { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } - logger.Debug("compute straggler: compute schedule ok") + logger.Debug("executor straggler: 
executor schedule ok") transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) if err != nil { panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindTransactionScheduler, err)) @@ -407,9 +407,9 @@ func doComputeStraggler(cmd *cobra.Command, args []string) { if err = cbc.receiveBatch(ph); err != nil { panic(fmt.Sprintf("compute receive batch failed: %+v", err)) } - logger.Debug("compute straggler: received batch", "bd", cbc.bd) + logger.Debug("executor straggler: received batch", "bd", cbc.bd) - logger.Debug("compute straggler: bailing") + logger.Debug("executor straggler: bailing") } func doMergeHonest(cmd *cobra.Command, args []string) { @@ -446,7 +446,7 @@ func doMergeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } @@ -458,12 +458,12 @@ func doMergeHonest(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge honest: merge schedule ok") - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { - panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + if err = schedulerCheckNotScheduled(executorCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled executor failed: %+v", err)) } storageCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != nil { @@ -550,7 +550,7 @@ func doMergeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } @@ -562,12 +562,12 @@ func doMergeWrong(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge wrong: merge schedule ok") - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { - panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + if err = schedulerCheckNotScheduled(executorCommittee, defaultIdentity.NodeSigner.Public()); err != 
nil { + panic(fmt.Sprintf("scheduler check not scheduled executor failed: %+v", err)) } storageCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindStorage, defaultRuntimeID) if err != nil { @@ -606,8 +606,8 @@ func doMergeWrong(cmd *cobra.Command, args []string) { origCommitments := mbc.commitments var emptyRoot hash.Hash emptyRoot.Empty() - mbc.commitments = []*commitment.OpenComputeCommitment{ - &commitment.OpenComputeCommitment{ + mbc.commitments = []*commitment.OpenExecutorCommitment{ + &commitment.OpenExecutorCommitment{ Body: &commitment.ComputeBody{ Header: commitment.ComputeResultsHeader{ IORoot: emptyRoot, @@ -678,7 +678,7 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } - electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindCompute) + electionHeight, err := schedulerNextElectionHeight(ht.service, scheduler.KindExecutor) if err != nil { panic(fmt.Sprintf("scheduler next election height failed: %+v", err)) } @@ -690,12 +690,12 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { panic(fmt.Sprintf("scheduler check scheduled failed: %+v", err)) } logger.Debug("merge straggler: merge schedule ok") - computeCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindCompute, defaultRuntimeID) + executorCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindExecutor, defaultRuntimeID) if err != nil { - panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindCompute, err)) + panic(fmt.Sprintf("scheduler get committee %s failed: %+v", scheduler.KindExecutor, err)) } - if err = schedulerCheckNotScheduled(computeCommittee, defaultIdentity.NodeSigner.Public()); err != nil { - panic(fmt.Sprintf("scheduler check not scheduled compute failed: %+v", err)) + if err = schedulerCheckNotScheduled(executorCommittee, defaultIdentity.NodeSigner.Public()); err != nil { + panic(fmt.Sprintf("scheduler check not scheduled executor failed: %+v", err)) } transactionSchedulerCommittee, err := schedulerGetCommittee(ht, electionHeight, scheduler.KindTransactionScheduler, defaultRuntimeID) if err != nil { @@ -718,9 +718,9 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { // Register registers the byzantine sub-command and all of its children. 
func Register(parentCmd *cobra.Command) { - byzantineCmd.AddCommand(computeHonestCmd) - byzantineCmd.AddCommand(computeWrongCmd) - byzantineCmd.AddCommand(computeStragglerCmd) + byzantineCmd.AddCommand(executorHonestCmd) + byzantineCmd.AddCommand(executorWrongCmd) + byzantineCmd.AddCommand(executorStragglerCmd) byzantineCmd.AddCommand(mergeHonestCmd) byzantineCmd.AddCommand(mergeWrongCmd) byzantineCmd.AddCommand(mergeStragglerCmd) diff --git a/go/oasis-node/cmd/debug/byzantine/compute.go b/go/oasis-node/cmd/debug/byzantine/executor.go similarity index 96% rename from go/oasis-node/cmd/debug/byzantine/compute.go rename to go/oasis-node/cmd/debug/byzantine/executor.go index a29d1cdc54f..7c35855ef4f 100644 --- a/go/oasis-node/cmd/debug/byzantine/compute.go +++ b/go/oasis-node/cmd/debug/byzantine/executor.go @@ -35,7 +35,7 @@ type computeBatchContext struct { newIORoot hash.Hash storageReceipts []*storage.Receipt - commit *commitment.ComputeCommitment + commit *commitment.ExecutorCommitment } func newComputeBatchContext() *computeBatchContext { @@ -182,9 +182,9 @@ func (cbc *computeBatchContext) createCommitment(id *identity.Identity, rak sign computeBody.RakSig = rakSig.Signature } var err error - cbc.commit, err = commitment.SignComputeCommitment(id.NodeSigner, computeBody) + cbc.commit, err = commitment.SignExecutorCommitment(id.NodeSigner, computeBody) if err != nil { - return errors.Wrap(err, "commitment sign compute commitment") + return errors.Wrap(err, "commitment sign executor commitment") } return nil @@ -195,7 +195,7 @@ func (cbc *computeBatchContext) publishToCommittee(ht *honestTendermint, height RuntimeID: runtimeID, GroupVersion: groupVersion, SpanContext: nil, - ComputeWorkerFinished: &p2p.ComputeWorkerFinished{ + ExecutorWorkerFinished: &p2p.ExecutorWorkerFinished{ Commitment: *cbc.commit, }, }); err != nil { diff --git a/go/oasis-node/cmd/debug/byzantine/merge.go b/go/oasis-node/cmd/debug/byzantine/merge.go index 3e93d53b84a..899f21f9303 100644 --- a/go/oasis-node/cmd/debug/byzantine/merge.go +++ b/go/oasis-node/cmd/debug/byzantine/merge.go @@ -17,7 +17,7 @@ import ( type mergeBatchContext struct { currentBlock *block.Block - commitments []*commitment.OpenComputeCommitment + commitments []*commitment.OpenExecutorCommitment storageReceipts []*storage.Receipt newBlock *block.Block @@ -38,17 +38,17 @@ func (mbc *mergeBatchContext) loadCurrentBlock(ht *honestTendermint, runtimeID c return nil } -func mergeReceiveCommitment(ph *p2pHandle) (*commitment.OpenComputeCommitment, error) { +func mergeReceiveCommitment(ph *p2pHandle) (*commitment.OpenExecutorCommitment, error) { req := <-ph.requests req.responseCh <- nil - if req.msg.ComputeWorkerFinished == nil { - return nil, errors.Errorf("expecting compute worker finished message, got %+v", req.msg) + if req.msg.ExecutorWorkerFinished == nil { + return nil, errors.Errorf("expecting executor worker finished message, got %+v", req.msg) } - openCom, err := req.msg.ComputeWorkerFinished.Commitment.Open() + openCom, err := req.msg.ExecutorWorkerFinished.Commitment.Open() if err != nil { - return nil, errors.Wrap(err, "request message ComputeWorkerFinished Open") + return nil, errors.Wrap(err, "request message ExecutorWorkerFinished Open") } return openCom, nil @@ -119,14 +119,14 @@ func (mbc *mergeBatchContext) process(ctx context.Context, hnss []*honestNodeSto } func (mbc *mergeBatchContext) createCommitment(id *identity.Identity) error { - var computeCommits []commitment.ComputeCommitment + var executorCommits 
[]commitment.ExecutorCommitment for _, openCom := range mbc.commitments { - computeCommits = append(computeCommits, openCom.ComputeCommitment) + executorCommits = append(executorCommits, openCom.ExecutorCommitment) } var err error mbc.commit, err = commitment.SignMergeCommitment(id.NodeSigner, &commitment.MergeBody{ - ComputeCommits: computeCommits, - Header: mbc.newBlock.Header, + ExecutorCommits: executorCommits, + Header: mbc.newBlock.Header, }) if err != nil { return errors.Wrap(err, "commitment sign merge commitment") diff --git a/go/oasis-node/cmd/node/node.go b/go/oasis-node/cmd/node/node.go index 70c2b96cc65..e128120bc14 100644 --- a/go/oasis-node/cmd/node/node.go +++ b/go/oasis-node/cmd/node/node.go @@ -59,7 +59,7 @@ import ( workerCommon "github.com/oasislabs/oasis-core/go/worker/common" "github.com/oasislabs/oasis-core/go/worker/common/p2p" "github.com/oasislabs/oasis-core/go/worker/compute" - "github.com/oasislabs/oasis-core/go/worker/computeenable" + "github.com/oasislabs/oasis-core/go/worker/executor" workerKeymanager "github.com/oasislabs/oasis-core/go/worker/keymanager" "github.com/oasislabs/oasis-core/go/worker/merge" "github.com/oasislabs/oasis-core/go/worker/registration" @@ -125,7 +125,7 @@ type Node struct { RuntimeClient runtimeClientAPI.RuntimeClient CommonWorker *workerCommon.Worker - ComputeWorker *compute.Worker + ExecutorWorker *executor.Worker StorageWorker *workerStorage.Worker TransactionSchedulerWorker *txnscheduler.Worker MergeWorker *merge.Worker @@ -204,14 +204,14 @@ func (n *Node) initWorkers(logger *logging.Logger) error { return err } - // Initialize the P2P worker if the compute worker is enabled. Since the P2P + // Initialize the P2P worker if the compute workers are enabled. Since the P2P // layer does not have a separate Start method and starts listening // immediately when created, make sure that we don't start it if it is not // needed. // - // Currently, only compute, txn scheduler and merge workers need P2P + // Currently, only executor, txn scheduler and merge workers need P2P // transport. - if computeenable.Enabled() { + if compute.Enabled() { p2pCtx, p2pSvc := service.NewContextCleanup(context.Background()) if genesisDoc.Registry.Parameters.DebugAllowUnroutableAddresses { p2p.DebugForceAllowUnroutableAddresses() @@ -226,7 +226,7 @@ func (n *Node) initWorkers(logger *logging.Logger) error { // Initialize the common worker. n.CommonWorker, err = workerCommon.New( dataDir, - computeenable.Enabled() || workerStorage.Enabled() || workerKeymanager.Enabled(), + compute.Enabled() || workerStorage.Enabled() || workerKeymanager.Enabled(), n.Identity, n.RootHash, n.Registry, @@ -311,8 +311,8 @@ func (n *Node) initWorkers(logger *logging.Logger) error { } n.svcMgr.Register(n.MergeWorker) - // Initialize the compute worker. - n.ComputeWorker, err = compute.New( + // Initialize the executor worker. + n.ExecutorWorker, err = executor.New( dataDir, n.CommonWorker, n.MergeWorker, @@ -321,7 +321,7 @@ func (n *Node) initWorkers(logger *logging.Logger) error { if err != nil { return err } - n.svcMgr.Register(n.ComputeWorker) + n.svcMgr.Register(n.ExecutorWorker) // Initialize the sentry worker. n.SentryWorker, err = workerSentry.New( @@ -337,7 +337,7 @@ func (n *Node) initWorkers(logger *logging.Logger) error { // Initialize the transaction scheduler. 
n.TransactionSchedulerWorker, err = txnscheduler.New( n.CommonWorker, - n.ComputeWorker, + n.ExecutorWorker, n.RegistrationWorker, ) if err != nil { @@ -354,8 +354,8 @@ func (n *Node) startWorkers(logger *logging.Logger) error { return err } - // Start the compute worker. - if err := n.ComputeWorker.Start(); err != nil { + // Start the executor worker. + if err := n.ExecutorWorker.Start(); err != nil { return err } @@ -791,7 +791,7 @@ func init() { ias.Flags, workerKeymanager.Flags, runtimeRegistry.Flags, - computeenable.Flags, + compute.Flags, p2p.Flags, registration.Flags, txnscheduler.Flags, diff --git a/go/oasis-node/cmd/registry/runtime/runtime.go b/go/oasis-node/cmd/registry/runtime/runtime.go index 2d96e6769fd..650a2703acf 100644 --- a/go/oasis-node/cmd/registry/runtime/runtime.go +++ b/go/oasis-node/cmd/registry/runtime/runtime.go @@ -45,11 +45,11 @@ const ( CfgVersion = "runtime.version" CfgVersionEnclave = "runtime.version.enclave" - // Compute commiteee flags. - CfgComputeGroupSize = "runtime.compute.group_size" - CfgComputeGroupBackupSize = "runtime.compute.group_backup_size" - CfgComputeAllowedStragglers = "runtime.compute.allowed_stragglers" - CfgComputeRoundTimeout = "runtime.compute.round_timeout" + // Executor committee flags. + CfgExecutorGroupSize = "runtime.executor.group_size" + CfgExecutorGroupBackupSize = "runtime.executor.group_backup_size" + CfgExecutorAllowedStragglers = "runtime.executor.allowed_stragglers" + CfgExecutorRoundTimeout = "runtime.executor.round_timeout" // Merge committee flags. CfgMergeGroupSize = "runtime.merge.group_size" @@ -328,11 +328,11 @@ func runtimeFromFlags() (*registry.Runtime, signature.Signer, error) { Version: version.FromU64(viper.GetUint64(CfgVersion)), }, KeyManager: kmID, - Compute: registry.ComputeParameters{ - GroupSize: uint64(viper.GetInt64(CfgComputeGroupSize)), - GroupBackupSize: uint64(viper.GetInt64(CfgComputeGroupBackupSize)), - AllowedStragglers: uint64(viper.GetInt64(CfgComputeAllowedStragglers)), - RoundTimeout: viper.GetDuration(CfgComputeRoundTimeout), + Executor: registry.ExecutorParameters{ + GroupSize: uint64(viper.GetInt64(CfgExecutorGroupSize)), + GroupBackupSize: uint64(viper.GetInt64(CfgExecutorGroupBackupSize)), + AllowedStragglers: uint64(viper.GetInt64(CfgExecutorAllowedStragglers)), + RoundTimeout: viper.GetDuration(CfgExecutorRoundTimeout), }, Merge: registry.MergeParameters{ GroupSize: uint64(viper.GetInt64(CfgMergeGroupSize)), @@ -440,11 +440,11 @@ func init() { runtimeFlags.String(CfgVersion, "", "Runtime version. Value is 64-bit hex e.g. 0x0000000100020003 for 1.2.3") runtimeFlags.StringSlice(CfgVersionEnclave, nil, "Runtime TEE enclave version(s)") - // Init Compute commitee flags. - runtimeFlags.Uint64(CfgComputeGroupSize, 1, "Number of workers in the runtime compute group/committee") - runtimeFlags.Uint64(CfgComputeGroupBackupSize, 0, "Number of backup workers in the runtime compute group/committee") - runtimeFlags.Uint64(CfgComputeAllowedStragglers, 0, "Number of stragglers allowed per round in the runtime compute group") - runtimeFlags.Duration(CfgComputeRoundTimeout, 10*time.Second, "Compute committee round timeout for this runtime") + // Init Executor committee flags. 
+ runtimeFlags.Uint64(CfgExecutorGroupSize, 1, "Number of workers in the runtime executor group/committee") + runtimeFlags.Uint64(CfgExecutorGroupBackupSize, 0, "Number of backup workers in the runtime executor group/committee") + runtimeFlags.Uint64(CfgExecutorAllowedStragglers, 0, "Number of stragglers allowed per round in the runtime executor group") + runtimeFlags.Duration(CfgExecutorRoundTimeout, 10*time.Second, "Executor committee round timeout for this runtime") // Init Merge committee flags. runtimeFlags.Uint64(CfgMergeGroupSize, 1, "Number of workers in the runtime merge group/committee") diff --git a/go/oasis-node/node_test.go b/go/oasis-node/node_test.go index ee291b11f6a..968e29a059d 100644 --- a/go/oasis-node/node_test.go +++ b/go/oasis-node/node_test.go @@ -42,9 +42,9 @@ import ( storageClientTests "github.com/oasislabs/oasis-core/go/storage/client/tests" storageTests "github.com/oasislabs/oasis-core/go/storage/tests" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" - computeCommittee "github.com/oasislabs/oasis-core/go/worker/compute/committee" - computeWorkerTests "github.com/oasislabs/oasis-core/go/worker/compute/tests" - "github.com/oasislabs/oasis-core/go/worker/computeenable" + "github.com/oasislabs/oasis-core/go/worker/compute" + executorCommittee "github.com/oasislabs/oasis-core/go/worker/executor/committee" + executorWorkerTests "github.com/oasislabs/oasis-core/go/worker/executor/tests" storageWorker "github.com/oasislabs/oasis-core/go/worker/storage" storageWorkerTests "github.com/oasislabs/oasis-core/go/worker/storage/tests" txnschedulerCommittee "github.com/oasislabs/oasis-core/go/worker/txnscheduler/committee" @@ -66,7 +66,7 @@ var ( {cmdCommonFlags.CfgConsensusValidator, true}, {cmdCommonFlags.CfgDebugDontBlameOasis, true}, {storage.CfgBackend, "badger"}, - {computeenable.CfgWorkerEnabled, true}, + {compute.CfgWorkerEnabled, true}, {workerCommon.CfgRuntimeBackend, "mock"}, {workerCommon.CfgRuntimeLoader, "mock-runtime"}, {workerCommon.CfgClientPort, workerClientPort}, @@ -78,7 +78,7 @@ var ( testRuntime = ®istry.Runtime{ // ID: default value, - Compute: registry.ComputeParameters{ + Executor: registry.ExecutorParameters{ GroupSize: 1, GroupBackupSize: 0, RoundTimeout: 20 * time.Second, @@ -108,7 +108,7 @@ type testNode struct { *node.Node runtimeID common.Namespace - computeCommitteeNode *computeCommittee.Node + executorCommitteeNode *executorCommittee.Node txnschedulerCommitteeNode *txnschedulerCommittee.Node entity *entity.Entity @@ -213,7 +213,7 @@ func TestNode(t *testing.T) { // including the worker tests. {"RegisterTestEntityRuntime", testRegisterEntityRuntime}, - {"ComputeWorker", testComputeWorker}, + {"ExecutorWorker", testExecutorWorker}, {"TransactionSchedulerWorker", testTransactionSchedulerWorker}, // StorageWorker test case @@ -270,10 +270,10 @@ func testRegisterEntityRuntime(t *testing.T, node *testNode) { err = consensusAPI.SignAndSubmitTx(context.Background(), node.Consensus, node.entitySigner, tx) require.NoError(err, "register test entity") - // Get the runtime and the corresponding compute committee node instance. - computeRT := node.ComputeWorker.GetRuntime(testRuntime.ID) - require.NotNil(t, computeRT) - node.computeCommitteeNode = computeRT + // Get the runtime and the corresponding executor committee node instance. 
+ executorRT := node.ExecutorWorker.GetRuntime(testRuntime.ID) + require.NotNil(t, executorRT) + node.executorCommitteeNode = executorRT // Get the runtime and the corresponding transaction scheduler committee node instance. txnschedulerRT := node.TransactionSchedulerWorker.GetRuntime(testRuntime.ID) @@ -403,11 +403,11 @@ func testRootHash(t *testing.T, node *testNode) { roothashTests.RootHashImplementationTests(t, node.RootHash, node.Consensus, node.Identity) } -func testComputeWorker(t *testing.T, node *testNode) { +func testExecutorWorker(t *testing.T, node *testNode) { timeSource := (node.Epochtime).(epochtime.SetableBackend) - require.NotNil(t, node.computeCommitteeNode) - computeWorkerTests.WorkerImplementationTests(t, node.ComputeWorker, node.runtimeID, node.computeCommitteeNode, timeSource) + require.NotNil(t, node.executorCommitteeNode) + executorWorkerTests.WorkerImplementationTests(t, node.ExecutorWorker, node.runtimeID, node.executorCommitteeNode, timeSource) } func testStorageWorker(t *testing.T, node *testNode) { diff --git a/go/oasis-test-runner/oasis/args.go b/go/oasis-test-runner/oasis/args.go index 2d7bc4bbd00..2923cabaa68 100644 --- a/go/oasis-test-runner/oasis/args.go +++ b/go/oasis-test-runner/oasis/args.go @@ -21,7 +21,7 @@ import ( "github.com/oasislabs/oasis-core/go/storage" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" "github.com/oasislabs/oasis-core/go/worker/common/p2p" - "github.com/oasislabs/oasis-core/go/worker/computeenable" + "github.com/oasislabs/oasis-core/go/worker/compute" "github.com/oasislabs/oasis-core/go/worker/keymanager" "github.com/oasislabs/oasis-core/go/worker/registration" workerSentry "github.com/oasislabs/oasis-core/go/worker/sentry" @@ -193,7 +193,7 @@ func (args *argBuilder) workerRuntimeBinary(id common.Namespace, fn string) *arg } func (args *argBuilder) workerComputeEnabled() *argBuilder { - args.vec = append(args.vec, "--"+computeenable.CfgWorkerEnabled) + args.vec = append(args.vec, "--"+compute.CfgWorkerEnabled) return args } diff --git a/go/oasis-test-runner/oasis/fixture.go b/go/oasis-test-runner/oasis/fixture.go index dddfec595e7..0675981efd5 100644 --- a/go/oasis-test-runner/oasis/fixture.go +++ b/go/oasis-test-runner/oasis/fixture.go @@ -160,7 +160,7 @@ type RuntimeFixture struct { GenesisState string `json:"genesis_state"` GenesisRound uint64 `json:"genesis_round"` - Compute registry.ComputeParameters `json:"compute"` + Executor registry.ExecutorParameters `json:"executor"` Merge registry.MergeParameters `json:"merge"` TxnScheduler registry.TxnSchedulerParameters `json:"txn_scheduler"` Storage registry.StorageParameters `json:"storage"` @@ -194,7 +194,7 @@ func (f *RuntimeFixture) Create(netFixture *NetworkFixture, net *Network) (*Runt Keymanager: km, TEEHardware: netFixture.TEE.Hardware, MrSigner: netFixture.TEE.MrSigner, - Compute: f.Compute, + Executor: f.Executor, Merge: f.Merge, TxnScheduler: f.TxnScheduler, Storage: f.Storage, diff --git a/go/oasis-test-runner/oasis/log.go b/go/oasis-test-runner/oasis/log.go index d002957ff66..67cc39859bc 100644 --- a/go/oasis-test-runner/oasis/log.go +++ b/go/oasis-test-runner/oasis/log.go @@ -37,16 +37,16 @@ func LogAssertNoRoundFailures() log.WatcherHandlerFactory { return LogAssertNotEvent(roothash.LogEventRoundFailed, "round failure detected") } -// LogAssertComputeDiscrepancyDetected returns a handler which checks whether a -// compute discrepancy was detected based on JSON log output. 
-func LogAssertComputeDiscrepancyDetected() log.WatcherHandlerFactory { - return LogAssertEvent(roothash.LogEventComputeDiscrepancyDetected, "compute discrepancy not detected") +// LogAssertExecutionDiscrepancyDetected returns a handler which checks whether an +// execution discrepancy was detected based on JSON log output. +func LogAssertExecutionDiscrepancyDetected() log.WatcherHandlerFactory { + return LogAssertEvent(roothash.LogEventExecutionDiscrepancyDetected, "execution discrepancy not detected") } -// LogAssertNoComputeDiscrepancyDetected returns a handler which checks whether a -// compute discrepancy was not detected based on JSON log output. -func LogAssertNoComputeDiscrepancyDetected() log.WatcherHandlerFactory { - return LogAssertNotEvent(roothash.LogEventComputeDiscrepancyDetected, "compute discrepancy detected") +// LogAssertNoExecutionDiscrepancyDetected returns a handler which checks whether an +// execution discrepancy was not detected based on JSON log output. +func LogAssertNoExecutionDiscrepancyDetected() log.WatcherHandlerFactory { + return LogAssertNotEvent(roothash.LogEventExecutionDiscrepancyDetected, "execution discrepancy detected") } // LogAssertMergeDiscrepancyDetected returns a handler which checks whether a diff --git a/go/oasis-test-runner/oasis/runtime.go b/go/oasis-test-runner/oasis/runtime.go index e7411ca0a65..8ccc6542725 100644 --- a/go/oasis-test-runner/oasis/runtime.go +++ b/go/oasis-test-runner/oasis/runtime.go @@ -47,7 +47,7 @@ type RuntimeCfg struct { // nolint: maligned GenesisState string GenesisRound uint64 - Compute registry.ComputeParameters + Executor registry.ExecutorParameters Merge registry.MergeParameters TxnScheduler registry.TxnSchedulerParameters Storage registry.StorageParameters @@ -93,10 +93,10 @@ func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { } if cfg.Kind == registry.KindCompute { args = append(args, []string{ - "--" + cmdRegRt.CfgComputeGroupSize, strconv.FormatUint(cfg.Compute.GroupSize, 10), - "--" + cmdRegRt.CfgComputeGroupBackupSize, strconv.FormatUint(cfg.Compute.GroupBackupSize, 10), - "--" + cmdRegRt.CfgComputeAllowedStragglers, strconv.FormatUint(cfg.Compute.AllowedStragglers, 10), - "--" + cmdRegRt.CfgComputeRoundTimeout, cfg.Compute.RoundTimeout.String(), + "--" + cmdRegRt.CfgExecutorGroupSize, strconv.FormatUint(cfg.Executor.GroupSize, 10), + "--" + cmdRegRt.CfgExecutorGroupBackupSize, strconv.FormatUint(cfg.Executor.GroupBackupSize, 10), + "--" + cmdRegRt.CfgExecutorAllowedStragglers, strconv.FormatUint(cfg.Executor.AllowedStragglers, 10), + "--" + cmdRegRt.CfgExecutorRoundTimeout, cfg.Executor.RoundTimeout.String(), "--" + cmdRegRt.CfgMergeGroupSize, strconv.FormatUint(cfg.Merge.GroupSize, 10), "--" + cmdRegRt.CfgMergeGroupBackupSize, strconv.FormatUint(cfg.Merge.GroupBackupSize, 10), "--" + cmdRegRt.CfgMergeAllowedStragglers, strconv.FormatUint(cfg.Merge.AllowedStragglers, 10), diff --git a/go/oasis-test-runner/scenario/e2e/basic.go b/go/oasis-test-runner/scenario/e2e/basic.go index 45a59c6447f..ac2638d2e2f 100644 --- a/go/oasis-test-runner/scenario/e2e/basic.go +++ b/go/oasis-test-runner/scenario/e2e/basic.go @@ -36,7 +36,7 @@ var ( DefaultBasicLogWatcherHandlerFactories = []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), - oasis.LogAssertNoComputeDiscrepancyDetected(), + oasis.LogAssertNoExecutionDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), } ) @@ -102,7 +102,7 @@ func (sc *basicImpl) Fixture() (*oasis.NetworkFixture, error) { 
Entity: 0, Keymanager: 0, Binary: runtimeBinary, - Compute: registry.ComputeParameters{ + Executor: registry.ExecutorParameters{ GroupSize: 2, GroupBackupSize: 1, RoundTimeout: 10 * time.Second, diff --git a/go/oasis-test-runner/scenario/e2e/byzantine.go b/go/oasis-test-runner/scenario/e2e/byzantine.go index 3615159fccd..dd16abb584e 100644 --- a/go/oasis-test-runner/scenario/e2e/byzantine.go +++ b/go/oasis-test-runner/scenario/e2e/byzantine.go @@ -15,29 +15,29 @@ import ( var ( // Permutations generated in the epoch 1 election are - // compute: 0 (w), 3 (w), 1 (b), 2 (i) + // executor: 0 (w), 3 (w), 1 (b), 2 (i) // transaction scheduler: 1 (w), 3 (i), 0 (i), 2 (i) // merge: 2 (w), 0 (w), 1 (b), 3 (i) // w = worker; b = backup; i = invalid - // For compute scripts, it suffices to be index 3. + // For executor scripts, it suffices to be index 3. // For merge scripts, it suffices to be index 2. // No index is transaction scheduler only. // Indices are by order of node ID. - // ByzantineComputeHonest is the byzantine compute honest scenario. - ByzantineComputeHonest scenario.Scenario = newByzantineImpl("compute-honest", nil, oasis.ByzantineIndex3IdentitySeed) - // ByzantineComputeWrong is the byzantine compute wrong scenario. - ByzantineComputeWrong scenario.Scenario = newByzantineImpl("compute-wrong", []log.WatcherHandlerFactory{ + // ByzantineExecutorHonest is the byzantine executor honest scenario. + ByzantineExecutorHonest scenario.Scenario = newByzantineImpl("executor-honest", nil, oasis.ByzantineIndex3IdentitySeed) + // ByzantineExecutorWrong is the byzantine executor wrong scenario. + ByzantineExecutorWrong scenario.Scenario = newByzantineImpl("executor-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), - oasis.LogAssertComputeDiscrepancyDetected(), + oasis.LogAssertExecutionDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), }, oasis.ByzantineIndex3IdentitySeed) - // ByzantineComputeStraggler is the byzantine compute straggler scenario. - ByzantineComputeStraggler scenario.Scenario = newByzantineImpl("compute-straggler", []log.WatcherHandlerFactory{ + // ByzantineExecutorStraggler is the byzantine executor straggler scenario. + ByzantineExecutorStraggler scenario.Scenario = newByzantineImpl("executor-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), - oasis.LogAssertComputeDiscrepancyDetected(), + oasis.LogAssertExecutionDiscrepancyDetected(), oasis.LogAssertNoMergeDiscrepancyDetected(), }, oasis.ByzantineIndex3IdentitySeed) @@ -47,14 +47,14 @@ var ( ByzantineMergeWrong scenario.Scenario = newByzantineImpl("merge-wrong", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), - oasis.LogAssertNoComputeDiscrepancyDetected(), + oasis.LogAssertNoExecutionDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), }, oasis.ByzantineIndex2IdentitySeed) // ByzantineMergeStraggler is the byzantine merge straggler scenario. 
ByzantineMergeStraggler scenario.Scenario = newByzantineImpl("merge-straggler", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), - oasis.LogAssertNoComputeDiscrepancyDetected(), + oasis.LogAssertNoExecutionDiscrepancyDetected(), oasis.LogAssertMergeDiscrepancyDetected(), }, oasis.ByzantineIndex2IdentitySeed) ) diff --git a/go/oasis-test-runner/scenario/e2e/registry_cli.go b/go/oasis-test-runner/scenario/e2e/registry_cli.go index 4204dadffe5..6c190d8dfc0 100644 --- a/go/oasis-test-runner/scenario/e2e/registry_cli.go +++ b/go/oasis-test-runner/scenario/e2e/registry_cli.go @@ -556,7 +556,7 @@ func (r *registryCLIImpl) testRuntime(childEnv *env.Env) error { // Create runtime descriptor instance. testRuntime := registry.Runtime{ Kind: registry.KindCompute, - Compute: registry.ComputeParameters{ + Executor: registry.ExecutorParameters{ GroupSize: 1, GroupBackupSize: 2, AllowedStragglers: 3, @@ -668,10 +668,10 @@ func (r *registryCLIImpl) genRegisterRuntimeTx(childEnv *env.Env, runtime regist "--" + cmdRegRt.CfgKind, runtime.Kind.String(), "--" + cmdRegRt.CfgVersion, runtime.Version.Version.String(), "--" + cmdRegRt.CfgVersionEnclave, string(runtime.Version.TEE), - "--" + cmdRegRt.CfgComputeGroupSize, strconv.FormatUint(runtime.Compute.GroupSize, 10), - "--" + cmdRegRt.CfgComputeGroupBackupSize, strconv.FormatUint(runtime.Compute.GroupBackupSize, 10), - "--" + cmdRegRt.CfgComputeAllowedStragglers, strconv.FormatUint(runtime.Compute.AllowedStragglers, 10), - "--" + cmdRegRt.CfgComputeRoundTimeout, runtime.Compute.RoundTimeout.String(), + "--" + cmdRegRt.CfgExecutorGroupSize, strconv.FormatUint(runtime.Executor.GroupSize, 10), + "--" + cmdRegRt.CfgExecutorGroupBackupSize, strconv.FormatUint(runtime.Executor.GroupBackupSize, 10), + "--" + cmdRegRt.CfgExecutorAllowedStragglers, strconv.FormatUint(runtime.Executor.AllowedStragglers, 10), + "--" + cmdRegRt.CfgExecutorRoundTimeout, runtime.Executor.RoundTimeout.String(), "--" + cmdRegRt.CfgMergeGroupSize, strconv.FormatUint(runtime.Merge.GroupSize, 10), "--" + cmdRegRt.CfgMergeGroupBackupSize, strconv.FormatUint(runtime.Merge.GroupBackupSize, 10), "--" + cmdRegRt.CfgMergeAllowedStragglers, strconv.FormatUint(runtime.Merge.AllowedStragglers, 10), diff --git a/go/oasis-test-runner/test-runner.go b/go/oasis-test-runner/test-runner.go index 771b38bb330..10f5949c0b3 100644 --- a/go/oasis-test-runner/test-runner.go +++ b/go/oasis-test-runner/test-runner.go @@ -21,10 +21,10 @@ func main() { // Basic test. _ = cmd.Register(e2e.Basic) _ = cmd.Register(e2e.BasicEncryption) - // Byzantine compute node. - _ = cmd.Register(e2e.ByzantineComputeHonest) - _ = cmd.Register(e2e.ByzantineComputeWrong) - _ = cmd.Register(e2e.ByzantineComputeStraggler) + // Byzantine executor node. + _ = cmd.Register(e2e.ByzantineExecutorHonest) + _ = cmd.Register(e2e.ByzantineExecutorWrong) + _ = cmd.Register(e2e.ByzantineExecutorStraggler) // Byzantine merge node. _ = cmd.Register(e2e.ByzantineMergeHonest) _ = cmd.Register(e2e.ByzantineMergeWrong) diff --git a/go/registry/api/api.go b/go/registry/api/api.go index 9d0cd5dae3d..e78378090b5 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -900,7 +900,7 @@ func VerifyRegisterRuntimeArgs(logger *logging.Logger, sigRt *SignedRuntime, isG } // Ensure there is at least one member of the compute group. 
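// NOTE (editor, illustrative sketch -- not part of this patch): a minimal runtime
// descriptor using the renamed Executor field. The variable name and the concrete
// values are assumptions, chosen only to satisfy the checks below and in
// SanityCheckRuntimes (group size >= 1, round timeout >= 1s at second granularity);
// all other Runtime fields are elided.
var exampleExecutorRuntime = Runtime{
	Kind: KindCompute,
	Executor: ExecutorParameters{
		GroupSize:         2,
		GroupBackupSize:   1,
		AllowedStragglers: 0,
		RoundTimeout:      10 * time.Second,
	},
}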
- if rt.Compute.GroupSize == 0 { + if rt.Executor.GroupSize == 0 { logger.Error("RegisterRuntime: compute group size too small", "runtime", rt, ) @@ -1032,17 +1032,17 @@ func SanityCheckRuntimes(runtimes []*SignedRuntime) (map[common.Namespace]*Runti // Check compute runtime parameters. if rt.Kind == KindCompute { - // Check runtime's Compute committee parameters. - if rt.Compute.GroupSize < 1 { - return nil, fmt.Errorf("registry: sanity check failed: compute group size must be >= 1 node") + // Check runtime's Executor committee parameters. + if rt.Executor.GroupSize < 1 { + return nil, fmt.Errorf("registry: sanity check failed: executor group size must be >= 1 node") } - if rt.Compute.RoundTimeout < 1*time.Second { - return nil, fmt.Errorf("registry: sanity check failed: compute round timeout must be >= 1 second") + if rt.Executor.RoundTimeout < 1*time.Second { + return nil, fmt.Errorf("registry: sanity check failed: executor round timeout must be >= 1 second") } - if rt.Compute.RoundTimeout.Truncate(time.Second) != rt.Compute.RoundTimeout { - return nil, fmt.Errorf("registry: sanity check failed: granularity of compute round timeout must be a second") + if rt.Executor.RoundTimeout.Truncate(time.Second) != rt.Executor.RoundTimeout { + return nil, fmt.Errorf("registry: sanity check failed: granularity of executor round timeout must be a second") } // Check runtime's Merge committee parameters. @@ -1055,7 +1055,7 @@ func SanityCheckRuntimes(runtimes []*SignedRuntime) (map[common.Namespace]*Runti } if rt.Merge.RoundTimeout.Truncate(time.Second) != rt.Merge.RoundTimeout { - return nil, fmt.Errorf("registry: sanity check failed: granularity of compute round timeout must be a second") + return nil, fmt.Errorf("registry: sanity check failed: granularity of merge round timeout must be a second") } // Check runtime's Transaction scheduler committee parameters. diff --git a/go/registry/api/runtime.go b/go/registry/api/runtime.go index a43cc06883a..3cc6ee31f87 100644 --- a/go/registry/api/runtime.go +++ b/go/registry/api/runtime.go @@ -33,7 +33,7 @@ var ( type RuntimeKind uint32 const ( - // KindCompute is a generic compute runtime. + // KindCompute is a generic executor runtime. KindCompute RuntimeKind = 0 // KindKeyManager is a key manager runtime. @@ -72,8 +72,8 @@ func (k *RuntimeKind) FromString(str string) error { return nil } -// ComputeParameters are parameters for the compute committee. -type ComputeParameters struct { +// ExecutorParameters are parameters for the executor committee. +type ExecutorParameters struct { // GroupSize is the size of the committee. GroupSize uint64 `json:"group_size"` @@ -149,8 +149,8 @@ type Runtime struct { // KeyManager is the key manager runtime ID for this runtime. KeyManager *common.Namespace `json:"key_manager,omitempty"` - // Compute stores parameters of the compute committee. - Compute ComputeParameters `json:"compute,omitempty"` + // Executor stores parameters of the executor committee. + Executor ExecutorParameters `json:"executor,omitempty"` // Merge stores parameters of the merge committee. 
Merge MergeParameters `json:"merge,omitempty"` diff --git a/go/registry/tests/tester.go b/go/registry/tests/tester.go index 8815200cad1..f693c1bfe2f 100644 --- a/go/registry/tests/tester.go +++ b/go/registry/tests/tester.go @@ -948,7 +948,7 @@ func BulkPopulate(t *testing.T, backend api.Backend, consensus consensusAPI.Back epoch, err := consensus.EpochTime().GetEpoch(context.Background(), consensusAPI.HeightLatest) require.NoError(err, "GetEpoch") - numCompute := int(runtimes[0].Runtime.Compute.GroupSize + runtimes[0].Runtime.Compute.GroupBackupSize) + numCompute := int(runtimes[0].Runtime.Executor.GroupSize + runtimes[0].Runtime.Executor.GroupBackupSize) numStorage := int(runtimes[0].Runtime.Storage.GroupSize) nodes, err := entity.NewTestNodes(numCompute, numStorage, rts, epoch+testRuntimeNodeExpiration) require.NoError(err, "NewTestNodes") @@ -968,7 +968,7 @@ func BulkPopulate(t *testing.T, backend api.Backend, consensus consensusAPI.Back } for _, v := range runtimes { - numNodes := v.Runtime.Compute.GroupSize + v.Runtime.Compute.GroupBackupSize + v.Runtime.Storage.GroupSize + numNodes := v.Runtime.Executor.GroupSize + v.Runtime.Executor.GroupBackupSize + v.Runtime.Storage.GroupSize require.EqualValues(len(nodes), numNodes, "runtime wants the expected number of nodes") v.entity = entity v.nodes = nodes @@ -1043,7 +1043,7 @@ func NewTestRuntime(seed []byte, entity *TestEntity) (*TestRuntime, error) { rt.Runtime = &api.Runtime{ ID: publicKeyToNamespace(rt.Signer.Public()), - Compute: api.ComputeParameters{ + Executor: api.ExecutorParameters{ GroupSize: 3, GroupBackupSize: 5, AllowedStragglers: 1, diff --git a/go/roothash/api/api.go b/go/roothash/api/api.go index a2dbd0ee512..4ff231a84ab 100644 --- a/go/roothash/api/api.go +++ b/go/roothash/api/api.go @@ -20,11 +20,11 @@ const ( // ModuleName is a unique module name for the roothash module. ModuleName = "roothash" - // LogEventComputeDiscrepancyDetected is a log event value that signals - // a compute discrepancy has been detected. - LogEventComputeDiscrepancyDetected = "roothash/compute_discrepancy_detected" + // LogEventExecutionDiscrepancyDetected is a log event value that signals + // an execution discrepancy has been detected. + LogEventExecutionDiscrepancyDetected = "roothash/execution_discrepancy_detected" // LogEventMergeDiscrepancyDetected is a log event value that signals - // a compute discrepancy has been detected. + // a merge discrepancy has been detected. LogEventMergeDiscrepancyDetected = "roothash/merge_discrepancy_detected" // LogEventTimerFired is a log event value that signals a timer has fired. LogEventTimerFired = "roothash/timer_fired" @@ -41,14 +41,14 @@ var ( // ErrNotFound is the error returned when a block is not found. ErrNotFound = errors.New(ModuleName, 2, "roothash: block not found") - // MethodComputeCommit is the method name for compute commit submission. - MethodComputeCommit = transaction.NewMethodName(ModuleName, "ComputeCommit", ComputeCommit{}) + // MethodExecutorCommit is the method name for executor commit submission. + MethodExecutorCommit = transaction.NewMethodName(ModuleName, "ExecutorCommit", ExecutorCommit{}) // MethodMergeCommit is the method name for merge commit submission. MethodMergeCommit = transaction.NewMethodName(ModuleName, "MergeCommit", MergeCommit{}) // Methods is a list of all methods supported by the roothash backend. 
Methods = []transaction.MethodName{ - MethodComputeCommit, + MethodExecutorCommit, MethodMergeCommit, } ) @@ -85,15 +85,15 @@ type Backend interface { Cleanup() } -// ComputeCommit is the argument set for the ComputeCommit method. -type ComputeCommit struct { - ID common.Namespace `json:"id"` - Commits []commitment.ComputeCommitment `json:"commits"` +// ExecutorCommit is the argument set for the ExecutorCommit method. +type ExecutorCommit struct { + ID common.Namespace `json:"id"` + Commits []commitment.ExecutorCommitment `json:"commits"` } -// NewComputeCommitTx creates a new compute commit transaction. -func NewComputeCommitTx(nonce uint64, fee *transaction.Fee, runtimeID common.Namespace, commits []commitment.ComputeCommitment) *transaction.Transaction { - return transaction.NewTransaction(nonce, fee, MethodComputeCommit, &ComputeCommit{ +// NewExecutorCommitTx creates a new executor commit transaction. +func NewExecutorCommitTx(nonce uint64, fee *transaction.Fee, runtimeID common.Namespace, commits []commitment.ExecutorCommitment) *transaction.Transaction { + return transaction.NewTransaction(nonce, fee, MethodExecutorCommit, &ExecutorCommit{ ID: runtimeID, Commits: commits, }) @@ -105,7 +105,7 @@ type MergeCommit struct { Commits []commitment.MergeCommitment `json:"commits"` } -// NewMergeCommitTx creates a new compute commit transaction. +// NewMergeCommitTx creates a new merge commit transaction. func NewMergeCommitTx(nonce uint64, fee *transaction.Fee, runtimeID common.Namespace, commits []commitment.MergeCommitment) *transaction.Transaction { return transaction.NewTransaction(nonce, fee, MethodMergeCommit, &MergeCommit{ ID: runtimeID, @@ -123,9 +123,9 @@ type AnnotatedBlock struct { Block *block.Block `json:"block"` } -// ComputeDiscrepancyDetectedEvent is a compute discrepancy detected event. -type ComputeDiscrepancyDetectedEvent struct { - // CommitteeID is the identifier of the compute committee where a +// ExecutionDiscrepancyDetectedEvent is an execution discrepancy detected event. +type ExecutionDiscrepancyDetectedEvent struct { + // CommitteeID is the identifier of the executor committee where a // discrepancy has been detected. CommitteeID hash.Hash `json:"cid"` // Timeout signals whether the discrepancy was due to a timeout. @@ -138,8 +138,8 @@ type MergeDiscrepancyDetectedEvent struct { // Event is a protocol event. type Event struct { - ComputeDiscrepancyDetected *ComputeDiscrepancyDetectedEvent - MergeDiscrepancyDetected *MergeDiscrepancyDetectedEvent + ExecutionDiscrepancyDetected *ExecutionDiscrepancyDetectedEvent + MergeDiscrepancyDetected *MergeDiscrepancyDetectedEvent } // MetricsMonitorable is the interface exposed by backends capable of diff --git a/go/roothash/api/commitment/compute.go b/go/roothash/api/commitment/executor.go similarity index 79% rename from go/roothash/api/commitment/compute.go rename to go/roothash/api/commitment/executor.go index 8c27fc528f5..d86380d8bdf 100644 --- a/go/roothash/api/commitment/compute.go +++ b/go/roothash/api/commitment/executor.go @@ -14,9 +14,9 @@ import ( ) var ( - // ComputeSignatureContext is the signature context used to sign compute + // ExecutorSignatureContext is the signature context used to sign executor // worker commitments.
- ComputeSignatureContext = signature.NewContext("oasis-core/roothash: compute commitment", signature.WithChainSeparation()) + ExecutorSignatureContext = signature.NewContext("oasis-core/roothash: executor commitment", signature.WithChainSeparation()) // ComputeResultsHeaderSignatureContext is the signature context used to // sign compute results headers with RAK. @@ -27,7 +27,7 @@ var ( // header is a compressed representation (e.g., hashes instead of full content) of // the actual results. // -// These headers are signed by RAK inside the runtime and included in compute +// These headers are signed by RAK inside the runtime and included in executor // commitments. // // Keep the roothash RAK validation in sync with changes to this structure. @@ -133,65 +133,65 @@ func (m *ComputeBody) VerifyStorageReceipt(ns common.Namespace, round uint64, re return nil } -// ComputeCommitment is a roothash commitment from a compute worker. +// ExecutorCommitment is a roothash commitment from an executor worker. // // The signed content is ComputeBody. -type ComputeCommitment struct { +type ExecutorCommitment struct { signature.Signed } -// OpenComputeCommitment is a compute commitment that has been verified and +// OpenExecutorCommitment is an executor commitment that has been verified and // deserialized. // // The open commitment still contains the original signed commitment. -type OpenComputeCommitment struct { - ComputeCommitment +type OpenExecutorCommitment struct { + ExecutorCommitment Body *ComputeBody `json:"body"` } // MostlyEqual returns true if the commitment is mostly equal to another // specified commitment as per discrepancy detection criteria. -func (c OpenComputeCommitment) MostlyEqual(other OpenCommitment) bool { +func (c OpenExecutorCommitment) MostlyEqual(other OpenCommitment) bool { h := c.Body.Header.EncodedHash() - otherHash := other.(OpenComputeCommitment).Body.Header.EncodedHash() + otherHash := other.(OpenExecutorCommitment).Body.Header.EncodedHash() return h.Equal(&otherHash) } // ToVote returns a hash that represents a vote for this commitment as // per discrepancy resolution criteria. -func (c OpenComputeCommitment) ToVote() hash.Hash { +func (c OpenExecutorCommitment) ToVote() hash.Hash { return c.Body.Header.EncodedHash() } // ToDDResult returns a commitment-specific result after discrepancy // detection. -func (c OpenComputeCommitment) ToDDResult() interface{} { +func (c OpenExecutorCommitment) ToDDResult() interface{} { return c.Body.Header } -// Open validates the compute commitment signature, and de-serializes the message. +// Open validates the executor commitment signature, and de-serializes the message. // This does not validate the RAK signature. -func (c *ComputeCommitment) Open() (*OpenComputeCommitment, error) { +func (c *ExecutorCommitment) Open() (*OpenExecutorCommitment, error) { var body ComputeBody - if err := c.Signed.Open(ComputeSignatureContext, &body); err != nil { + if err := c.Signed.Open(ExecutorSignatureContext, &body); err != nil { return nil, errors.New("roothash/commitment: commitment has invalid signature") } - return &OpenComputeCommitment{ - ComputeCommitment: *c, - Body: &body, + return &OpenExecutorCommitment{ + ExecutorCommitment: *c, + Body: &body, }, nil } -// SignComputeCommitment serializes the message and signs the commitment. 
-func SignComputeCommitment(signer signature.Signer, body *ComputeBody) (*ComputeCommitment, error) { - signed, err := signature.SignSigned(signer, ComputeSignatureContext, body) +// SignExecutorCommitment serializes the message and signs the commitment. +func SignExecutorCommitment(signer signature.Signer, body *ComputeBody) (*ExecutorCommitment, error) { + signed, err := signature.SignSigned(signer, ExecutorSignatureContext, body) if err != nil { return nil, err } - return &ComputeCommitment{ + return &ExecutorCommitment{ Signed: *signed, }, nil } diff --git a/go/roothash/api/commitment/merge.go b/go/roothash/api/commitment/merge.go index 025021b4242..6241b05942a 100644 --- a/go/roothash/api/commitment/merge.go +++ b/go/roothash/api/commitment/merge.go @@ -14,8 +14,8 @@ import ( var MergeSignatureContext = signature.NewContext("oasis-core/roothash: merge commitment", signature.WithChainSeparation()) type MergeBody struct { - ComputeCommits []ComputeCommitment `json:"commits"` - Header block.Header `json:"header"` + ExecutorCommits []ExecutorCommitment `json:"commits"` + Header block.Header `json:"header"` } // MergeCommitment is a roothash commitment from a merge worker. diff --git a/go/roothash/api/commitment/pool.go b/go/roothash/api/commitment/pool.go index 216aff36aca..00e60fa994a 100644 --- a/go/roothash/api/commitment/pool.go +++ b/go/roothash/api/commitment/pool.go @@ -28,7 +28,7 @@ var ( ErrDiscrepancyDetected = errors.New(moduleName, 8, "roothash/commitment: discrepancy detected") ErrStillWaiting = errors.New(moduleName, 9, "roothash/commitment: still waiting for commits") ErrInsufficientVotes = errors.New(moduleName, 10, "roothash/commitment: insufficient votes to finalize discrepancy resolution round") - ErrBadComputeCommits = errors.New(moduleName, 11, "roothash/commitment: bad compute commitments") + ErrBadExecutorCommits = errors.New(moduleName, 11, "roothash/commitment: bad executor commitments") ErrInvalidCommitteeID = errors.New(moduleName, 12, "roothash/commitment: invalid committee ID") ErrTxnSchedSigInvalid = errors.New(moduleName, 13, "roothash/commitment: txn scheduler signature invalid") ErrInvalidMessages = errors.New(moduleName, 14, "roothash/commitment: invalid messages") @@ -51,7 +51,7 @@ type NodeInfo struct { Runtime *node.Runtime `json:"runtime"` } -// Pool is a serializable pool of commiments that can be used to perform +// Pool is a serializable pool of commitments that can be used to perform // discrepancy detection. // // The pool is not safe for concurrent use. @@ -63,9 +63,9 @@ type Pool struct { Committee *scheduler.Committee `json:"committee"` // NodeInfo contains node information about committee members. NodeInfo map[signature.PublicKey]NodeInfo `json:"node_info"` - // ComputeCommitments are the commitments in the pool iff Committee.Kind - // is scheduler.KindCompute. - ComputeCommitments map[signature.PublicKey]OpenComputeCommitment `json:"compute_commitments,omitempty"` + // ExecuteCommitments are the commitments in the pool iff Committee.Kind + // is scheduler.KindExecutor. + ExecuteCommitments map[signature.PublicKey]OpenExecutorCommitment `json:"execute_commitments,omitempty"` // MergeCommitments are the commitments in the pool iff Committee.Kind // is scheduler.KindMerge. MergeCommitments map[signature.PublicKey]OpenMergeCommitment `json:"merge_commitments,omitempty"` @@ -86,8 +86,8 @@ func (p *Pool) GetCommitteeID() hash.Hash { // ResetCommitments resets the commitments in the pool and clears the discrepancy // flag. 
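// NOTE (editor, illustrative sketch -- not part of this patch): the renamed
// executor-commitment round trip, assuming signer, body, blk, sv and pool are set up
// as in the surrounding code and tests; the helper name is hypothetical.
func exampleExecutorCommitmentFlow(signer signature.Signer, body *ComputeBody, blk *block.Block, sv SignatureVerifier, pool *Pool) error {
	// Sign the body under ExecutorSignatureContext (formerly the compute context).
	commit, err := SignExecutorCommitment(signer, body)
	if err != nil {
		return err
	}
	// Verify the signature and add the commitment to the pool; a repeated submission
	// from the same node yields ErrAlreadyCommitted.
	if err = pool.AddExecutorCommitment(blk, sv, commit); err != nil {
		return err
	}
	// Without a timeout, all committee workers must have committed before the round
	// can be finalized.
	return pool.CheckEnoughCommitments(false)
}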
func (p *Pool) ResetCommitments() { - if p.ComputeCommitments == nil || len(p.ComputeCommitments) > 0 { - p.ComputeCommitments = make(map[signature.PublicKey]OpenComputeCommitment) + if p.ExecuteCommitments == nil || len(p.ExecuteCommitments) > 0 { + p.ExecuteCommitments = make(map[signature.PublicKey]OpenExecutorCommitment) } if p.MergeCommitments == nil || len(p.MergeCommitments) > 0 { p.MergeCommitments = make(map[signature.PublicKey]OpenMergeCommitment) @@ -107,8 +107,8 @@ func (p *Pool) getCommitment(id signature.PublicKey) (OpenCommitment, bool) { ) switch p.Committee.Kind { - case scheduler.KindCompute: - com, ok = p.ComputeCommitments[id] + case scheduler.KindExecutor: + com, ok = p.ExecuteCommitments[id] case scheduler.KindMerge: com, ok = p.MergeCommitments[id] default: @@ -117,11 +117,11 @@ func (p *Pool) getCommitment(id signature.PublicKey) (OpenCommitment, bool) { return com, ok } -func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, openCom *OpenComputeCommitment) error { +func (p *Pool) addOpenExecutorCommitment(blk *block.Block, sv SignatureVerifier, openCom *OpenExecutorCommitment) error { if p.Committee == nil || p.NodeInfo == nil { return ErrNoCommittee } - if p.Committee.Kind != scheduler.KindCompute { + if p.Committee.Kind != scheduler.KindExecutor { return ErrInvalidCommitteeKind } @@ -138,7 +138,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // TODO: Check for signs of double signing (#1804). // Ensure the node did not already submit a commitment. - if _, ok := p.ComputeCommitments[id]; ok { + if _, ok := p.ExecuteCommitments[id]; ok { return ErrAlreadyCommitted } @@ -166,7 +166,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // Verify that this is for the correct committee. cID := p.GetCommitteeID() if !cID.Equal(&body.CommitteeID) { - logger.Debug("compute commitment has invalid committee ID", + logger.Debug("executor commitment has invalid committee ID", "expected_committee_id", cID, "committee_id", body.CommitteeID, "node_id", id, @@ -176,7 +176,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // Check if the block is based on the previous block. if !header.IsParentOf(&blk.Header) { - logger.Debug("compute commitment is not based on correct block", + logger.Debug("executor commitment is not based on correct block", "committee_id", cID, "node_id", id, "expected_previous_hash", blk.Header.EncodedHash(), @@ -188,7 +188,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // Verify that the txn scheduler signature for current commitment is valid. currentTxnSchedSig := body.TxnSchedSig if err := sv.VerifyCommitteeSignatures(scheduler.KindTransactionScheduler, []signature.Signature{body.TxnSchedSig}); err != nil { - logger.Debug("compute commitment has bad transaction scheduler signers", + logger.Debug("executor commitment has bad transaction scheduler signers", "committee_id", cID, "node_id", id, "err", err, @@ -201,7 +201,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // Check if the header refers to merkle roots in storage. 
if err := sv.VerifyCommitteeSignatures(scheduler.KindStorage, body.StorageSignatures); err != nil { - logger.Debug("compute commitment has bad storage receipt signers", + logger.Debug("executor commitment has bad storage receipt signers", "committee_id", cID, "node_id", id, "err", err, @@ -209,7 +209,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, return err } if err := body.VerifyStorageReceiptSignatures(blk.Header.Namespace, blk.Header.Round+1); err != nil { - logger.Debug("compute commitment has bad storage receipt signatures", + logger.Debug("executor commitment has bad storage receipt signatures", "committee_id", cID, "node_id", id, "err", err, @@ -219,7 +219,7 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, // Go through existing commitments and check if the txn scheduler signed // different batches for the same committee. - for _, com := range p.ComputeCommitments { + for _, com := range p.ExecuteCommitments { cb := com.Body if cID.Equal(&cb.CommitteeID) { existingTxnSchedSig := cb.TxnSchedSig @@ -234,23 +234,23 @@ func (p *Pool) addOpenComputeCommitment(blk *block.Block, sv SignatureVerifier, } } - if p.ComputeCommitments == nil { - p.ComputeCommitments = make(map[signature.PublicKey]OpenComputeCommitment) + if p.ExecuteCommitments == nil { + p.ExecuteCommitments = make(map[signature.PublicKey]OpenExecutorCommitment) } - p.ComputeCommitments[id] = *openCom + p.ExecuteCommitments[id] = *openCom return nil } -// AddComputeCommitment verifies and adds a new compute commitment to the pool. -func (p *Pool) AddComputeCommitment(blk *block.Block, sv SignatureVerifier, commitment *ComputeCommitment) error { +// AddExecutorCommitment verifies and adds a new executor commitment to the pool. +func (p *Pool) AddExecutorCommitment(blk *block.Block, sv SignatureVerifier, commitment *ExecutorCommitment) error { // Check the commitment signature and de-serialize into header. openCom, err := commitment.Open() if err != nil { return err } - return p.addOpenComputeCommitment(blk, sv, openCom) + return p.addOpenExecutorCommitment(blk, sv, openCom) } // CheckEnoughCommitments checks if there are enough commitments in the pool to be @@ -284,8 +284,8 @@ func (p *Pool) CheckEnoughCommitments(didTimeout bool) error { // are allowed. if didTimeout { switch p.Committee.Kind { - case scheduler.KindCompute: - required -= int(p.Runtime.Compute.AllowedStragglers) + case scheduler.KindExecutor: + required -= int(p.Runtime.Executor.AllowedStragglers) case scheduler.KindMerge: required -= int(p.Runtime.Merge.AllowedStragglers) default: @@ -464,7 +464,7 @@ func (p *Pool) TryFinalize( // AddMergeCommitment verifies and adds a new merge commitment to the pool. // -// Any compute commitments are added to the provided pool. +// Any executor commitments are added to the provided pool. func (p *Pool) AddMergeCommitment( blk *block.Block, sv SignatureVerifier, @@ -511,16 +511,16 @@ func (p *Pool) AddMergeCommitment( return ErrNotBasedOnCorrectBlock } - // Check compute commitments -- all commitments must be valid and there + // Check executor commitments -- all commitments must be valid and there // must be no discrepancy as the merge committee nodes are supposed to // check this. - if err = ccPool.addComputeCommitments(blk, sv, body.ComputeCommits); err != nil { + if err = ccPool.addExecutorCommitments(blk, sv, body.ExecutorCommits); err != nil { return err } - // There must be enough compute commits for all committees. 
+ // There must be enough executor commits for all committees. if err = ccPool.CheckEnoughCommitments(); err != nil { - return ErrBadComputeCommits + return ErrBadExecutorCommits } for _, sp := range ccPool.Committees { @@ -538,19 +538,19 @@ func (p *Pool) AddMergeCommitment( } fallthrough default: - logger.Debug("discrepancy detection failed for compute committee", + logger.Debug("discrepancy detection failed for executor committee", "err", err, ) - return ErrBadComputeCommits + return ErrBadExecutorCommits } } else { // If there was a discrepancy before it must be resolved now. _, err = sp.ResolveDiscrepancy() if err != nil { - logger.Debug("discrepancy resolution failed for compute committee", + logger.Debug("discrepancy resolution failed for executor committee", "err", err, ) - return ErrBadComputeCommits + return ErrBadExecutorCommits } } } @@ -589,10 +589,10 @@ func (p *Pool) GetCommitteeNode(id signature.PublicKey) (*scheduler.CommitteeNod return p.Committee.Members[ni.CommitteeNode], nil } -// GetComputeCommitments returns a list of compute commitments in the pool. -func (p *Pool) GetComputeCommitments() (result []ComputeCommitment) { - for _, c := range p.ComputeCommitments { - result = append(result, c.ComputeCommitment) +// GetExecutorCommitments returns a list of executor commitments in the pool. +func (p *Pool) GetExecutorCommitments() (result []ExecutorCommitment) { + for _, c := range p.ExecuteCommitments { + result = append(result, c.ExecutorCommitment) } return } @@ -608,8 +608,8 @@ type MultiPool struct { Committees map[hash.Hash]*Pool `json:"committees"` } -// AddComputeCommitment verifies and adds a new compute commitment to the pool. -func (m *MultiPool) AddComputeCommitment(blk *block.Block, sv SignatureVerifier, commitment *ComputeCommitment) (*Pool, error) { +// AddExecutorCommitment verifies and adds a new executor commitment to the pool. +func (m *MultiPool) AddExecutorCommitment(blk *block.Block, sv SignatureVerifier, commitment *ExecutorCommitment) (*Pool, error) { // Check the commitment signature and de-serialize into header. openCom, err := commitment.Open() if err != nil { @@ -621,14 +621,14 @@ func (m *MultiPool) AddComputeCommitment(blk *block.Block, sv SignatureVerifier, return nil, ErrInvalidCommitteeID } - return p, p.addOpenComputeCommitment(blk, sv, openCom) + return p, p.addOpenExecutorCommitment(blk, sv, openCom) } -// addComputeCommitments verifies and adds multiple compute commitments to the pool. +// addExecutorCommitments verifies and adds multiple executor commitments to the pool. // All valid commitments will be added, redundant commitments will be ignored. // // Note that any signatures being invalid will result in no changes to the pool. -func (m *MultiPool) addComputeCommitments(blk *block.Block, sv SignatureVerifier, commitments []ComputeCommitment) error { +func (m *MultiPool) addExecutorCommitments(blk *block.Block, sv SignatureVerifier, commitments []ExecutorCommitment) error { // Batch verify all of the signatures at once. 
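// NOTE (editor, illustrative sketch -- not part of this patch): merge commitments now
// wrap the renamed executor commitments. Assuming commit1/commit2, parentBlk, blk, sv,
// a merge Pool and an executor MultiPool as in the tests further below:
//
//	mergeBody := MergeBody{
//		ExecutorCommits: []ExecutorCommitment{*commit1, *commit2},
//		Header:          parentBlk.Header,
//	}
//	mergeCommit, err := SignMergeCommitment(mergeSigner, &mergeBody)
//	if err == nil {
//		err = mergePool.AddMergeCommitment(blk, sv, mergeCommit, &executorPool)
//	}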
msgs := make([][]byte, 0, len(commitments)) sigs := make([]signature.Signature, 0, len(commitments)) @@ -638,7 +638,7 @@ func (m *MultiPool) addComputeCommitments(blk *block.Block, sv SignatureVerifier sigs = append(sigs, v.Signature) } - if !signature.VerifyBatch(ComputeSignatureContext, msgs, sigs) { + if !signature.VerifyBatch(ExecutorSignatureContext, msgs, sigs) { return signature.ErrVerifyFailed } @@ -652,9 +652,9 @@ func (m *MultiPool) addComputeCommitments(blk *block.Block, sv SignatureVerifier continue } - openCom := &OpenComputeCommitment{ - ComputeCommitment: v, - Body: &body, + openCom := &OpenExecutorCommitment{ + ExecutorCommitment: v, + Body: &body, } p := m.Committees[openCom.Body.CommitteeID] @@ -663,7 +663,7 @@ func (m *MultiPool) addComputeCommitments(blk *block.Block, sv SignatureVerifier continue } - err := p.addOpenComputeCommitment(blk, sv, openCom) + err := p.addOpenExecutorCommitment(blk, sv, openCom) switch err { case nil, ErrAlreadyCommitted: default: @@ -671,7 +671,7 @@ func (m *MultiPool) addComputeCommitments(blk *block.Block, sv SignatureVerifier } } if hadError { - return ErrBadComputeCommits + return ErrBadExecutorCommits } return nil @@ -691,11 +691,11 @@ func (m *MultiPool) CheckEnoughCommitments() error { return nil } -// GetComputeCommitments returns a list of compute commitments in the pool. -func (m *MultiPool) GetComputeCommitments() (result []ComputeCommitment) { +// GetExecutorCommitments returns a list of executor commitments in the pool. +func (m *MultiPool) GetExecutorCommitments() (result []ExecutorCommitment) { for _, p := range m.Committees { - for _, c := range p.ComputeCommitments { - result = append(result, c.ComputeCommitment) + for _, c := range p.ExecuteCommitments { + result = append(result, c.ExecutorCommitment) } } return diff --git a/go/roothash/api/commitment/pool_test.go b/go/roothash/api/commitment/pool_test.go index cf194457b15..5efd74d715a 100644 --- a/go/roothash/api/commitment/pool_test.go +++ b/go/roothash/api/commitment/pool_test.go @@ -71,13 +71,13 @@ func TestPoolDefault(t *testing.T) { StateRoot: blk.Header.StateRoot, }, } - commit, err := SignComputeCommitment(sk, &body) - require.NoError(t, err, "SignComputeCommitment") + commit, err := SignExecutorCommitment(sk, &body) + require.NoError(t, err, "SignExecutorCommitment") // An empty pool should work but should always error. pool := Pool{} - err = pool.AddComputeCommitment(blk, nopSV, commit) - require.Error(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(blk, nopSV, commit) + require.Error(t, err, "AddExecutorCommitment") err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrNoCommittee, err) @@ -108,7 +108,7 @@ func TestPoolSingleCommitment(t *testing.T) { // Generate a committee. cID := sk.Public() committee := &scheduler.Committee{ - Kind: scheduler.KindCompute, + Kind: scheduler.KindExecutor, Members: []*scheduler.CommitteeNode{ &scheduler.CommitteeNode{ Role: scheduler.Worker, @@ -135,8 +135,8 @@ func TestPoolSingleCommitment(t *testing.T) { // Generate a commitment. 
childBlk, parentBlk, body := generateComputeBody(t, committee) - commit, err := SignComputeCommitment(sk, &body) - require.NoError(t, err, "SignComputeCommitment") + commit, err := SignExecutorCommitment(sk, &body) + require.NoError(t, err, "SignExecutorCommitment") sv := &staticSignatureVerifier{ storagePublicKey: body.StorageSignatures[0].PublicKey, @@ -144,10 +144,10 @@ func TestPoolSingleCommitment(t *testing.T) { } // Adding a commitment not based on correct block should fail. - err = pool.AddComputeCommitment(parentBlk, sv, commit) - require.Error(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(parentBlk, sv, commit) + require.Error(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") @@ -160,30 +160,30 @@ func TestPoolSingleCommitment(t *testing.T) { bodyIncorrectStorageSig := body // This generates a new signing key so verification should fail. bodyIncorrectStorageSig.StorageSignatures[0] = generateStorageReceiptSignature(t, parentBlk, &bodyIncorrectStorageSig) - incorrectCommit, err := SignComputeCommitment(sk, &bodyIncorrectStorageSig) - require.NoError(t, err, "SignComputeCommitment") - err = pool.AddComputeCommitment(childBlk, sv, incorrectCommit) - require.Error(t, err, "AddComputeCommitment") + incorrectCommit, err := SignExecutorCommitment(sk, &bodyIncorrectStorageSig) + require.NoError(t, err, "SignExecutorCommitment") + err = pool.AddExecutorCommitment(childBlk, sv, incorrectCommit) + require.Error(t, err, "AddExecutorCommitment") // Adding a commitment having txn scheduler inputs signed with an incorrect // public key should fail. bodyIncorrectTxnSchedSig := body // This generates a new signing key so verification should fail. bodyIncorrectTxnSchedSig.TxnSchedSig = generateTxnSchedulerSignature(t, childBlk, &bodyIncorrectTxnSchedSig) - incorrectCommit, err = SignComputeCommitment(sk, &bodyIncorrectTxnSchedSig) - require.NoError(t, err, "SignComputeCommitment") - err = pool.AddComputeCommitment(childBlk, sv, incorrectCommit) - require.Error(t, err, "AddComputeCommitment") + incorrectCommit, err = SignExecutorCommitment(sk, &bodyIncorrectTxnSchedSig) + require.NoError(t, err, "SignExecutorCommitment") + err = pool.AddExecutorCommitment(childBlk, sv, incorrectCommit) + require.Error(t, err, "AddExecutorCommitment") // Adding a commitment should succeed. - err = pool.AddComputeCommitment(childBlk, sv, commit) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, sv, commit) + require.NoError(t, err, "AddExecutorCommitment") // Adding a commitment twice for the same node should fail. - err = pool.AddComputeCommitment(childBlk, sv, commit) - require.Error(t, err, "AddComputeCommitment(duplicate)") + err = pool.AddExecutorCommitment(childBlk, sv, commit) + require.Error(t, err, "AddExecutorCommitment(duplicate)") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -218,7 +218,7 @@ func TestPoolSingleCommitmentTEE(t *testing.T) { // Generate a committee. 
cID := sk.Public() committee := &scheduler.Committee{ - Kind: scheduler.KindCompute, + Kind: scheduler.KindExecutor, Members: []*scheduler.CommitteeNode{ &scheduler.CommitteeNode{ Role: scheduler.Worker, @@ -255,14 +255,14 @@ func TestPoolSingleCommitmentTEE(t *testing.T) { require.NoError(t, err, "Sign") body.RakSig = rakSig.Signature - commit, err := SignComputeCommitment(sk, &body) - require.NoError(t, err, "SignComputeCommitment") + commit, err := SignExecutorCommitment(sk, &body) + require.NoError(t, err, "SignExecutorCommitment") // Adding a commitment not based on correct block should fail. - err = pool.AddComputeCommitment(parentBlk, nopSV, commit) - require.Error(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(parentBlk, nopSV, commit) + require.Error(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") @@ -271,14 +271,14 @@ func TestPoolSingleCommitmentTEE(t *testing.T) { require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding a commitment should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit) + require.NoError(t, err, "AddExecutorCommitment") // Adding a commitment twice for the same node should fail. - err = pool.AddComputeCommitment(childBlk, nopSV, commit) - require.Error(t, err, "AddComputeCommitment(duplicate)") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit) + require.Error(t, err, "AddExecutorCommitment(duplicate)") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -312,26 +312,26 @@ func TestPoolTwoCommitments(t *testing.T) { bodyInvalidID := body bodyInvalidID.CommitteeID.FromBytes([]byte("invalid-committee-id")) - commit1, err := SignComputeCommitment(sk1, &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(sk1, &body) + require.NoError(t, err, "SignExecutorCommitment") - commit2, err := SignComputeCommitment(sk2, &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(sk2, &body) + require.NoError(t, err, "SignExecutorCommitment") // Invalid committee. - cInvalidCommit, err := SignComputeCommitment(sk1, &bodyInvalidID) - require.NoError(t, err, "SignComputeCommitment") + cInvalidCommit, err := SignExecutorCommitment(sk1, &bodyInvalidID) + require.NoError(t, err, "SignExecutorCommitment") // Adding a commitment for an invalid committee should fail. - err = pool.AddComputeCommitment(childBlk, nopSV, cInvalidCommit) - require.Error(t, err, "AddComputeCommitment") - require.Equal(t, ErrInvalidCommitteeID, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, cInvalidCommit) + require.Error(t, err, "AddExecutorCommitment") + require.Equal(t, ErrInvalidCommitteeID, err, "AddExecutorCommitment") // Adding commitment 1 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit1) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit1) + require.NoError(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. 
+ // There should not be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") @@ -340,10 +340,10 @@ func TestPoolTwoCommitments(t *testing.T) { require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit2) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit2) + require.NoError(t, err, "AddExecutorCommitment") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -366,11 +366,11 @@ func TestPoolTwoCommitments(t *testing.T) { // Generate a commitment. childBlk, parentBlk, body := generateComputeBody(t, committee) - commit1, err := SignComputeCommitment(sk1, &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(sk1, &body) + require.NoError(t, err, "SignExecutorCommitment") - commit3, err := SignComputeCommitment(sk3, &body) - require.NoError(t, err, "SignComputeCommitment") + commit3, err := SignExecutorCommitment(sk3, &body) + require.NoError(t, err, "SignExecutorCommitment") correctHeader := body.Header @@ -378,14 +378,14 @@ func TestPoolTwoCommitments(t *testing.T) { body.Header.StateRoot.FromBytes([]byte("discrepancy")) body.StorageSignatures = []signature.Signature{generateStorageReceiptSignature(t, parentBlk, &body)} - commit2, err := SignComputeCommitment(sk2, &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(sk2, &body) + require.NoError(t, err, "SignExecutorCommitment") // Adding commitment 1 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit1) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit1) + require.NoError(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") @@ -394,10 +394,10 @@ func TestPoolTwoCommitments(t *testing.T) { require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit2) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit2) + require.NoError(t, err, "AddExecutorCommitment") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -407,16 +407,16 @@ func TestPoolTwoCommitments(t *testing.T) { require.Equal(t, ErrDiscrepancyDetected, err) require.Equal(t, true, pool.Discrepancy) - // There should not be enough compute commitments from backup workers. + // There should not be enough executor commitments from backup workers. err = pool.CheckEnoughCommitments(false) require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Resolve discrepancy with commit from backup worker. 
- err = pool.AddComputeCommitment(childBlk, nopSV, commit3) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit3) + require.NoError(t, err, "AddExecutorCommitment") - // There should be enough compute commitments from backup workers. + // There should be enough executor commitments from backup workers. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -450,7 +450,7 @@ func TestPoolSerialization(t *testing.T) { // Generate a committee. cID := sk.Public() committee := &scheduler.Committee{ - Kind: scheduler.KindCompute, + Kind: scheduler.KindExecutor, Members: []*scheduler.CommitteeNode{ &scheduler.CommitteeNode{ Role: scheduler.Worker, @@ -477,19 +477,19 @@ func TestPoolSerialization(t *testing.T) { // Generate a commitment. childBlk, _, body := generateComputeBody(t, committee) - commit, err := SignComputeCommitment(sk, &body) - require.NoError(t, err, "SignComputeCommitment") + commit, err := SignExecutorCommitment(sk, &body) + require.NoError(t, err, "SignExecutorCommitment") // Adding a commitment should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit) + require.NoError(t, err, "AddExecutorCommitment") m := cbor.Marshal(pool) var d Pool err = cbor.Unmarshal(m, &d) require.NoError(t, err) - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments(false) require.NoError(t, err, "CheckEnoughCommitments") @@ -530,45 +530,45 @@ func TestMultiPoolSerialization(t *testing.T) { _, _, body2 := generateComputeBody(t, committee2) // First committee. - c1commit1, err := SignComputeCommitment(sks1[0], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit1, err := SignExecutorCommitment(sks1[0], &body1) + require.NoError(t, err, "SignExecutorCommitment") - c1commit2, err := SignComputeCommitment(sks1[1], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit2, err := SignExecutorCommitment(sks1[1], &body1) + require.NoError(t, err, "SignExecutorCommitment") // Second committee. - c2commit1, err := SignComputeCommitment(sks2[0], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit1, err := SignExecutorCommitment(sks2[0], &body2) + require.NoError(t, err, "SignExecutorCommitment") - c2commit2, err := SignComputeCommitment(sks2[1], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit2, err := SignExecutorCommitment(sks2[1], &body2) + require.NoError(t, err, "SignExecutorCommitment") // Adding commitment 1 should succeed. - sp, err := pool.AddComputeCommitment(childBlk, nopSV, c1commit1) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com1ID], sp, "AddComputeCommitment") + sp, err := pool.AddExecutorCommitment(childBlk, nopSV, c1commit1) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com1ID], sp, "AddExecutorCommitment") // Adding commitment 2 should succeed. 
- sp, err = pool.AddComputeCommitment(childBlk, nopSV, c1commit2) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com1ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c1commit2) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com1ID], sp, "AddExecutorCommitment") // Adding commitment 3 should succeed. - sp, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit1) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com2ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit1) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com2ID], sp, "AddExecutorCommitment") // Adding commitment 4 should succeed. - sp, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit2) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com2ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit2) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com2ID], sp, "AddExecutorCommitment") m := cbor.Marshal(pool) var d MultiPool err = cbor.Unmarshal(m, &d) require.NoError(t, err) - // There should be enough compute commitments. + // There should be enough executor commitments. err = d.CheckEnoughCommitments() require.NoError(t, err, "CheckEnoughCommitments") } @@ -576,10 +576,10 @@ func TestMultiPoolSerialization(t *testing.T) { func TestPoolMergeCommitment(t *testing.T) { genesisTestHelpers.SetTestChainContext() - rt, computeSks, computeCommittee, computeNodeInfo := generateMockCommittee(t) + rt, executorSks, executorCommittee, executorNodeInfo := generateMockCommittee(t) _, mergeSks, mergeCommittee, mergeNodeInfo := generateMockCommittee(t) mergeCommittee.Kind = scheduler.KindMerge - computeCommitteeID := computeCommittee.EncodedMembersHash() + executorCommitteeID := executorCommittee.EncodedMembersHash() t.Run("NoDiscrepancy", func(t *testing.T) { // Create a merge commitment pool. @@ -589,30 +589,30 @@ func TestPoolMergeCommitment(t *testing.T) { NodeInfo: mergeNodeInfo, } - // Create a compute commitment multi-pool. - computePool := MultiPool{ + // Create an executor commitment multi-pool. + executorPool := MultiPool{ Committees: map[hash.Hash]*Pool{ - computeCommitteeID: &Pool{ + executorCommitteeID: &Pool{ Runtime: rt, - Committee: computeCommittee, - NodeInfo: computeNodeInfo, + Committee: executorCommittee, + NodeInfo: executorNodeInfo, }, }, } // Generate a commitment. - childBlk, parentBlk, body := generateComputeBody(t, computeCommittee) + childBlk, parentBlk, body := generateComputeBody(t, executorCommittee) - commit1, err := SignComputeCommitment(computeSks[0], &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(executorSks[0], &body) + require.NoError(t, err, "SignExecutorCommitment") - commit2, err := SignComputeCommitment(computeSks[1], &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(executorSks[1], &body) + require.NoError(t, err, "SignExecutorCommitment") // Generate a merge commitment.
mergeBody := MergeBody{ - ComputeCommits: []ComputeCommitment{*commit1, *commit2}, - Header: parentBlk.Header, + ExecutorCommits: []ExecutorCommitment{*commit1, *commit2}, + Header: parentBlk.Header, } mergeCommit1, err := SignMergeCommitment(mergeSks[0], &mergeBody) @@ -622,7 +622,7 @@ func TestPoolMergeCommitment(t *testing.T) { require.NoError(t, err, "SignMergeCommitment") // Adding commitment 1 should succeed. - err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit1, &computePool) + err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit1, &executorPool) require.NoError(t, err, "AddMergeCommitment") // There should not be enough merge commitments. @@ -634,10 +634,10 @@ func TestPoolMergeCommitment(t *testing.T) { require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit2, &computePool) - require.NoError(t, err, "AddComputeCommitment") + err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit2, &executorPool) + require.NoError(t, err, "AddExecutorCommitment") - m := cbor.Marshal(computePool) + m := cbor.Marshal(executorPool) var d MultiPool err = cbor.Unmarshal(m, &d) require.NoError(t, err) @@ -654,7 +654,7 @@ func TestPoolMergeCommitment(t *testing.T) { require.EqualValues(t, &parentBlk.Header, &header, "DD should return the same header") }) - t.Run("ResolvedComputeDiscrepancy", func(t *testing.T) { + t.Run("ResolvedExecutionDiscrepancy", func(t *testing.T) { // Create a merge commitment pool. mergePool := Pool{ Runtime: rt, @@ -662,37 +662,37 @@ func TestPoolMergeCommitment(t *testing.T) { NodeInfo: mergeNodeInfo, } - // Create a compute commitment multi-pool. - computePool := MultiPool{ + // Create an executor commitment multi-pool. + executorPool := MultiPool{ Committees: map[hash.Hash]*Pool{ - computeCommitteeID: &Pool{ + executorCommitteeID: &Pool{ Runtime: rt, - Committee: computeCommittee, - NodeInfo: computeNodeInfo, + Committee: executorCommittee, + NodeInfo: executorNodeInfo, }, }, } // Generate a commitment. - childBlk, parentBlk, body := generateComputeBody(t, computeCommittee) + childBlk, parentBlk, body := generateComputeBody(t, executorCommittee) - commit1, err := SignComputeCommitment(computeSks[0], &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(executorSks[0], &body) + require.NoError(t, err, "SignExecutorCommitment") - commit3, err := SignComputeCommitment(computeSks[2], &body) - require.NoError(t, err, "SignComputeCommitment") + commit3, err := SignExecutorCommitment(executorSks[2], &body) + require.NoError(t, err, "SignExecutorCommitment") // Update state root and fix the storage receipt. body.Header.StateRoot.FromBytes([]byte("discrepancy")) body.StorageSignatures = []signature.Signature{generateStorageReceiptSignature(t, parentBlk, &body)} - commit2, err := SignComputeCommitment(computeSks[1], &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(executorSks[1], &body) + require.NoError(t, err, "SignExecutorCommitment") // Generate a merge commitment.
mergeBody := MergeBody{ - ComputeCommits: []ComputeCommitment{*commit1, *commit2, *commit3}, - Header: parentBlk.Header, + ExecutorCommits: []ExecutorCommitment{*commit1, *commit2, *commit3}, + Header: parentBlk.Header, } mergeCommit1, err := SignMergeCommitment(mergeSks[0], &mergeBody) @@ -702,7 +702,7 @@ func TestPoolMergeCommitment(t *testing.T) { require.NoError(t, err, "SignMergeCommitment") // Adding commitment 1 should succeed. - err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit1, &computePool) + err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit1, &executorPool) require.NoError(t, err, "AddMergeCommitment") // There should not be enough merge commitments. @@ -714,10 +714,10 @@ func TestPoolMergeCommitment(t *testing.T) { require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit2, &computePool) - require.NoError(t, err, "AddComputeCommitment") + err = mergePool.AddMergeCommitment(childBlk, nopSV, mergeCommit2, &executorPool) + require.NoError(t, err, "AddExecutorCommitment") - m := cbor.Marshal(computePool) + m := cbor.Marshal(executorPool) var d MultiPool err = cbor.Unmarshal(m, &d) require.NoError(t, err) @@ -768,64 +768,64 @@ func TestMultiPool(t *testing.T) { bodyInvalidID.CommitteeID.FromBytes([]byte("invalid-committee-id")) // First committee. - c1commit1, err := SignComputeCommitment(sks1[0], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit1, err := SignExecutorCommitment(sks1[0], &body1) + require.NoError(t, err, "SignExecutorCommitment") - c1commit2, err := SignComputeCommitment(sks1[1], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit2, err := SignExecutorCommitment(sks1[1], &body1) + require.NoError(t, err, "SignExecutorCommitment") // Second committee. - c2commit1, err := SignComputeCommitment(sks2[0], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit1, err := SignExecutorCommitment(sks2[0], &body2) + require.NoError(t, err, "SignExecutorCommitment") - c2commit2, err := SignComputeCommitment(sks2[1], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit2, err := SignExecutorCommitment(sks2[1], &body2) + require.NoError(t, err, "SignExecutorCommitment") // Invalid committee. - cInvalidCommit, err := SignComputeCommitment(sks1[0], &bodyInvalidID) - require.NoError(t, err, "SignComputeCommitment") + cInvalidCommit, err := SignExecutorCommitment(sks1[0], &bodyInvalidID) + require.NoError(t, err, "SignExecutorCommitment") // Adding a commitment for an invalid committee should fail. - _, err = pool.AddComputeCommitment(childBlk, nopSV, cInvalidCommit) - require.Error(t, err, "AddComputeCommitment") - require.Equal(t, ErrInvalidCommitteeID, err, "AddComputeCommitment") + _, err = pool.AddExecutorCommitment(childBlk, nopSV, cInvalidCommit) + require.Error(t, err, "AddExecutorCommitment") + require.Equal(t, ErrInvalidCommitteeID, err, "AddExecutorCommitment") // Adding commitment 1 should succeed. - sp, err := pool.AddComputeCommitment(childBlk, nopSV, c1commit1) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com1ID], sp, "AddComputeCommitment") + sp, err := pool.AddExecutorCommitment(childBlk, nopSV, c1commit1) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com1ID], sp, "AddExecutorCommitment") - // There should not be enough compute commitments. 
+ // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - sp, err = pool.AddComputeCommitment(childBlk, nopSV, c1commit2) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com1ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c1commit2) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com1ID], sp, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 3 should succeed. - sp, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit1) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com2ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit1) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com2ID], sp, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 4 should succeed. - sp, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit2) - require.NoError(t, err, "AddComputeCommitment") - require.Equal(t, pool.Committees[com2ID], sp, "AddComputeCommitment") + sp, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit2) + require.NoError(t, err, "AddExecutorCommitment") + require.Equal(t, pool.Committees[com2ID], sp, "AddExecutorCommitment") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments() require.NoError(t, err, "CheckEnoughCommitments") }) @@ -852,55 +852,55 @@ func TestMultiPool(t *testing.T) { _, parentBlk, body2 := generateComputeBody(t, committee2) // First committee. - c1commit1, err := SignComputeCommitment(sks1[0], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit1, err := SignExecutorCommitment(sks1[0], &body1) + require.NoError(t, err, "SignExecutorCommitment") - c1commit2, err := SignComputeCommitment(sks1[1], &body1) - require.NoError(t, err, "SignComputeCommitment") + c1commit2, err := SignExecutorCommitment(sks1[1], &body1) + require.NoError(t, err, "SignExecutorCommitment") // Second committee. - c2commit1, err := SignComputeCommitment(sks2[0], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit1, err := SignExecutorCommitment(sks2[0], &body2) + require.NoError(t, err, "SignExecutorCommitment") // Update state root and fix the storage receipt. body2.Header.StateRoot.FromBytes([]byte("discrepancy")) body2.StorageSignatures = []signature.Signature{generateStorageReceiptSignature(t, parentBlk, &body2)} - c2commit2, err := SignComputeCommitment(sks2[1], &body2) - require.NoError(t, err, "SignComputeCommitment") + c2commit2, err := SignExecutorCommitment(sks2[1], &body2) + require.NoError(t, err, "SignExecutorCommitment") // Adding commitment 1 should succeed. 
- _, err = pool.AddComputeCommitment(childBlk, nopSV, c1commit1) - require.NoError(t, err, "AddComputeCommitment") + _, err = pool.AddExecutorCommitment(childBlk, nopSV, c1commit1) + require.NoError(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 2 should succeed. - _, err = pool.AddComputeCommitment(childBlk, nopSV, c1commit2) - require.NoError(t, err, "AddComputeCommitment") + _, err = pool.AddExecutorCommitment(childBlk, nopSV, c1commit2) + require.NoError(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 3 should succeed. - _, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit1) - require.NoError(t, err, "AddComputeCommitment") + _, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit1) + require.NoError(t, err, "AddExecutorCommitment") - // There should not be enough compute commitments. + // There should not be enough executor commitments. err = pool.CheckEnoughCommitments() require.Error(t, err, "CheckEnoughCommitments") require.Equal(t, ErrStillWaiting, err, "CheckEnoughCommitments") // Adding commitment 4 should succeed. - _, err = pool.AddComputeCommitment(childBlk, nopSV, c2commit2) - require.NoError(t, err, "AddComputeCommitment") + _, err = pool.AddExecutorCommitment(childBlk, nopSV, c2commit2) + require.NoError(t, err, "AddExecutorCommitment") - // There should be enough compute commitments. + // There should be enough executor commitments. err = pool.CheckEnoughCommitments() require.NoError(t, err, "CheckEnoughCommitments") }) @@ -931,24 +931,24 @@ func TestTryFinalize(t *testing.T) { bodyInvalidID := body bodyInvalidID.CommitteeID.FromBytes([]byte("invalid-committee-id")) - commit1, err := SignComputeCommitment(sk1, &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(sk1, &body) + require.NoError(t, err, "SignExecutorCommitment") - commit2, err := SignComputeCommitment(sk2, &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(sk2, &body) + require.NoError(t, err, "SignExecutorCommitment") // Invalid committee. - cInvalidCommit, err := SignComputeCommitment(sk1, &bodyInvalidID) - require.NoError(t, err, "SignComputeCommitment") + cInvalidCommit, err := SignExecutorCommitment(sk1, &bodyInvalidID) + require.NoError(t, err, "SignExecutorCommitment") // Adding a commitment for an invalid committee should fail. - err = pool.AddComputeCommitment(childBlk, nopSV, cInvalidCommit) - require.Error(t, err, "AddComputeCommitment") - require.Equal(t, ErrInvalidCommitteeID, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, cInvalidCommit) + require.Error(t, err, "AddExecutorCommitment") + require.Equal(t, ErrInvalidCommitteeID, err, "AddExecutorCommitment") // Adding commitment 1 should succeed. 
- err = pool.AddComputeCommitment(childBlk, nopSV, commit1) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit1) + require.NoError(t, err, "AddExecutorCommitment") _, err = pool.TryFinalize(now, roundTimeout, false, true) require.Error(t, err, "TryFinalize") @@ -956,8 +956,8 @@ func TestTryFinalize(t *testing.T) { require.EqualValues(t, now.Add(roundTimeout).Round(time.Second), pool.NextTimeout, "NextTimeout should be set") // Adding commitment 2 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit2) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit2) + require.NoError(t, err, "AddExecutorCommitment") dc, err := pool.TryFinalize(now, roundTimeout, false, true) require.NoError(t, err, "TryFinalize") @@ -977,11 +977,11 @@ func TestTryFinalize(t *testing.T) { // Generate a commitment. childBlk, parentBlk, body := generateComputeBody(t, committee) - commit1, err := SignComputeCommitment(sk1, &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(sk1, &body) + require.NoError(t, err, "SignExecutorCommitment") - commit3, err := SignComputeCommitment(sk3, &body) - require.NoError(t, err, "SignComputeCommitment") + commit3, err := SignExecutorCommitment(sk3, &body) + require.NoError(t, err, "SignExecutorCommitment") correctHeader := body.Header @@ -989,12 +989,12 @@ func TestTryFinalize(t *testing.T) { body.Header.StateRoot.FromBytes([]byte("discrepancy")) body.StorageSignatures = []signature.Signature{generateStorageReceiptSignature(t, parentBlk, &body)} - commit2, err := SignComputeCommitment(sk2, &body) - require.NoError(t, err, "SignComputeCommitment") + commit2, err := SignExecutorCommitment(sk2, &body) + require.NoError(t, err, "SignExecutorCommitment") // Adding commitment 1 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit1) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit1) + require.NoError(t, err, "AddExecutorCommitment") _, err = pool.TryFinalize(now, roundTimeout, false, true) require.Error(t, err, "TryFinalize") @@ -1002,8 +1002,8 @@ func TestTryFinalize(t *testing.T) { require.EqualValues(t, now.Add(roundTimeout).Round(time.Second), pool.NextTimeout, "NextTimeout should be set") // Adding commitment 2 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit2) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit2) + require.NoError(t, err, "AddExecutorCommitment") // There should be a discrepancy. _, err = pool.TryFinalize(now, roundTimeout, false, true) @@ -1011,14 +1011,14 @@ func TestTryFinalize(t *testing.T) { require.Equal(t, ErrDiscrepancyDetected, err) require.Equal(t, true, pool.Discrepancy) - // There should not be enough compute commitments from backup workers. + // There should not be enough executor commitments from backup workers. _, err = pool.TryFinalize(now, roundTimeout, false, true) require.Error(t, err, "TryFinalize") require.Equal(t, ErrStillWaiting, err) // Resolve discrepancy with commit from backup worker. 
- err = pool.AddComputeCommitment(childBlk, nopSV, commit3) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit3) + require.NoError(t, err, "AddExecutorCommitment") dc, err := pool.TryFinalize(now, roundTimeout, false, true) require.NoError(t, err, "TryFinalize") @@ -1038,17 +1038,17 @@ func TestTryFinalize(t *testing.T) { // Generate a commitment. childBlk, _, body := generateComputeBody(t, committee) - commit1, err := SignComputeCommitment(sk1, &body) - require.NoError(t, err, "SignComputeCommitment") + commit1, err := SignExecutorCommitment(sk1, &body) + require.NoError(t, err, "SignExecutorCommitment") - commit3, err := SignComputeCommitment(sk3, &body) - require.NoError(t, err, "SignComputeCommitment") + commit3, err := SignExecutorCommitment(sk3, &body) + require.NoError(t, err, "SignExecutorCommitment") correctHeader := body.Header // Adding commitment 1 should succeed. - err = pool.AddComputeCommitment(childBlk, nopSV, commit1) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit1) + require.NoError(t, err, "AddExecutorCommitment") _, err = pool.TryFinalize(now, roundTimeout, false, true) require.Error(t, err, "TryFinalize") @@ -1072,15 +1072,15 @@ func TestTryFinalize(t *testing.T) { require.Equal(t, true, pool.Discrepancy) require.True(t, pool.NextTimeout.IsZero(), "NextTimeout should be zero") - // There should not be enough compute commitments from backup workers. + // There should not be enough executor commitments from backup workers. _, err = pool.TryFinalize(nowAfterTimeout, roundTimeout, false, true) require.Error(t, err, "TryFinalize") require.Equal(t, ErrStillWaiting, err) require.EqualValues(t, nowAfterTimeout.Add(roundTimeout).Round(time.Second), pool.NextTimeout, "NextTimeout should be set") // Resolve discrepancy with commit from backup worker. - err = pool.AddComputeCommitment(childBlk, nopSV, commit3) - require.NoError(t, err, "AddComputeCommitment") + err = pool.AddExecutorCommitment(childBlk, nopSV, commit3) + require.NoError(t, err, "AddExecutorCommitment") dc, err := pool.TryFinalize(now, roundTimeout, false, true) require.NoError(t, err, "TryFinalize") @@ -1119,7 +1119,7 @@ func generateMockCommittee(t *testing.T) ( c2ID := sk2.Public() c3ID := sk3.Public() committee = &scheduler.Committee{ - Kind: scheduler.KindCompute, + Kind: scheduler.KindExecutor, Members: []*scheduler.CommitteeNode{ &scheduler.CommitteeNode{ Role: scheduler.Worker, diff --git a/go/roothash/api/commitment/txnscheduler.go b/go/roothash/api/commitment/txnscheduler.go index 3efe9d66003..32ef6e3efc3 100644 --- a/go/roothash/api/commitment/txnscheduler.go +++ b/go/roothash/api/commitment/txnscheduler.go @@ -11,16 +11,16 @@ import ( var TxnSchedulerBatchDispatchSigCtx = signature.NewContext("oasis-core/roothash: tx batch dispatch", signature.WithChainSeparation()) // TxnSchedulerBatchDispatch is the message sent from the transaction -// scheduler to compute workers after a batch is ready to be computed. +// scheduler to executor workers after a batch is ready to be executed. // // Don't forget to bump CommitteeProtocol version in go/common/version // if you change anything in this struct. type TxnSchedulerBatchDispatch struct { - // CommitteeID is the committee ID of the target compute committee. + // CommitteeID is the committee ID of the target executor committee. 
CommitteeID hash.Hash `json:"cid"` // IORoot is the I/O root containing the inputs (transactions) that - // the compute node should use. + // the executor node should use. IORoot hash.Hash `json:"io_root"` // StorageSignatures are the storage receipt signatures for the I/O root. diff --git a/go/roothash/tests/tester.go b/go/roothash/tests/tester.go index c56340d32f0..d9f22863213 100644 --- a/go/roothash/tests/tester.go +++ b/go/roothash/tests/tester.go @@ -39,7 +39,7 @@ type runtimeState struct { rt *registryTests.TestRuntime genesisBlock *block.Block - computeCommittee *testCommittee + executorCommittee *testCommittee mergeCommittee *testCommittee storageCommittee *testCommittee txnSchedCommittee *testCommittee @@ -195,7 +195,7 @@ func (s *runtimeState) testEpochTransitionBlock(t *testing.T, scheduler schedule nodes[node.Node.ID] = node } - s.computeCommittee, s.mergeCommittee, s.storageCommittee, s.txnSchedCommittee = mustGetCommittee(t, s.rt, epoch+1, scheduler, nodes) + s.executorCommittee, s.mergeCommittee, s.storageCommittee, s.txnSchedCommittee = mustGetCommittee(t, s.rt, epoch+1, scheduler, nodes) // Wait to receive an epoch transition block. for { @@ -228,7 +228,7 @@ func testSuccessfulRound(t *testing.T, backend api.Backend, consensus consensusA func (s *runtimeState) testSuccessfulRound(t *testing.T, backend api.Backend, consensus consensusAPI.Backend, identity *identity.Identity) { require := require.New(t) - rt, computeCommittee, mergeCommittee := s.rt, s.computeCommittee, s.mergeCommittee + rt, executorCommittee, mergeCommittee := s.rt, s.executorCommittee, s.mergeCommittee dataDir, err := ioutil.TempDir("", "oasis-storage-test_") require.NoError(err, "TempDir") @@ -293,13 +293,13 @@ func (s *runtimeState) testSuccessfulRound(t *testing.T, backend api.Backend, co }, ) - // Generate all the compute commitments. + // Generate all the executor commitments. var toCommit []*registryTests.TestNode - var computeCommits []commitment.ComputeCommitment - toCommit = append(toCommit, computeCommittee.workers...) + var executorCommits []commitment.ExecutorCommitment + toCommit = append(toCommit, executorCommittee.workers...) for _, node := range toCommit { commitBody := commitment.ComputeBody{ - CommitteeID: computeCommittee.committee.EncodedMembersHash(), + CommitteeID: executorCommittee.committee.EncodedMembersHash(), Header: commitment.ComputeResultsHeader{ PreviousHash: parent.Header.PreviousHash, IORoot: parent.Header.IORoot, @@ -324,10 +324,10 @@ func (s *runtimeState) testSuccessfulRound(t *testing.T, backend api.Backend, co commitBody.TxnSchedSig = signedDispatch.Signature // `err` shadows outside. - commit, err := commitment.SignComputeCommitment(node.Signer, &commitBody) // nolint: vetshadow + commit, err := commitment.SignExecutorCommitment(node.Signer, &commitBody) // nolint: vetshadow require.NoError(err, "SignSigned") - computeCommits = append(computeCommits, *commit) + executorCommits = append(executorCommits, *commit) } // Generate all the merge commitments. @@ -336,8 +336,8 @@ func (s *runtimeState) testSuccessfulRound(t *testing.T, backend api.Backend, co toCommit = append(toCommit, mergeCommittee.workers...) for _, node := range toCommit { commitBody := commitment.MergeBody{ - ComputeCommits: computeCommits, - Header: parent.Header, + ExecutorCommits: executorCommits, + Header: parent.Header, } // `err` shadows outside. 
commit, err := commitment.SignMergeCommitment(node.Signer, &commitBody) // nolint: vetshadow @@ -399,7 +399,7 @@ func mustGetCommittee( sched scheduler.Backend, nodes map[signature.PublicKey]*registryTests.TestNode, ) ( - computeCommittee *testCommittee, + executorCommittee *testCommittee, mergeCommittee *testCommittee, storageCommittee *testCommittee, txnSchedCommittee *testCommittee, @@ -440,7 +440,7 @@ func mustGetCommittee( case scheduler.KindTransactionScheduler: groupSize = int(rt.Runtime.TxnScheduler.GroupSize) groupBackupSize = 0 - case scheduler.KindCompute: + case scheduler.KindExecutor: fallthrough case scheduler.KindMerge: groupSize = int(rt.Runtime.Merge.GroupSize) @@ -461,15 +461,15 @@ func mustGetCommittee( switch committee.Kind { case scheduler.KindTransactionScheduler: txnSchedCommittee = &ret - case scheduler.KindCompute: - computeCommittee = &ret + case scheduler.KindExecutor: + executorCommittee = &ret case scheduler.KindMerge: mergeCommittee = &ret case scheduler.KindStorage: storageCommittee = &ret } - if computeCommittee == nil || mergeCommittee == nil || storageCommittee == nil || txnSchedCommittee == nil { + if executorCommittee == nil || mergeCommittee == nil || storageCommittee == nil || txnSchedCommittee == nil { continue } diff --git a/go/runtime/client/tests/tester.go b/go/runtime/client/tests/tester.go index adb02331afc..57e03d42636 100644 --- a/go/runtime/client/tests/tester.go +++ b/go/runtime/client/tests/tester.go @@ -67,7 +67,7 @@ func testQuery( // Fetch blocks. blk, err := c.GetBlock(ctx, &api.GetBlockRequest{RuntimeID: runtimeID, Round: 1}) - // Epoch transition from TestNode/ComputeWorker/InitialEpochTransition + // Epoch transition from TestNode/ExecutorWorker/InitialEpochTransition require.NoError(t, err, "GetBlock") require.EqualValues(t, 1, blk.Header.Round) diff --git a/go/scheduler/api/api.go b/go/scheduler/api/api.go index 021a7150d81..bcd019cc483 100644 --- a/go/scheduler/api/api.go +++ b/go/scheduler/api/api.go @@ -67,8 +67,8 @@ type CommitteeNode struct { type CommitteeKind uint8 const ( - // KindCompute is a compute committee. - KindCompute CommitteeKind = 0 + // KindExecutor is an executor committee. + KindExecutor CommitteeKind = 0 // KindStorage is a storage committee. KindStorage CommitteeKind = 1 @@ -86,7 +86,7 @@ const ( // NeedsLeader returns if committee kind needs leader role. func (k CommitteeKind) NeedsLeader() bool { switch k { - case KindCompute: + case KindExecutor: return false case KindMerge: return false @@ -100,8 +100,8 @@ func (k CommitteeKind) NeedsLeader() bool { // String returns a string representation of a CommitteeKind. 
func (k CommitteeKind) String() string { switch k { - case KindCompute: - return "compute" + case KindExecutor: + return "executor" case KindStorage: return "storage" case KindTransactionScheduler: diff --git a/go/scheduler/tests/tester.go b/go/scheduler/tests/tester.go index 49b9465230f..4d3bcea3a82 100644 --- a/go/scheduler/tests/tester.go +++ b/go/scheduler/tests/tester.go @@ -42,8 +42,8 @@ func SchedulerImplementationTests(t *testing.T, name string, backend api.Backend epochtime := consensus.EpochTime().(epochtime.SetableBackend) epoch := epochtimeTests.MustAdvanceEpoch(t, epochtime, 1) - ensureValidCommittees := func(expectedCompute, expectedStorage, expectedTransactionScheduler int) { - var compute, storage, transactionScheduler *api.Committee + ensureValidCommittees := func(expectedExecutor, expectedStorage, expectedTransactionScheduler int) { + var executor, storage, transactionScheduler *api.Committee var seen int for seen < 3 { select { @@ -56,10 +56,10 @@ func SchedulerImplementationTests(t *testing.T, name string, backend api.Backend } switch committee.Kind { - case api.KindCompute: - require.Nil(compute, "haven't seen a compute committee yet") - compute = committee - require.Len(committee.Members, expectedCompute, "committee has all compute nodes") + case api.KindExecutor: + require.Nil(executor, "haven't seen an executor committee yet") + executor = committee + require.Len(committee.Members, expectedExecutor, "committee has all executor nodes") case api.KindStorage: require.Nil(storage, "haven't seen a storage committee yet") require.Len(committee.Members, expectedStorage, "committee has all storage nodes") @@ -88,9 +88,9 @@ func SchedulerImplementationTests(t *testing.T, name string, backend api.Backend require.NoError(err, "GetCommittees") for _, committee := range committees { switch committee.Kind { - case api.KindCompute: - require.EqualValues(compute, committee, "fetched compute committee is identical") - compute = nil + case api.KindExecutor: + require.EqualValues(executor, committee, "fetched executor committee is identical") + executor = nil case api.KindStorage: require.EqualValues(storage, committee, "fetched storage committee is identical") storage = nil @@ -100,25 +100,25 @@ func SchedulerImplementationTests(t *testing.T, name string, backend api.Backend } } - require.Nil(compute, "fetched a compute committee") + require.Nil(executor, "fetched an executor committee") require.Nil(storage, "fetched a storage committee") require.Nil(transactionScheduler, "fetched a transaction scheduler committee") } - var nCompute, nStorage int + var nExecutor, nStorage int for _, n := range nodes { if n.HasRoles(node.RoleComputeWorker) { - nCompute++ + nExecutor++ } if n.HasRoles(node.RoleStorageWorker) { nStorage++ } } - ensureValidCommittees(nCompute, nStorage, int(rt.Runtime.TxnScheduler.GroupSize)) + ensureValidCommittees(nExecutor, nStorage, int(rt.Runtime.TxnScheduler.GroupSize)) // Re-register the runtime with less nodes. 
- rt.Runtime.Compute.GroupSize = 2 - rt.Runtime.Compute.GroupBackupSize = 1 + rt.Runtime.Executor.GroupSize = 2 + rt.Runtime.Executor.GroupBackupSize = 1 rt.Runtime.Storage.GroupSize = 1 rt.MustRegister(t, consensus.Registry(), consensus) @@ -171,9 +171,9 @@ func requireValidCommitteeMembers(t *testing.T, committee *api.Committee, runtim require.Equal(0, leaders, fmt.Sprintf("%s committee shouldn't have a leader", committee.Kind)) } switch committee.Kind { - case api.KindCompute: - require.EqualValues(runtime.Compute.GroupSize, workers, "compute committee should have the correct number of workers") - require.EqualValues(runtime.Compute.GroupBackupSize, backups, "compute committee should have the correct number of backup workers") + case api.KindExecutor: + require.EqualValues(runtime.Executor.GroupSize, workers, "executor committee should have the correct number of workers") + require.EqualValues(runtime.Executor.GroupBackupSize, backups, "executor committee should have the correct number of backup workers") case api.KindMerge: require.EqualValues(runtime.Merge.GroupSize, workers, "merge committee should have the correct number of workers") require.EqualValues(runtime.Merge.GroupBackupSize, backups, "merge committee should have the correct number of backup workers") diff --git a/go/storage/api/root_cache.go b/go/storage/api/root_cache.go index 58ae33d5a22..a6610658ff7 100644 --- a/go/storage/api/root_cache.go +++ b/go/storage/api/root_cache.go @@ -160,7 +160,7 @@ func (rc *RootCache) Apply( func (rc *RootCache) getApplyLock(root, expectedNewRoot Root) *sync.Mutex { // Lock the Apply call based on (oldRoot, expectedNewRoot), so that when - // multiple compute committees commit the same write logs, we only write + // multiple executor committees commit the same write logs, we only write // the first one and go through the fast path for the rest. lockID := root.EncodedHash().String() + expectedNewRoot.EncodedHash().String() diff --git a/go/worker/common/committee/group.go b/go/worker/common/committee/group.go index 99fda1f1118..23ad89f49de 100644 --- a/go/worker/common/committee/group.go +++ b/go/worker/common/committee/group.go @@ -47,15 +47,15 @@ type epoch struct { // committee election. groupVersion int64 - // computeCommittee is the compute committee we are a member of. - computeCommittee *CommitteeInfo - // computeCommitteeID is the identifier of our compute committee. - computeCommitteeID hash.Hash - // computeCommittees are all compute committees. - computeCommittees map[hash.Hash]*CommitteeInfo - // computeCommitteesByPeer is a set of P2P public keys of compute committee + // executorCommittee is the executor committee we are a member of. + executorCommittee *CommitteeInfo + // executorCommitteeID is the identifier of our executor committee. + executorCommitteeID hash.Hash + // executorCommittees are all executor committees. + executorCommittees map[hash.Hash]*CommitteeInfo + // executorCommitteesByPeer is a set of P2P public keys of executor committee // members. - computeCommitteesByPeer map[signature.PublicKey]bool + executorCommitteesByPeer map[signature.PublicKey]bool // txnSchedulerCommitee is the txn scheduler committee we are a member of. 
txnSchedulerCommittee *CommitteeInfo @@ -75,15 +75,15 @@ type epoch struct { type EpochSnapshot struct { groupVersion int64 - computeCommitteeID hash.Hash + executorCommitteeID hash.Hash - computeRole scheduler.Role + executorRole scheduler.Role txnSchedulerRole scheduler.Role mergeRole scheduler.Role runtime *registry.Runtime - computeCommittees map[hash.Hash]*CommitteeInfo + executorCommittees map[hash.Hash]*CommitteeInfo txnSchedulerCommittee *CommitteeInfo mergeCommittee *CommitteeInfo storageCommittee *CommitteeInfo @@ -91,13 +91,13 @@ type EpochSnapshot struct { // NewMockEpochSnapshot returns a mock epoch snapshot to be used in tests. func NewMockEpochSnapshot() *EpochSnapshot { - var computeCommitteeID hash.Hash - computeCommitteeID.FromBytes([]byte("mock committee id")) + var executorCommitteeID hash.Hash + executorCommitteeID.FromBytes([]byte("mock committee id")) return &EpochSnapshot{ - computeCommitteeID: computeCommitteeID, - computeCommittees: map[hash.Hash]*CommitteeInfo{ - computeCommitteeID: &CommitteeInfo{}, + executorCommitteeID: executorCommitteeID, + executorCommittees: map[hash.Hash]*CommitteeInfo{ + executorCommitteeID: &CommitteeInfo{}, }, } } @@ -113,35 +113,35 @@ func (e *EpochSnapshot) GetRuntime() *registry.Runtime { return e.runtime } -// GetComputeCommittees returns the current compute committees. -func (e *EpochSnapshot) GetComputeCommittees() map[hash.Hash]*CommitteeInfo { - return e.computeCommittees +// GetExecutorCommittees returns the current executor committees. +func (e *EpochSnapshot) GetExecutorCommittees() map[hash.Hash]*CommitteeInfo { + return e.executorCommittees } -// GetComputeCommitteeID returns ID of the compute committee the current node is +// GetExecutorCommitteeID returns ID of the executor committee the current node is // a member of. // // NOTE: Will return an invalid all-zero ID if not a member. -func (e *EpochSnapshot) GetComputeCommitteeID() hash.Hash { - return e.computeCommitteeID +func (e *EpochSnapshot) GetExecutorCommitteeID() hash.Hash { + return e.executorCommitteeID } -// IsComputeMember checks if the current node is a member of the compute committee +// IsExecutorMember checks if the current node is a member of the executor committee // in the current epoch. -func (e *EpochSnapshot) IsComputeMember() bool { - return e.computeRole != scheduler.Invalid +func (e *EpochSnapshot) IsExecutorMember() bool { + return e.executorRole != scheduler.Invalid } -// IsComputeWorker checks if the current node is a worker of the compute committee +// IsExecutorWorker checks if the current node is a worker of the executor committee // in the current epoch. -func (e *EpochSnapshot) IsComputeWorker() bool { - return e.computeRole == scheduler.Worker +func (e *EpochSnapshot) IsExecutorWorker() bool { + return e.executorRole == scheduler.Worker } -// IsComputeBackupWorker checks if the current node is a backup worker of the compute +// IsExecutorBackupWorker checks if the current node is a backup worker of the executor // committee in the current epoch. -func (e *EpochSnapshot) IsComputeBackupWorker() bool { - return e.computeRole == scheduler.BackupWorker +func (e *EpochSnapshot) IsExecutorBackupWorker() bool { + return e.executorRole == scheduler.BackupWorker } // GetTransactionSchedulerCommittee returns the current txn scheduler committee. @@ -207,7 +207,7 @@ func (e *EpochSnapshot) VerifyCommitteeSignatures(kind scheduler.CommitteeKind, } // Group encapsulates communication with a group of nodes in the -// compute committee. 
+// executor committee. type Group struct { sync.RWMutex @@ -272,10 +272,10 @@ func (g *Group) EpochTransition(ctx context.Context, height int64) error { publicIdentity := g.identity.NodeSigner.Public() // Find the current committees. - computeCommittees := make(map[hash.Hash]*CommitteeInfo) - computeCommitteesByPeer := make(map[signature.PublicKey]bool) - var computeCommittee, txnSchedulerCommittee, mergeCommittee, storageCommittee *CommitteeInfo - var computeCommitteeID hash.Hash + executorCommittees := make(map[hash.Hash]*CommitteeInfo) + executorCommitteesByPeer := make(map[signature.PublicKey]bool) + var executorCommittee, txnSchedulerCommittee, mergeCommittee, storageCommittee *CommitteeInfo + var executorCommitteeID hash.Hash var txnSchedulerLeaderPeerID signature.PublicKey for _, cm := range committees { var nodes []*node.Node @@ -310,21 +310,21 @@ func (g *Group) EpochTransition(ctx context.Context, height int64) error { } switch cm.Kind { - case scheduler.KindCompute: - // There can be multiple compute committees per runtime. + case scheduler.KindExecutor: + // There can be multiple executor committees per runtime. cID := cm.EncodedMembersHash() - computeCommittees[cID] = ci + executorCommittees[cID] = ci if role != scheduler.Invalid { - if computeCommittee != nil { - return fmt.Errorf("member of multiple compute committees") + if executorCommittee != nil { + return fmt.Errorf("member of multiple executor committees") } - computeCommittee = ci - computeCommitteeID = cID + executorCommittee = ci + executorCommitteeID = cID } for _, n := range nodes { - computeCommitteesByPeer[n.P2P.ID] = true + executorCommitteesByPeer[n.P2P.ID] = true } case scheduler.KindTransactionScheduler: txnSchedulerCommittee = ci @@ -337,8 +337,8 @@ func (g *Group) EpochTransition(ctx context.Context, height int64) error { storageCommittee = ci } } - if len(computeCommittees) == 0 { - return fmt.Errorf("no compute committees") + if len(executorCommittees) == 0 { + return fmt.Errorf("no executor committees") } if txnSchedulerCommittee == nil { return fmt.Errorf("no transaction scheduler committee") @@ -364,10 +364,10 @@ func (g *Group) EpochTransition(ctx context.Context, height int64) error { roundCtx, cancel, height, - computeCommittee, - computeCommitteeID, - computeCommittees, - computeCommitteesByPeer, + executorCommittee, + executorCommitteeID, + executorCommittees, + executorCommitteesByPeer, txnSchedulerCommittee, txnSchedulerLeaderPeerID, mergeCommittee, @@ -375,15 +375,15 @@ func (g *Group) EpochTransition(ctx context.Context, height int64) error { runtime, } - // Compute committee may be nil in case we are not a member of any committee. - var computeRole scheduler.Role - if computeCommittee != nil { - computeRole = computeCommittee.Role + // Executor committee may be nil in case we are not a member of any committee. 
+ var executorRole scheduler.Role + if executorCommittee != nil { + executorRole = executorCommittee.Role } g.logger.Info("epoch transition complete", "group_version", height, - "compute_role", computeRole, + "executor_role", executorRole, "txn_scheduler_role", txnSchedulerCommittee.Role, "merge_role", mergeCommittee.Role, ) @@ -406,17 +406,17 @@ func (g *Group) GetEpochSnapshot() *EpochSnapshot { txnSchedulerRole: g.activeEpoch.txnSchedulerCommittee.Role, mergeRole: g.activeEpoch.mergeCommittee.Role, runtime: g.activeEpoch.runtime, - computeCommittees: g.activeEpoch.computeCommittees, + executorCommittees: g.activeEpoch.executorCommittees, txnSchedulerCommittee: g.activeEpoch.txnSchedulerCommittee, mergeCommittee: g.activeEpoch.mergeCommittee, storageCommittee: g.activeEpoch.storageCommittee, } - // Compute committee may be nil in case we are not a member of any committee. - cc := g.activeEpoch.computeCommittee - if cc != nil { - s.computeRole = cc.Role - s.computeCommitteeID = g.activeEpoch.computeCommitteeID + // Executor committee may be nil in case we are not a member of any committee. + xc := g.activeEpoch.executorCommittee + if xc != nil { + s.executorRole = xc.Role + s.executorCommitteeID = g.activeEpoch.executorCommitteeID } return s @@ -435,15 +435,15 @@ func (g *Group) IsPeerAuthorized(peerID signature.PublicKey) bool { // Assume the peer is not authorized. var authorized bool - // If we are in the compute committee, we accept messages from the transaction + // If we are in the executor committee, we accept messages from the transaction // scheduler committee leader. - if g.activeEpoch.computeCommittee != nil && g.activeEpoch.txnSchedulerLeaderPeerID.IsValid() { + if g.activeEpoch.executorCommittee != nil && g.activeEpoch.txnSchedulerLeaderPeerID.IsValid() { authorized = authorized || peerID.Equal(g.activeEpoch.txnSchedulerLeaderPeerID) } - // If we are in the merge committee, we accept messages from any compute committee member. + // If we are in the merge committee, we accept messages from any executor committee member. if g.activeEpoch.mergeCommittee.Role != scheduler.Invalid { - authorized = authorized || g.activeEpoch.computeCommitteesByPeer[peerID] + authorized = authorized || g.activeEpoch.executorCommitteesByPeer[peerID] } return authorized @@ -522,7 +522,7 @@ func (g *Group) publishLocked( return nil } -// PublishScheduledBatch publishes a batch to all members in the compute committee. +// PublishScheduledBatch publishes a batch to all members in the executor committee. // Returns the transaction scheduler's signature for this batch. func (g *Group) PublishScheduledBatch( spanCtx opentracing.SpanContext, @@ -538,9 +538,9 @@ func (g *Group) PublishScheduledBatch( return nil, fmt.Errorf("group: not leader of txn scheduler committee") } - cc := g.activeEpoch.computeCommittees[committeeID] - if cc == nil { - return nil, fmt.Errorf("group: invalid compute committee") + xc := g.activeEpoch.executorCommittees[committeeID] + if xc == nil { + return nil, fmt.Errorf("group: invalid executor committee") } dispatchMsg := &commitment.TxnSchedulerBatchDispatch{ @@ -557,28 +557,28 @@ func (g *Group) PublishScheduledBatch( return &signedDispatchMsg.Signature, g.publishLocked( spanCtx, - cc, + xc, &p2p.Message{ SignedTxnSchedulerBatchDispatch: signedDispatchMsg, }, ) } -// PublishComputeFinished publishes a compute commitment to all members in the merge +// PublishExecuteFinished publishes an execute commitment to all members in the merge // committee. 
-func (g *Group) PublishComputeFinished(spanCtx opentracing.SpanContext, c *commitment.ComputeCommitment) error { +func (g *Group) PublishExecuteFinished(spanCtx opentracing.SpanContext, c *commitment.ExecutorCommitment) error { g.RLock() defer g.RUnlock() - if g.activeEpoch == nil || g.activeEpoch.computeCommittee == nil { - return fmt.Errorf("group: not member of compute committee") + if g.activeEpoch == nil || g.activeEpoch.executorCommittee == nil { + return fmt.Errorf("group: not member of executor committee") } return g.publishLocked( spanCtx, g.activeEpoch.mergeCommittee, &p2p.Message{ - ComputeWorkerFinished: &p2p.ComputeWorkerFinished{ + ExecutorWorkerFinished: &p2p.ExecutorWorkerFinished{ Commitment: *c, }, }, diff --git a/go/worker/common/p2p/types.go b/go/worker/common/p2p/types.go index cdce90f7d67..b51b6786d36 100644 --- a/go/worker/common/p2p/types.go +++ b/go/worker/common/p2p/types.go @@ -28,15 +28,15 @@ type Message struct { Error *Error `json:"err,omitempty"` SignedTxnSchedulerBatchDispatch *commitment.SignedTxnSchedulerBatchDispatch `json:",omitempty"` - ComputeWorkerFinished *ComputeWorkerFinished `json:",omitempty"` + ExecutorWorkerFinished *ExecutorWorkerFinished `json:",omitempty"` } -// ComputeWorkerFinished is the message sent from the compute workers to +// ExecutorWorkerFinished is the message sent from the executor workers to // the merge committee after a batch has been processed and is ready to // be merged. -type ComputeWorkerFinished struct { - // Commitment is a compute worker commitment. - Commitment commitment.ComputeCommitment +type ExecutorWorkerFinished struct { + // Commitment is an executor worker commitment. + Commitment commitment.ExecutorCommitment } // Ack is an acknowledgement that a message was received. diff --git a/go/worker/compute/committee/init.go b/go/worker/compute/committee/init.go deleted file mode 100644 index 01960c00dee..00000000000 --- a/go/worker/compute/committee/init.go +++ /dev/null @@ -1,27 +0,0 @@ -package committee - -import ( - "github.com/oasislabs/oasis-core/go/common/crash" -) - -const ( - crashPointBatchReceiveAfter = "worker.compute.batch.receive.after" - crashPointBatchProcessStartAfter = "worker.compute.batch.process_start.after" - crashPointBatchAbortAfter = "worker.compute.batch.abort.after" - crashPointBatchProposeBefore = "worker.compute.batch.propose.before" - crashPointBatchProposeAfter = "worker.compute.batch.propose.after" - crashPointDiscrepancyDetectedAfter = "worker.compute.batch.discrepancy_detected.after" - crashPointRoothashReceiveAfter = "worker.compute.batch.roothash.receive.after" -) - -func init() { - crash.RegisterCrashPoints( - crashPointBatchReceiveAfter, - crashPointBatchProcessStartAfter, - crashPointBatchAbortAfter, - crashPointBatchProposeBefore, - crashPointBatchProposeAfter, - crashPointDiscrepancyDetectedAfter, - crashPointRoothashReceiveAfter, - ) -} diff --git a/go/worker/compute/init.go b/go/worker/compute/init.go index 437aa6034b0..228fd029ec2 100644 --- a/go/worker/compute/init.go +++ b/go/worker/compute/init.go @@ -1,18 +1,25 @@ package compute import ( - workerCommon "github.com/oasislabs/oasis-core/go/worker/common" - "github.com/oasislabs/oasis-core/go/worker/computeenable" - "github.com/oasislabs/oasis-core/go/worker/merge" - "github.com/oasislabs/oasis-core/go/worker/registration" + flag "github.com/spf13/pflag" + "github.com/spf13/viper" ) -// New creates a new compute worker. 
-func New( - dataDir string, - commonWorker *workerCommon.Worker, - mergeWorker *merge.Worker, - registration *registration.Worker, -) (*Worker, error) { - return newWorker(dataDir, computeenable.Enabled(), commonWorker, mergeWorker, registration) +const ( + // CfgWorkerEnabled enables the compute worker, tx scheduler worker, and merge worker. + CfgWorkerEnabled = "worker.compute.enabled" +) + +// Flags has the configuration flags. +var Flags = flag.NewFlagSet("", flag.ContinueOnError) + +// Enabled reads our enabled flag from viper. +func Enabled() bool { + return viper.GetBool(CfgWorkerEnabled) +} + +func init() { + Flags.Bool(CfgWorkerEnabled, false, "Enable compute worker processes") + + _ = viper.BindPFlags(Flags) } diff --git a/go/worker/computeenable/init.go b/go/worker/computeenable/init.go deleted file mode 100644 index 585921116bd..00000000000 --- a/go/worker/computeenable/init.go +++ /dev/null @@ -1,25 +0,0 @@ -package computeenable - -import ( - flag "github.com/spf13/pflag" - "github.com/spf13/viper" -) - -const ( - // CfgWorkerEnabled enables the compute worker, tx scheduler worker, and merge worker. - CfgWorkerEnabled = "worker.compute.enabled" -) - -// Flags has the configuration flags. -var Flags = flag.NewFlagSet("", flag.ContinueOnError) - -// Enabled reads our enabled flag from viper. -func Enabled() bool { - return viper.GetBool(CfgWorkerEnabled) -} - -func init() { - Flags.Bool(CfgWorkerEnabled, false, "Enable compute worker processes") - - _ = viper.BindPFlags(Flags) -} diff --git a/go/worker/executor/committee/init.go b/go/worker/executor/committee/init.go new file mode 100644 index 00000000000..8ce3f638163 --- /dev/null +++ b/go/worker/executor/committee/init.go @@ -0,0 +1,27 @@ +package committee + +import ( + "github.com/oasislabs/oasis-core/go/common/crash" +) + +const ( + crashPointBatchReceiveAfter = "worker.executor.batch.receive.after" + crashPointBatchProcessStartAfter = "worker.executor.batch.process_start.after" + crashPointBatchAbortAfter = "worker.executor.batch.abort.after" + crashPointBatchProposeBefore = "worker.executor.batch.propose.before" + crashPointBatchProposeAfter = "worker.executor.batch.propose.after" + crashPointDiscrepancyDetectedAfter = "worker.executor.batch.discrepancy_detected.after" + crashPointRoothashReceiveAfter = "worker.executor.batch.roothash.receive.after" +) + +func init() { + crash.RegisterCrashPoints( + crashPointBatchReceiveAfter, + crashPointBatchProcessStartAfter, + crashPointBatchAbortAfter, + crashPointBatchProposeBefore, + crashPointBatchProposeAfter, + crashPointDiscrepancyDetectedAfter, + crashPointRoothashReceiveAfter, + ) +} diff --git a/go/worker/compute/committee/node.go b/go/worker/executor/committee/node.go similarity index 92% rename from go/worker/compute/committee/node.go rename to go/worker/executor/committee/node.go index d9d197b58b7..1b77c2ce4d0 100644 --- a/go/worker/compute/committee/node.go +++ b/go/worker/executor/committee/node.go @@ -33,21 +33,21 @@ import ( ) var ( - errSeenNewerBlock = errors.New("compute: seen newer block") - errWorkerAborted = errors.New("compute: worker aborted batch processing") - errIncompatibleHeader = errors.New("compute: incompatible header") - errInvalidReceipt = errors.New("compute: invalid storage receipt") - errStorageFailed = errors.New("compute: failed to fetch from storage") - errIncorrectRole = errors.New("compute: incorrect role") - errIncorrectState = errors.New("compute: incorrect state") - errMsgFromNonTxnSched = errors.New("compute: received txn scheduler 
dispatch msg from non-txn scheduler") + errSeenNewerBlock = errors.New("executor: seen newer block") + errWorkerAborted = errors.New("executor: worker aborted batch processing") + errIncompatibleHeader = errors.New("executor: incompatible header") + errInvalidReceipt = errors.New("executor: invalid storage receipt") + errStorageFailed = errors.New("executor: failed to fetch from storage") + errIncorrectRole = errors.New("executor: incorrect role") + errIncorrectState = errors.New("executor: incorrect state") + errMsgFromNonTxnSched = errors.New("executor: received txn scheduler dispatch msg from non-txn scheduler") ) var ( discrepancyDetectedCount = prometheus.NewCounterVec( prometheus.CounterOpts{ - Name: "oasis_worker_compute_discrepancy_detected_count", - Help: "Number of detected compute discrepancies", + Name: "oasis_worker_execution_discrepancy_detected_count", + Help: "Number of detected execute discrepancies", }, []string{"runtime"}, ) @@ -306,7 +306,7 @@ func (n *Node) HandleBatchFromTransactionSchedulerLocked( inputStorageSigs []signature.Signature, ) { epoch := n.commonNode.Group.GetEpochSnapshot() - expectedID := epoch.GetComputeCommitteeID() + expectedID := epoch.GetExecutorCommitteeID() if !expectedID.Equal(&committeeID) { return } @@ -353,7 +353,7 @@ func (n *Node) transitionLocked(state NodeState) { // HandleEpochTransitionLocked implements NodeHooks. // Guarded by n.commonNode.CrossNode. func (n *Node) HandleEpochTransitionLocked(epoch *committee.EpochSnapshot) { - if epoch.IsComputeMember() { + if epoch.IsExecutorMember() { n.transitionLocked(StateWaitingForBatch{}) } else { n.transitionLocked(StateNotReady{}) @@ -431,10 +431,10 @@ func (n *Node) maybeStartProcessingBatchLocked( epoch := n.commonNode.Group.GetEpochSnapshot() switch { - case epoch.IsComputeWorker(): + case epoch.IsExecutorWorker(): // Worker, start processing immediately. n.startProcessingBatchLocked(ioRoot, batch, batchSpanCtx, txnSchedSig, inputStorageSigs) - case epoch.IsComputeBackupWorker(): + case epoch.IsExecutorBackupWorker(): // Backup worker, wait for discrepancy event. state, ok := n.state.(StateWaitingForBatch) if ok && state.pendingEvent != nil { @@ -452,8 +452,8 @@ func (n *Node) maybeStartProcessingBatchLocked( inputStorageSigs: inputStorageSigs, }) default: - // Currently not a member of a compute committee, log. - n.logger.Warn("not a compute committee member, ignoring batch") + // Currently not a member of an executor committee, log. + n.logger.Warn("not an executor committee member, ignoring batch") } } @@ -491,7 +491,7 @@ func (n *Node) startProcessingBatchLocked( workerHost := n.GetWorkerHostLocked() if workerHost == nil { - // This should not happen as we only register to be a compute worker + // This should not happen as we only register to be an executor worker // once the worker host is ready. n.logger.Error("received a batch while worker host is not yet initialized") n.abortBatchLocked(errWorkerAborted) @@ -598,7 +598,7 @@ func (n *Node) proposeBatchLocked(batch *protocol.ComputedBatch) { // Generate proposed compute results. proposedResults := &commitment.ComputeBody{ - CommitteeID: epoch.GetComputeCommitteeID(), + CommitteeID: epoch.GetExecutorCommitteeID(), Header: batch.Header, RakSig: batch.RakSig, TxnSchedSig: state.txnSchedSig, @@ -687,7 +687,7 @@ func (n *Node) proposeBatchLocked(batch *protocol.ComputedBatch) { } // Commit. 
- commit, err := commitment.SignComputeCommitment(n.commonNode.Identity.NodeSigner, proposedResults) + commit, err := commitment.SignExecutorCommitment(n.commonNode.Identity.NodeSigner, proposedResults) if err != nil { n.logger.Error("failed to sign commitment", "err", err, @@ -697,10 +697,10 @@ func (n *Node) proposeBatchLocked(batch *protocol.ComputedBatch) { } // Publish commitment to merge committee. - spanPublish := opentracing.StartSpan("PublishComputeFinished(commitment)", + spanPublish := opentracing.StartSpan("PublishExecuteFinished(commitment)", opentracing.ChildOf(state.batchSpanCtx), ) - err = n.commonNode.Group.PublishComputeFinished(state.batchSpanCtx, commit) + err = n.commonNode.Group.PublishExecuteFinished(state.batchSpanCtx, commit) if err != nil { spanPublish.Finish() n.logger.Error("failed to publish results to committee", @@ -724,7 +724,7 @@ func (n *Node) proposeBatchLocked(batch *protocol.ComputedBatch) { if n.mergeNode == nil { n.logger.Error("scheduler says we are a merge worker, but we are not") } else { - n.mergeNode.HandleResultsFromComputeWorkerLocked(state.batchSpanCtx, commit) + n.mergeNode.HandleResultsFromExecutorWorkerLocked(state.batchSpanCtx, commit) } } @@ -734,7 +734,7 @@ func (n *Node) proposeBatchLocked(batch *protocol.ComputedBatch) { // HandleNewEventLocked implements NodeHooks. // Guarded by n.commonNode.CrossNode. func (n *Node) HandleNewEventLocked(ev *roothash.Event) { - dis := ev.ComputeDiscrepancyDetected + dis := ev.ExecutionDiscrepancyDetected if dis == nil { // Ignore other events. return @@ -742,7 +742,7 @@ func (n *Node) HandleNewEventLocked(ev *roothash.Event) { // Check if the discrepancy occurred in our committee. epoch := n.commonNode.Group.GetEpochSnapshot() - expectedID := epoch.GetComputeCommitteeID() + expectedID := epoch.GetExecutorCommitteeID() if !expectedID.Equal(&dis.CommitteeID) { n.logger.Debug("ignoring discrepancy event for a different committee", "expected_committee", expectedID, @@ -751,7 +751,7 @@ func (n *Node) HandleNewEventLocked(ev *roothash.Event) { return } - n.logger.Warn("compute discrepancy detected", + n.logger.Warn("execution discrepancy detected", "committee_id", dis.CommitteeID, ) @@ -759,7 +759,7 @@ func (n *Node) HandleNewEventLocked(ev *roothash.Event) { discrepancyDetectedCount.With(n.getMetricLabels()).Inc() - if !n.commonNode.Group.GetEpochSnapshot().IsComputeBackupWorker() { + if !n.commonNode.Group.GetEpochSnapshot().IsExecutorBackupWorker() { return } @@ -802,16 +802,16 @@ func (n *Node) handleExternalBatchLocked( epoch := n.commonNode.Group.GetEpochSnapshot() - // We can only receive external batches if we are a compute member. - if !epoch.IsComputeMember() { + // We can only receive external batches if we are an executor member. + if !epoch.IsExecutorMember() { n.logger.Error("got external batch while in incorrect role") return errIncorrectRole } // We only accept batches for our own committee. 
- expectedID := epoch.GetComputeCommitteeID() + expectedID := epoch.GetExecutorCommitteeID() if !expectedID.Equal(&committeeID) { - n.logger.Error("got external batch for a different compute committee", + n.logger.Error("got external batch for a different executor committee", "expected_committee", expectedID, "committee", committeeID, ) @@ -940,7 +940,7 @@ func NewNode( state: StateNotReady{}, stateTransitions: pubsub.NewBroker(false), reselect: make(chan struct{}, 1), - logger: logging.GetLogger("worker/compute/committee").With("runtime_id", commonNode.Runtime.ID()), + logger: logging.GetLogger("worker/executor/committee").With("runtime_id", commonNode.Runtime.ID()), } return n, nil diff --git a/go/worker/compute/committee/state.go b/go/worker/executor/committee/state.go similarity index 98% rename from go/worker/compute/committee/state.go rename to go/worker/executor/committee/state.go index 2ce15000cd3..5815898b301 100644 --- a/go/worker/compute/committee/state.go +++ b/go/worker/executor/committee/state.go @@ -114,9 +114,9 @@ func (s StateNotReady) String() string { // StateWaitingForBatch is the waiting for batch state. type StateWaitingForBatch struct { - // Pending compute discrepancy detected event in case the node is a + // Pending execute discrepancy detected event in case the node is a // backup worker and the event was received before the batch. - pendingEvent *roothash.ComputeDiscrepancyDetectedEvent + pendingEvent *roothash.ExecutionDiscrepancyDetectedEvent } // Name returns the name of the state. diff --git a/go/worker/executor/init.go b/go/worker/executor/init.go new file mode 100644 index 00000000000..17d39e0e8df --- /dev/null +++ b/go/worker/executor/init.go @@ -0,0 +1,18 @@ +package executor + +import ( + workerCommon "github.com/oasislabs/oasis-core/go/worker/common" + "github.com/oasislabs/oasis-core/go/worker/compute" + "github.com/oasislabs/oasis-core/go/worker/merge" + "github.com/oasislabs/oasis-core/go/worker/registration" +) + +// New creates a new executor worker. +func New( + dataDir string, + commonWorker *workerCommon.Worker, + mergeWorker *merge.Worker, + registration *registration.Worker, +) (*Worker, error) { + return newWorker(dataDir, compute.Enabled(), commonWorker, mergeWorker, registration) +} diff --git a/go/worker/compute/tests/tester.go b/go/worker/executor/tests/tester.go similarity index 92% rename from go/worker/compute/tests/tester.go rename to go/worker/executor/tests/tester.go index 90862795336..f6ac5891785 100644 --- a/go/worker/compute/tests/tester.go +++ b/go/worker/executor/tests/tester.go @@ -8,8 +8,8 @@ import ( "github.com/oasislabs/oasis-core/go/common" epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" epochtimeTests "github.com/oasislabs/oasis-core/go/epochtime/tests" - "github.com/oasislabs/oasis-core/go/worker/compute" - "github.com/oasislabs/oasis-core/go/worker/compute/committee" + "github.com/oasislabs/oasis-core/go/worker/executor" + "github.com/oasislabs/oasis-core/go/worker/executor/committee" ) const recvTimeout = 5 * time.Second @@ -21,7 +21,7 @@ const recvTimeout = 5 * time.Second // after the node was registered. 
func WorkerImplementationTests( t *testing.T, - worker *compute.Worker, + worker *executor.Worker, runtimeID common.Namespace, rtNode *committee.Node, epochtime epochtime.SetableBackend, diff --git a/go/worker/compute/worker.go b/go/worker/executor/worker.go similarity index 92% rename from go/worker/compute/worker.go rename to go/worker/executor/worker.go index dc0181119b6..e4342f683b9 100644 --- a/go/worker/compute/worker.go +++ b/go/worker/executor/worker.go @@ -1,4 +1,4 @@ -package compute +package executor import ( "context" @@ -8,12 +8,12 @@ import ( "github.com/oasislabs/oasis-core/go/common/node" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" committeeCommon "github.com/oasislabs/oasis-core/go/worker/common/committee" - "github.com/oasislabs/oasis-core/go/worker/compute/committee" + "github.com/oasislabs/oasis-core/go/worker/executor/committee" "github.com/oasislabs/oasis-core/go/worker/merge" "github.com/oasislabs/oasis-core/go/worker/registration" ) -// Worker is a compute worker handling many runtimes. +// Worker is an executor worker handling many runtimes. type Worker struct { *workerCommon.RuntimeHostWorker @@ -35,13 +35,13 @@ type Worker struct { // Name returns the service name. func (w *Worker) Name() string { - return "compute worker" + return "executor worker" } // Start starts the service. func (w *Worker) Start() error { if !w.enabled { - w.logger.Info("not starting compute worker as it is disabled") + w.logger.Info("not starting executor worker as it is disabled") // In case the worker is not enabled, close the init channel immediately. close(w.initCh) @@ -122,7 +122,7 @@ func (w *Worker) Cleanup() { } } -// Initialized returns a channel that will be closed when the compute worker +// Initialized returns a channel that will be closed when the executor worker // is initialized and ready to service requests. func (w *Worker) Initialized() <-chan struct{} { return w.initCh @@ -186,12 +186,12 @@ func newWorker( cancelCtx: cancelCtx, quitCh: make(chan struct{}), initCh: make(chan struct{}), - logger: logging.GetLogger("worker/compute"), + logger: logging.GetLogger("worker/executor"), } if enabled { if !w.commonWorker.Enabled() { - panic("common worker should have been enabled for compute worker") + panic("common worker should have been enabled for executor worker") } // Create the runtime host worker. diff --git a/go/worker/keymanager/policy.go b/go/worker/keymanager/policy.go index f6279fafdfb..800bd86d848 100644 --- a/go/worker/keymanager/policy.go +++ b/go/worker/keymanager/policy.go @@ -5,12 +5,12 @@ import ( "github.com/oasislabs/oasis-core/go/worker/common/committee" ) -// Only members of the current compute committee and other key manager nodes +// Only members of the current executor committee and other key manager nodes // can make gRPC calls to the key manager. // Note that everyone can make `get_public_key` calls, as this is handled // separately (in mustAllowAccess() in worker.go). var ( - computeCommitteePolicy = &committee.AccessPolicy{ + executorCommitteePolicy = &committee.AccessPolicy{ Actions: []accessctl.Action{ "CallEnclave", }, diff --git a/go/worker/keymanager/worker.go b/go/worker/keymanager/worker.go index b5aaab8db06..d1a8d9c1b34 100644 --- a/go/worker/keymanager/worker.go +++ b/go/worker/keymanager/worker.go @@ -50,7 +50,7 @@ var ( // The key manager worker. // // It behaves differently from other workers as the key manager has its -// own runtime. It needs to keep track of compute committees for other +// own runtime. 
It needs to keep track of executor committees for other // runtimes in order to update the access control lists. type Worker struct { sync.RWMutex @@ -570,10 +570,10 @@ func (crw *clientRuntimeWatcher) HandleEpochTransitionLocked(snapshot *committee // Update key manager access control policy on epoch transitions. policy := accessctl.NewPolicy() - // Apply rules to current compute committee members. - for _, cc := range snapshot.GetComputeCommittees() { - if cc != nil { - computeCommitteePolicy.AddRulesForCommittee(&policy, cc) + // Apply rules to current executor committee members. + for _, xc := range snapshot.GetExecutorCommittees() { + if xc != nil { + executorCommitteePolicy.AddRulesForCommittee(&policy, xc) } } diff --git a/go/worker/merge/committee/node.go b/go/worker/merge/committee/node.go index 1e5c5d2971a..2c026214064 100644 --- a/go/worker/merge/committee/node.go +++ b/go/worker/merge/committee/node.go @@ -149,11 +149,11 @@ func (n *Node) getMetricLabels() prometheus.Labels { // HandlePeerMessage implements NodeHooks. func (n *Node) HandlePeerMessage(ctx context.Context, message *p2p.Message) (bool, error) { - if message.ComputeWorkerFinished != nil { + if message.ExecutorWorkerFinished != nil { n.commonNode.CrossNode.Lock() defer n.commonNode.CrossNode.Unlock() - m := message.ComputeWorkerFinished + m := message.ExecutorWorkerFinished err := n.handleResultsLocked(ctx, &m.Commitment) if err != nil { return false, err @@ -204,7 +204,7 @@ func (n *Node) newStateWaitingForResultsLocked(epoch *committee.EpochSnapshot) S Committees: make(map[hash.Hash]*commitment.Pool), } - for cID, ci := range epoch.GetComputeCommittees() { + for cID, ci := range epoch.GetExecutorCommittees() { nodeInfo := make(map[signature.PublicKey]commitment.NodeInfo, len(ci.Nodes)) for idx, nd := range ci.Nodes { var nodeRuntime *node.Runtime @@ -293,32 +293,32 @@ func (n *Node) HandleNewBlockLocked(blk *block.Block) { } } -// HandleResultsFromComputeWorkerLocked processes results from a compute worker. +// HandleResultsFromExecutorWorkerLocked processes results from an executor worker. // Guarded by n.commonNode.CrossNode. -func (n *Node) HandleResultsFromComputeWorkerLocked(spanCtx opentracing.SpanContext, commit *commitment.ComputeCommitment) { +func (n *Node) HandleResultsFromExecutorWorkerLocked(spanCtx opentracing.SpanContext, commit *commitment.ExecutorCommitment) { // TODO: Context. if err := n.handleResultsLocked(context.TODO(), commit); err != nil { - n.logger.Warn("failed to handle results from local compute worker", + n.logger.Warn("failed to handle results from local executor worker", "err", err, ) } } // Guarded by n.commonNode.CrossNode. -func (n *Node) handleResultsLocked(ctx context.Context, commit *commitment.ComputeCommitment) error { +func (n *Node) handleResultsLocked(ctx context.Context, commit *commitment.ExecutorCommitment) error { // If we are not waiting for results, don't do anything. 
state, ok := n.state.(StateWaitingForResults) if !ok { return errIncorrectState } - n.logger.Debug("received new compute commitment", + n.logger.Debug("received new executor commitment", "node_id", commit.Signature.PublicKey, ) epoch := n.commonNode.Group.GetEpochSnapshot() - sp, err := state.pool.AddComputeCommitment(n.commonNode.CurrentBlock, epoch, commit) + sp, err := state.pool.AddExecutorCommitment(n.commonNode.CurrentBlock, epoch, commit) if err != nil { return err } @@ -356,12 +356,12 @@ func (n *Node) tryFinalizeResultsLocked(pool *commitment.Pool, didTimeout bool) // // We have two kinds of timeouts -- the first is based on local monotonic time and // starts counting as soon as the first commitment for a committee is received. It - // is used to trigger submission of compute commitments to the consensus layer for + // is used to trigger submission of executor commitments to the consensus layer for // proof of timeout. The consensus layer starts its own timeout and this is the // second timeout. // // The timeout is only considered authoritative once confirmed by consensus. In - // case of a local-only timeout, we will submit what compute commitments we have + // case of a local-only timeout, we will submit what executor commitments we have // to consensus and not change the internal Discrepancy flag. cid := pool.GetCommitteeID() logger := n.logger.With("committee_id", cid) @@ -373,7 +373,7 @@ func (n *Node) tryFinalizeResultsLocked(pool *commitment.Pool, didTimeout bool) ) return } - runtimeTimeout := rt.Compute.RoundTimeout + runtimeTimeout := rt.Executor.RoundTimeout commit, err := pool.TryFinalize(now, runtimeTimeout, didTimeout, consensusTimeout) switch err { @@ -396,19 +396,19 @@ func (n *Node) tryFinalizeResultsLocked(pool *commitment.Pool, didTimeout bool) fallthrough case commitment.ErrInsufficientVotes: // Discrepancy resolution failed. - logger.Warn("insufficient votes, performing compute commit") + logger.Warn("insufficient votes, performing executor commit") - // Submit compute commit to BFT. - ccs := pool.GetComputeCommitments() + // Submit executor commit to BFT. + ccs := pool.GetExecutorCommitments() go func() { - tx := roothash.NewComputeCommitTx(0, nil, n.commonNode.Runtime.ID(), ccs) + tx := roothash.NewExecutorCommitTx(0, nil, n.commonNode.Runtime.ID(), ccs) ccErr := consensus.SignAndSubmitTx(n.roundCtx, n.commonNode.Consensus, n.commonNode.Identity.NodeSigner, tx) switch ccErr { case nil: - logger.Info("compute commit finalized") + logger.Info("executor commit finalized") default: - logger.Warn("failed to submit compute commit", + logger.Warn("failed to submit executor commit", "err", ccErr, ) } @@ -431,7 +431,7 @@ func (n *Node) tryFinalizeResultsLocked(pool *commitment.Pool, didTimeout bool) n.logger.Info("have valid commitments from all committees, merging") - commitments := state.pool.GetComputeCommitments() + commitments := state.pool.GetExecutorCommitments() if epoch.IsMergeBackupWorker() && state.pendingEvent == nil { // Backup workers only perform merge after receiving a discrepancy event. @@ -444,7 +444,7 @@ func (n *Node) tryFinalizeResultsLocked(pool *commitment.Pool, didTimeout bool) } // Guarded by n.commonNode.CrossNode. 
-func (n *Node) startMergeLocked(commitments []commitment.ComputeCommitment, results []*commitment.ComputeResultsHeader) { +func (n *Node) startMergeLocked(commitments []commitment.ExecutorCommitment, results []*commitment.ComputeResultsHeader) { doneCh := make(chan *commitment.MergeBody, 1) ctx, cancel := context.WithCancel(n.roundCtx) @@ -558,8 +558,8 @@ func (n *Node) startMergeLocked(commitments []commitment.ComputeCommitment, resu blk.Header.StorageSignatures = signatures doneCh <- &commitment.MergeBody{ - ComputeCommits: commitments, - Header: blk.Header, + ExecutorCommits: commitments, + Header: blk.Header, } }() } @@ -637,8 +637,8 @@ func (n *Node) HandleNewEventLocked(ev *roothash.Event) { switch { case ev.MergeDiscrepancyDetected != nil: n.handleMergeDiscrepancyLocked(ev.MergeDiscrepancyDetected) - case ev.ComputeDiscrepancyDetected != nil: - n.handleComputeDiscrepancyLocked(ev.ComputeDiscrepancyDetected) + case ev.ExecutionDiscrepancyDetected != nil: + n.handleExecutorDiscrepancyLocked(ev.ExecutionDiscrepancyDetected) default: // Ignore other events. } @@ -677,8 +677,8 @@ func (n *Node) handleMergeDiscrepancyLocked(ev *roothash.MergeDiscrepancyDetecte } // Guarded by n.commonNode.CrossNode. -func (n *Node) handleComputeDiscrepancyLocked(ev *roothash.ComputeDiscrepancyDetectedEvent) { - n.logger.Warn("compute discrepancy detected", +func (n *Node) handleExecutorDiscrepancyLocked(ev *roothash.ExecutionDiscrepancyDetectedEvent) { + n.logger.Warn("execution discrepancy detected", "committee_id", ev.CommitteeID, "timeout", ev.Timeout, ) @@ -688,7 +688,7 @@ func (n *Node) handleComputeDiscrepancyLocked(ev *roothash.ComputeDiscrepancyDet // If the discrepancy was due to a timeout, record it. pool := s.pool.Committees[ev.CommitteeID] if pool == nil { - n.logger.Error("compute discrepancy event for unknown committee", + n.logger.Error("execution discrepancy event for unknown committee", "committee_id", ev.CommitteeID, ) return diff --git a/go/worker/merge/committee/state.go b/go/worker/merge/committee/state.go index dfc906b0b8d..bd4fde6a841 100644 --- a/go/worker/merge/committee/state.go +++ b/go/worker/merge/committee/state.go @@ -115,7 +115,7 @@ func (s StateWaitingForResults) String() string { // StateWaitingForEvent is the waiting for event state. type StateWaitingForEvent struct { - commitments []commitment.ComputeCommitment + commitments []commitment.ExecutorCommitment results []*commitment.ComputeResultsHeader } diff --git a/go/worker/merge/init.go b/go/worker/merge/init.go index 35b3c37882b..0c81ce8cb4b 100644 --- a/go/worker/merge/init.go +++ b/go/worker/merge/init.go @@ -2,11 +2,11 @@ package merge import ( workerCommon "github.com/oasislabs/oasis-core/go/worker/common" - "github.com/oasislabs/oasis-core/go/worker/computeenable" + "github.com/oasislabs/oasis-core/go/worker/compute" "github.com/oasislabs/oasis-core/go/worker/registration" ) // New creates a new worker. 
 func New(commonWorker *workerCommon.Worker, registration *registration.Worker) (*Worker, error) {
-	return newWorker(computeenable.Enabled(), commonWorker, registration)
+	return newWorker(compute.Enabled(), commonWorker, registration)
 }
diff --git a/go/worker/storage/committee/node.go b/go/worker/storage/committee/node.go
index e4a4d2d8562..92c1fe7bfc7 100644
--- a/go/worker/storage/committee/node.go
+++ b/go/worker/storage/committee/node.go
@@ -273,9 +273,9 @@ func (n *Node) HandlePeerMessage(context.Context, *p2p.Message) (bool, error) {
 func (n *Node) HandleEpochTransitionLocked(snapshot *committee.EpochSnapshot) {
 	// Create new storage gRPC access policy for the current runtime.
 	policy := accessctl.NewPolicy()
-	for _, cc := range snapshot.GetComputeCommittees() {
-		if cc != nil {
-			computeCommitteePolicy.AddRulesForCommittee(&policy, cc)
+	for _, xc := range snapshot.GetExecutorCommittees() {
+		if xc != nil {
+			executorCommitteePolicy.AddRulesForCommittee(&policy, xc)
 		}
 	}
 	if tsc := snapshot.GetTransactionSchedulerCommittee(); tsc != nil {
diff --git a/go/worker/storage/committee/policy.go b/go/worker/storage/committee/policy.go
index 9e96747974f..c3b12e16f9f 100644
--- a/go/worker/storage/committee/policy.go
+++ b/go/worker/storage/committee/policy.go
@@ -8,7 +8,7 @@ import (
 // Define storage access policies for all the relevant committees and node
 // groups.
 var (
-	computeCommitteePolicy = &committee.AccessPolicy{
+	executorCommitteePolicy = &committee.AccessPolicy{
 		Actions: []accessctl.Action{
 			"Apply",
 			"ApplyBatch",
diff --git a/go/worker/txnscheduler/algorithm/api/api.go b/go/worker/txnscheduler/algorithm/api/api.go
index cdec4b6ff2a..ddbc00baaa3 100644
--- a/go/worker/txnscheduler/algorithm/api/api.go
+++ b/go/worker/txnscheduler/algorithm/api/api.go
@@ -41,8 +41,8 @@ type Algorithm interface {
 	Clear()
 }
-// TransactionDispatcher dispatches transactions to a scheduled compute committee.
+// TransactionDispatcher dispatches transactions to a scheduled executor committee.
 type TransactionDispatcher interface {
-	// Dispatch attempts to dispatch a batch to a compute committee.
+	// Dispatch attempts to dispatch a batch to an executor committee.
 	Dispatch(committeeID hash.Hash, batch transaction.RawBatch) error
 }
diff --git a/go/worker/txnscheduler/algorithm/batching/batching.go b/go/worker/txnscheduler/algorithm/batching/batching.go
index 16b77d1d232..2e1affa3197 100644
--- a/go/worker/txnscheduler/algorithm/batching/batching.go
+++ b/go/worker/txnscheduler/algorithm/batching/batching.go
@@ -44,7 +44,7 @@ type config struct {
 }
 func (s *batchingState) scheduleBatch(force bool) error {
-	// The simple batching algorithm only supports a single compute committee. Use
+	// The simple batching algorithm only supports a single executor committee. Use
 	// with multiple committees will currently cause the rounds to fail as all other
 	// committees will be idle.
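// [Illustrative aside, not part of the diff.] A toy stand-in for the renamed
// TransactionDispatcher interface above, with simplified types (string committee
// ID, [][]byte batch). scheduleBatch ultimately drives exactly this seam when it
// hands the selected batch to the single executor committee; everything below is
// a sketch, not the repo's implementation.
package main

import "fmt"

// dispatcher mirrors the shape of TransactionDispatcher with simplified types.
type dispatcher interface {
	// Dispatch attempts to dispatch a batch to an executor committee.
	Dispatch(committeeID string, batch [][]byte) error
}

// logDispatcher just reports what it would dispatch.
type logDispatcher struct{}

func (logDispatcher) Dispatch(committeeID string, batch [][]byte) error {
	fmt.Printf("dispatching %d transaction(s) to executor committee %s\n", len(batch), committeeID)
	return nil
}

func main() {
	var d dispatcher = logDispatcher{}
	_ = d.Dispatch("executor-committee-1", [][]byte{[]byte("tx1"), []byte("tx2")})
}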
 	var committeeID *hash.Hash
@@ -58,7 +58,7 @@ func (s *batchingState) scheduleBatch(force bool) error {
 		return
 	}
-	for id := range s.epoch.GetComputeCommittees() {
+	for id := range s.epoch.GetExecutorCommittees() {
 		committeeID = &id
 		break
 	}
diff --git a/go/worker/txnscheduler/api/api.go b/go/worker/txnscheduler/api/api.go
index 19f1f9f2356..49ef47bdcf0 100644
--- a/go/worker/txnscheduler/api/api.go
+++ b/go/worker/txnscheduler/api/api.go
@@ -31,8 +31,8 @@ type TransactionScheduler interface {
 	SubmitTx(context.Context, *SubmitTxRequest) (*SubmitTxResponse, error)
 	// IsTransactionQueued checks if the given transaction is present in the
-	// transaction scheduler queue and is waiting to be dispatched to a
-	// compute committee.
+	// transaction scheduler queue and is waiting to be dispatched to an
+	// executor committee.
 	IsTransactionQueued(context.Context, *IsTransactionQueuedRequest) (*IsTransactionQueuedResponse, error)
 }
diff --git a/go/worker/txnscheduler/committee/node.go b/go/worker/txnscheduler/committee/node.go
index a434939887f..4ae5fb8d705 100644
--- a/go/worker/txnscheduler/committee/node.go
+++ b/go/worker/txnscheduler/committee/node.go
@@ -22,7 +22,7 @@ import (
 	storage "github.com/oasislabs/oasis-core/go/storage/api"
 	"github.com/oasislabs/oasis-core/go/worker/common/committee"
 	"github.com/oasislabs/oasis-core/go/worker/common/p2p"
-	computeCommittee "github.com/oasislabs/oasis-core/go/worker/compute/committee"
+	executorCommittee "github.com/oasislabs/oasis-core/go/worker/executor/committee"
 	txnSchedulerAlgorithm "github.com/oasislabs/oasis-core/go/worker/txnscheduler/algorithm"
 	txnSchedulerAlgorithmApi "github.com/oasislabs/oasis-core/go/worker/txnscheduler/algorithm/api"
 	"github.com/oasislabs/oasis-core/go/worker/txnscheduler/api"
@@ -50,8 +50,8 @@ var (
 // Node is a committee node.
 type Node struct {
-	commonNode  *committee.Node
-	computeNode *computeCommittee.Node
+	commonNode   *committee.Node
+	executorNode *executorCommittee.Node
 	// The algorithm mutex is here to protect the initialization
 	// of the algorithm variable. After initialization the variable
@@ -153,7 +153,7 @@ func (n *Node) QueueCall(ctx context.Context, call []byte) error {
 // IsTransactionQueued checks if the given transaction is present in the
-// transaction scheduler queue and is waiting to be dispatched to a
-// compute committee.
+// transaction scheduler queue and is waiting to be dispatched to an
+// executor committee.
 func (n *Node) IsTransactionQueued(ctx context.Context, id hash.Hash) (bool, error) {
 	// Check if we are a leader. Note that we may be in the middle of a
 	// transition, but this shouldn't matter as the client will retry.
@@ -248,7 +248,7 @@ func (n *Node) HandleNewBlockLocked(blk *block.Block) {
 func (n *Node) HandleNewEventLocked(ev *roothash.Event) {
 }
-// Dispatch dispatches a batch to the compute committee.
+// Dispatch dispatches a batch to the executor committee.
 func (n *Node) Dispatch(committeeID hash.Hash, batch transaction.RawBatch) error {
 	n.commonNode.CrossNode.Lock()
 	defer n.commonNode.CrossNode.Unlock()
@@ -275,7 +275,7 @@ func (n *Node) Dispatch(committeeID hash.Hash, batch transaction.RawBatch) error
 	batchSpanCtx := batchSpan.Context()
 	// Generate the initial I/O root containing only the inputs (outputs and
-	// tags will be added later by the compute nodes).
+	// tags will be added later by the executor nodes).
 	emptyRoot := storage.Root{
 		Namespace: lastHeader.Namespace,
 		Round: lastHeader.Round + 1,
@@ -353,11 +353,11 @@ func (n *Node) Dispatch(committeeID hash.Hash, batch transaction.RawBatch) error
 	n.transitionLocked(StateWaitingForFinalize{})
-	if epoch.IsComputeMember() {
-		if n.computeNode == nil {
-			n.logger.Error("scheduler says we are a compute worker, but we are not")
+	if epoch.IsExecutorMember() {
+		if n.executorNode == nil {
+			n.logger.Error("scheduler says we are an executor worker, but we are not")
 		} else {
-			n.computeNode.HandleBatchFromTransactionSchedulerLocked(
+			n.executorNode.HandleBatchFromTransactionSchedulerLocked(
 				batchSpanCtx,
 				committeeID,
 				ioRoot,
@@ -436,7 +436,7 @@ func (n *Node) worker() {
 func NewNode(
 	commonNode *committee.Node,
-	computeNode *computeCommittee.Node,
+	executorNode *executorCommittee.Node,
 ) (*Node, error) {
 	metricsOnce.Do(func() {
 		prometheus.MustRegister(nodeCollectors...)
@@ -446,7 +446,7 @@ func NewNode(
 	n := &Node{
 		commonNode: commonNode,
-		computeNode: computeNode,
+		executorNode: executorNode,
 		ctx: ctx,
 		cancelCtx: cancel,
 		stopCh: make(chan struct{}),
diff --git a/go/worker/txnscheduler/init.go b/go/worker/txnscheduler/init.go
index b06dde7427a..44fc8a3b6a7 100644
--- a/go/worker/txnscheduler/init.go
+++ b/go/worker/txnscheduler/init.go
@@ -5,8 +5,7 @@ import (
 	"github.com/spf13/viper"
 	workerCommon "github.com/oasislabs/oasis-core/go/worker/common"
-	"github.com/oasislabs/oasis-core/go/worker/compute"
-	"github.com/oasislabs/oasis-core/go/worker/computeenable"
+	"github.com/oasislabs/oasis-core/go/worker/executor"
 	"github.com/oasislabs/oasis-core/go/worker/registration"
 	txnSchedulerAlgorithm "github.com/oasislabs/oasis-core/go/worker/txnscheduler/algorithm"
 )
@@ -17,10 +16,10 @@ var Flags = flag.NewFlagSet("", flag.ContinueOnError)
 // New creates a new worker.
 func New(
 	commonWorker *workerCommon.Worker,
-	compute *compute.Worker,
+	executor *executor.Worker,
 	registration *registration.Worker,
 ) (*Worker, error) {
-	return newWorker(computeenable.Enabled(), commonWorker, compute, registration)
+	return newWorker(executor.Enabled(), commonWorker, executor, registration)
 }
 func init() {
diff --git a/go/worker/txnscheduler/service.go b/go/worker/txnscheduler/service.go
index ac4187cff89..5654c445afb 100644
--- a/go/worker/txnscheduler/service.go
+++ b/go/worker/txnscheduler/service.go
@@ -23,7 +23,7 @@ func (w *Worker) SubmitTx(ctx context.Context, rq *api.SubmitTxRequest) (*api.Su
 // IsTransactionQueued checks if the given transaction is present in the
-// transaction scheduler queue and is waiting to be dispatched to a
-// compute committee.
+// transaction scheduler queue and is waiting to be dispatched to an
+// executor committee.
func (w *Worker) IsTransactionQueued(ctx context.Context, rq *api.IsTransactionQueuedRequest) (*api.IsTransactionQueuedResponse, error) { runtime, ok := w.runtimes[rq.RuntimeID] if !ok { diff --git a/go/worker/txnscheduler/worker.go b/go/worker/txnscheduler/worker.go index 21e0000c7d9..805b4027840 100644 --- a/go/worker/txnscheduler/worker.go +++ b/go/worker/txnscheduler/worker.go @@ -5,7 +5,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/logging" workerCommon "github.com/oasislabs/oasis-core/go/worker/common" committeeCommon "github.com/oasislabs/oasis-core/go/worker/common/committee" - "github.com/oasislabs/oasis-core/go/worker/compute" + "github.com/oasislabs/oasis-core/go/worker/executor" "github.com/oasislabs/oasis-core/go/worker/registration" "github.com/oasislabs/oasis-core/go/worker/txnscheduler/api" "github.com/oasislabs/oasis-core/go/worker/txnscheduler/committee" @@ -17,7 +17,7 @@ type Worker struct { commonWorker *workerCommon.Worker registration *registration.Worker - compute *compute.Worker + executor *executor.Worker runtimes map[common.Namespace]*committee.Node @@ -136,9 +136,9 @@ func (w *Worker) registerRuntime(commonNode *committeeCommon.Node) error { ) // Get other nodes from this runtime. - computeNode := w.compute.GetRuntime(id) + executorNode := w.executor.GetRuntime(id) - node, err := committee.NewNode(commonNode, computeNode) + node, err := committee.NewNode(commonNode, executorNode) if err != nil { return err } @@ -156,14 +156,14 @@ func (w *Worker) registerRuntime(commonNode *committeeCommon.Node) error { func newWorker( enabled bool, commonWorker *workerCommon.Worker, - compute *compute.Worker, + executor *executor.Worker, registration *registration.Worker, ) (*Worker, error) { w := &Worker{ enabled: enabled, commonWorker: commonWorker, registration: registration, - compute: compute, + executor: executor, runtimes: make(map[common.Namespace]*committee.Node), quitCh: make(chan struct{}), initCh: make(chan struct{}),
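For downstream consumers, the most visible part of this rename is the roothash event field: code that previously switched on ev.ComputeDiscrepancyDetected now switches on ev.ExecutionDiscrepancyDetected, as in HandleNewEventLocked above. Below is a minimal, self-contained sketch of that consumer-side change, using hypothetical stand-in types rather than the real roothash API package.

package main

import "fmt"

// Stand-ins mirroring the renamed roothash event fields; the real types live in
// the roothash API package and carry committee ID and timeout details.
type mergeDiscrepancyDetectedEvent struct{}

type executionDiscrepancyDetectedEvent struct {
	Timeout bool
}

// event mirrors the shape switched on in HandleNewEventLocked.
type event struct {
	MergeDiscrepancyDetected     *mergeDiscrepancyDetectedEvent
	ExecutionDiscrepancyDetected *executionDiscrepancyDetectedEvent // was ComputeDiscrepancyDetected
}

// describeEvent shows the consumer-side rename in isolation.
func describeEvent(ev *event) string {
	switch {
	case ev.MergeDiscrepancyDetected != nil:
		return "merge discrepancy detected"
	case ev.ExecutionDiscrepancyDetected != nil:
		return fmt.Sprintf("execution discrepancy detected (timeout: %v)", ev.ExecutionDiscrepancyDetected.Timeout)
	default:
		return "ignored"
	}
}

func main() {
	fmt.Println(describeEvent(&event{
		ExecutionDiscrepancyDetected: &executionDiscrepancyDetectedEvent{Timeout: true},
	}))
}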