diff --git a/.changelog/2504.breaking.md b/.changelog/2504.breaking.md new file mode 100644 index 00000000000..608e6687ac5 --- /dev/null +++ b/.changelog/2504.breaking.md @@ -0,0 +1,9 @@ +Charge gas for runtime transactions and suspend runtimes which do not pay periodic maintenance fees. + +This introduces gas fees for submitting roothash commitments from runtime nodes. Since periodic +maintenance work must be performed on each epoch transition (e.g., electing runtime committees), +fees for that maintenance are paid by any nodes that register to perform work for a specific +runtime. Fees are pre-paid for the number of epochs a node registers for. + +If the maintenance fees are not paid, the runtime gets suspended (so periodic work is not needed) +and must be resumed by registering nodes. diff --git a/go/Makefile b/go/Makefile index e4c2efb2bbb..ba1a7f40335 100644 --- a/go/Makefile +++ b/go/Makefile @@ -52,7 +52,7 @@ fmt: # Lint. lint: @$(ECHO) "$(CYAN)*** Running Go linters...$(OFF)" - @env -u GOPATH golangci-lint run + @env -u GOPATH golangci-lint run --deadline 2m # Test. test: diff --git a/go/common/crypto/signature/signature.go b/go/common/crypto/signature/signature.go index 360ee2f73c0..fb7c3c34dcb 100644 --- a/go/common/crypto/signature/signature.go +++ b/go/common/crypto/signature/signature.go @@ -222,6 +222,11 @@ func (r RawSignature) String() string { return string(data) } +// Equal compares vs another public key for equality. +func (r RawSignature) Equal(cmp RawSignature) bool { + return bytes.Equal(r[:], cmp[:]) +} + // MarshalBinary encodes a signature into binary form. func (r RawSignature) MarshalBinary() (data []byte, err error) { data = append([]byte{}, r[:]...) @@ -279,6 +284,17 @@ type Signature struct { Signature RawSignature `json:"signature"` } +// Equal compares vs another signature for equality. +func (s *Signature) Equal(cmp *Signature) bool { + if !s.PublicKey.Equal(cmp.PublicKey) { + return false + } + if !s.Signature.Equal(cmp.Signature) { + return false + } + return true +} + // Sign generates a signature with the private key over the context and // message. func Sign(signer Signer, context Context, message []byte) (*Signature, error) { diff --git a/go/consensus/tendermint/abci/context.go b/go/consensus/tendermint/abci/context.go index d7c7ecc410e..d9f6ff42b97 100644 --- a/go/consensus/tendermint/abci/context.go +++ b/go/consensus/tendermint/abci/context.go @@ -218,7 +218,9 @@ func (sc *StateCheckpoint) Rollback() { if sc.ctx == nil { return } - sc.ctx.State().ImmutableTree = &sc.ImmutableTree + st := sc.ctx.State() + st.Rollback() + st.ImmutableTree = &sc.ImmutableTree } // BlockContextKey is an interface for a block context key. diff --git a/go/consensus/tendermint/apps/beacon/beacon.go b/go/consensus/tendermint/apps/beacon/beacon.go index e5e05cc9c50..ec59822406d 100644 --- a/go/consensus/tendermint/apps/beacon/beacon.go +++ b/go/consensus/tendermint/apps/beacon/beacon.go @@ -122,7 +122,7 @@ func (app *beaconApplication) onBeaconEpochChange(ctx *abci.Context, epoch epoch case true: // UNSAFE/DEBUG - Deterministic beacon. 
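The `RawSignature.Equal` and `Signature.Equal` helpers added in `signature.go` above compare fixed-size signature values by slicing the underlying byte arrays. A minimal standalone sketch of that comparison pattern; the `rawSig` type and its 64-byte size are illustrative stand-ins, not the oasis-core definitions:

```go
package main

import (
	"bytes"
	"fmt"
)

// rawSig is an illustrative stand-in for signature.RawSignature (a fixed-size
// byte array); 64 bytes matches an Ed25519 signature but is not taken from
// the oasis-core sources.
type rawSig [64]byte

// equal mirrors the added Equal helper: slicing both arrays with [:] lets
// bytes.Equal compare their contents without copying.
func (r rawSig) equal(cmp rawSig) bool {
	return bytes.Equal(r[:], cmp[:])
}

func main() {
	var a, b rawSig
	a[0], b[0] = 0x01, 0x01
	fmt.Println(a.equal(b)) // true
	b[0] = 0x02
	fmt.Println(a.equal(b)) // false
}
```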
entropyCtx = debugEntropyCtx - entropy = []byte("If you change this, you will fuck up the byzantine tests!!") + entropy = []byte("If you change this, you will fuck up the byzantine tests!!!") } b := getBeacon(epoch, entropyCtx, entropy) diff --git a/go/consensus/tendermint/apps/registry/genesis.go b/go/consensus/tendermint/apps/registry/genesis.go index 8762ba462ac..a6099023d39 100644 --- a/go/consensus/tendermint/apps/registry/genesis.go +++ b/go/consensus/tendermint/apps/registry/genesis.go @@ -75,6 +75,25 @@ func (app *registryApplication) InitChain(ctx *abci.Context, request types.Reque } } } + for _, v := range st.SuspendedRuntimes { + app.logger.Debug("InitChain: Registering genesis suspended runtime", + "runtime_owner", v.Signature.PublicKey, + ) + if err := app.registerRuntime(ctx, state, v); err != nil { + app.logger.Error("InitChain: failed to register runtime", + "err", err, + "runtime", v, + ) + return errors.Wrap(err, "registry: genesis suspended runtime registration failure") + } + var rt registry.Runtime + if err := cbor.Unmarshal(v.Blob, &rt); err != nil { + return errors.Wrap(err, "registry: malformed genesis suspended runtime") + } + if err := state.SuspendRuntime(rt.ID); err != nil { + return errors.Wrap(err, "registry: failed to suspend runtime at genesis") + } + } for _, v := range st.Nodes { app.logger.Debug("InitChain: Registering genesis node", "node_owner", v.Signature.PublicKey, @@ -120,6 +139,10 @@ func (rq *registryQuerier) Genesis(ctx context.Context) (*registry.Genesis, erro if err != nil { return nil, err } + suspendedRuntimes, err := rq.state.SuspendedRuntimes() + if err != nil { + return nil, err + } signedNodes, err := rq.state.SignedNodes() if err != nil { return nil, err @@ -149,11 +172,12 @@ func (rq *registryQuerier) Genesis(ctx context.Context) (*registry.Genesis, erro } gen := registry.Genesis{ - Parameters: *params, - Entities: signedEntities, - Runtimes: signedRuntimes, - Nodes: validatorNodes, - NodeStatuses: nodeStatuses, + Parameters: *params, + Entities: signedEntities, + Runtimes: signedRuntimes, + SuspendedRuntimes: suspendedRuntimes, + Nodes: validatorNodes, + NodeStatuses: nodeStatuses, } return &gen, nil } diff --git a/go/consensus/tendermint/apps/registry/state/state.go b/go/consensus/tendermint/apps/registry/state/state.go index 3667fb633d1..1d71820a80f 100644 --- a/go/consensus/tendermint/apps/registry/state/state.go +++ b/go/consensus/tendermint/apps/registry/state/state.go @@ -52,7 +52,7 @@ var ( nodeStatusKeyFmt = keyformat.New(0x15, &signature.PublicKey{}) // parametersKeyFmt is the key format used for consensus parameters. // - // Value is CBOR-serialized roothash.ConsensusParameters. + // Value is CBOR-serialized registry.ConsensusParameters. parametersKeyFmt = keyformat.New(0x16) // keyMapKeyFmt is the key format used for key-to-node-id map. // This stores the consensus and P2P to Node ID mappings. @@ -64,6 +64,10 @@ var ( // // Value is binary signature.PublicKey (node ID). certificateMapKeyFmt = keyformat.New(0x18, &hash.Hash{}) + // suspendedRuntimeKeyFmt is the key format used for suspended runtimes. + // + // Value is CBOR-serialized signed runtime. 
+ suspendedRuntimeKeyFmt = keyformat.New(0x19, &common.Namespace{}) ) type ImmutableState struct { @@ -238,40 +242,77 @@ func (s *ImmutableState) SignedNodes() ([]*node.SignedNode, error) { return nodes, nil } -func (s *ImmutableState) getSignedRuntimeRaw(id common.Namespace) ([]byte, error) { - _, value := s.Snapshot.Get(signedRuntimeKeyFmt.Encode(&id)) - return value, nil -} - -func (s *ImmutableState) Runtime(id common.Namespace) (*registry.Runtime, error) { - raw, err := s.getSignedRuntimeRaw(id) - if err != nil { - return nil, err - } +func (s *ImmutableState) getSignedRuntime(keyFmt *keyformat.KeyFormat, id common.Namespace) (*registry.SignedRuntime, error) { + _, raw := s.Snapshot.Get(keyFmt.Encode(&id)) if raw == nil { return nil, registry.ErrNoSuchRuntime } var signedRuntime registry.SignedRuntime - if err = cbor.Unmarshal(raw, &signedRuntime); err != nil { + if err := cbor.Unmarshal(raw, &signedRuntime); err != nil { + return nil, err + } + return &signedRuntime, nil +} + +func (s *ImmutableState) getRuntime(keyFmt *keyformat.KeyFormat, id common.Namespace) (*registry.Runtime, error) { + signedRuntime, err := s.getSignedRuntime(keyFmt, id) + if err != nil { return nil, err } var runtime registry.Runtime if err = cbor.Unmarshal(signedRuntime.Blob, &runtime); err != nil { return nil, err } - return &runtime, err + return &runtime, nil } -// GetRuntimes returns a list of all registered runtimes. -func (s *ImmutableState) Runtimes() ([]*registry.Runtime, error) { - var runtimes []*registry.Runtime +// Runtime looks up a runtime by its identifier and returns it. +// +// This excludes any suspended runtimes, use SuspendedRuntime to query +// suspended runtimes only. +func (s *ImmutableState) Runtime(id common.Namespace) (*registry.Runtime, error) { + return s.getRuntime(signedRuntimeKeyFmt, id) +} + +// SuspendedRuntime looks up a suspended runtime by its identifier and +// returns it. +func (s *ImmutableState) SuspendedRuntime(id common.Namespace) (*registry.Runtime, error) { + return s.getRuntime(suspendedRuntimeKeyFmt, id) +} + +// AnyRuntime looks up either an active or suspended runtime by its identifier and returns it. +func (s *ImmutableState) AnyRuntime(id common.Namespace) (rt *registry.Runtime, err error) { + rt, err = s.Runtime(id) + if err == registry.ErrNoSuchRuntime { + rt, err = s.SuspendedRuntime(id) + } + return +} + +// SignedRuntime looks up a (signed) runtime by its identifier and returns it. +// +// This excludes any suspended runtimes, use SuspendedSignedRuntime to query +// suspended runtimes only. +func (s *ImmutableState) SignedRuntime(id common.Namespace) (*registry.SignedRuntime, error) { + return s.getSignedRuntime(signedRuntimeKeyFmt, id) +} + +// SignedSuspendedRuntime looks up a (signed) suspended runtime by its identifier and returns it. 
+func (s *ImmutableState) SignedSuspendedRuntime(id common.Namespace) (*registry.SignedRuntime, error) { + return s.getSignedRuntime(suspendedRuntimeKeyFmt, id) +} + +func (s *ImmutableState) iterateRuntimes( + keyFmt *keyformat.KeyFormat, + cb func(*registry.SignedRuntime), +) { s.Snapshot.IterateRange( - signedRuntimeKeyFmt.Encode(), + keyFmt.Encode(), nil, true, func(key, value []byte) bool { - if !signedRuntimeKeyFmt.Decode(key) { + if !keyFmt.Decode(key) { return true } @@ -279,41 +320,77 @@ func (s *ImmutableState) Runtimes() ([]*registry.Runtime, error) { if err := cbor.Unmarshal(value, &signedRt); err != nil { panic("tendermint/registry: corrupted state: " + err.Error()) } - var runtime registry.Runtime - if err := cbor.Unmarshal(signedRt.Blob, &runtime); err != nil { - panic("tendermint/registry: corrupted state: " + err.Error()) - } - runtimes = append(runtimes, &runtime) + cb(&signedRt) return false }, ) +} + +// SignedRuntimes returns a list of all registered runtimes (signed). +// +// This excludes any suspended runtimes. +func (s *ImmutableState) SignedRuntimes() ([]*registry.SignedRuntime, error) { + var runtimes []*registry.SignedRuntime + s.iterateRuntimes(signedRuntimeKeyFmt, func(rt *registry.SignedRuntime) { + runtimes = append(runtimes, rt) + }) return runtimes, nil } -func (s *ImmutableState) SignedRuntimes() ([]*registry.SignedRuntime, error) { +// SuspendedRuntimes returns a list of all suspended runtimes (signed). +func (s *ImmutableState) SuspendedRuntimes() ([]*registry.SignedRuntime, error) { var runtimes []*registry.SignedRuntime - s.Snapshot.IterateRange( - signedRuntimeKeyFmt.Encode(), - nil, - true, - func(key, value []byte) bool { - if !signedRuntimeKeyFmt.Decode(key) { - return true - } + s.iterateRuntimes(suspendedRuntimeKeyFmt, func(rt *registry.SignedRuntime) { + runtimes = append(runtimes, rt) + }) - var signedRt registry.SignedRuntime - if err := cbor.Unmarshal(value, &signedRt); err != nil { - panic("tendermint/registry: corrupted state: " + err.Error()) - } + return runtimes, nil +} - runtimes = append(runtimes, &signedRt) +// AllSignedRuntimes returns a list of all runtimes (suspended included). +func (s *ImmutableState) AllSignedRuntimes() ([]*registry.SignedRuntime, error) { + var runtimes []*registry.SignedRuntime + s.iterateRuntimes(signedRuntimeKeyFmt, func(rt *registry.SignedRuntime) { + runtimes = append(runtimes, rt) + }) + s.iterateRuntimes(suspendedRuntimeKeyFmt, func(rt *registry.SignedRuntime) { + runtimes = append(runtimes, rt) + }) - return false - }, - ) + return runtimes, nil +} + +// Runtimes returns a list of all registered runtimes. +// +// This excludes any suspended runtimes. +func (s *ImmutableState) Runtimes() ([]*registry.Runtime, error) { + var runtimes []*registry.Runtime + s.iterateRuntimes(signedRuntimeKeyFmt, func(sigRt *registry.SignedRuntime) { + var rt registry.Runtime + if err := cbor.Unmarshal(sigRt.Blob, &rt); err != nil { + panic("tendermint/registry: corrupted state: " + err.Error()) + } + runtimes = append(runtimes, &rt) + }) + + return runtimes, nil +} + +// AllRuntimes returns a list of all registered runtimes (suspended included). 
+func (s *ImmutableState) AllRuntimes() ([]*registry.Runtime, error) { + var runtimes []*registry.Runtime + unpackFn := func(sigRt *registry.SignedRuntime) { + var rt registry.Runtime + if err := cbor.Unmarshal(sigRt.Blob, &rt); err != nil { + panic("tendermint/registry: corrupted state: " + err.Error()) + } + runtimes = append(runtimes, &rt) + } + s.iterateRuntimes(signedRuntimeKeyFmt, unpackFn) + s.iterateRuntimes(suspendedRuntimeKeyFmt, unpackFn) return runtimes, nil } @@ -456,7 +533,7 @@ type MutableState struct { tree *iavl.MutableTree } -func (s *MutableState) CreateEntity(ent *entity.Entity, sigEnt *entity.SignedEntity) { +func (s *MutableState) SetEntity(ent *entity.Entity, sigEnt *entity.SignedEntity) { s.tree.Set(signedEntityKeyFmt.Encode(&ent.ID), cbor.Marshal(sigEnt)) } @@ -474,7 +551,7 @@ func (s *MutableState) RemoveEntity(id signature.PublicKey) (*entity.Entity, err return nil, registry.ErrNoSuchEntity } -func (s *MutableState) CreateNode(node *node.Node, signedNode *node.SignedNode) error { +func (s *MutableState) SetNode(node *node.Node, signedNode *node.SignedNode) error { // Ensure that the entity exists. ent, err := s.getSignedEntityRaw(node.EntityID) if ent == nil || err != nil { @@ -515,15 +592,41 @@ func (s *MutableState) RemoveNode(node *node.Node) { s.tree.Remove(certificateMapKeyFmt.Encode(&certHash)) } -func (s *MutableState) CreateRuntime(rt *registry.Runtime, sigRt *registry.SignedRuntime) error { +func (s *MutableState) SetRuntime(rt *registry.Runtime, sigRt *registry.SignedRuntime, suspended bool) error { entID := sigRt.Signature.PublicKey ent, err := s.getSignedEntityRaw(entID) if ent == nil || err != nil { return registry.ErrNoSuchEntity } - s.tree.Set(signedRuntimeKeyFmt.Encode(&rt.ID), cbor.Marshal(sigRt)) + if suspended { + s.tree.Set(suspendedRuntimeKeyFmt.Encode(&rt.ID), cbor.Marshal(sigRt)) + } else { + s.tree.Set(signedRuntimeKeyFmt.Encode(&rt.ID), cbor.Marshal(sigRt)) + } + + return nil +} + +func (s *MutableState) SuspendRuntime(id common.Namespace) error { + _, raw := s.Snapshot.Get(signedRuntimeKeyFmt.Encode(&id)) + if raw == nil { + return registry.ErrNoSuchRuntime + } + + s.tree.Remove(signedRuntimeKeyFmt.Encode(&id)) + s.tree.Set(suspendedRuntimeKeyFmt.Encode(&id), raw) + return nil +} + +func (s *MutableState) ResumeRuntime(id common.Namespace) error { + _, raw := s.Snapshot.Get(suspendedRuntimeKeyFmt.Encode(&id)) + if raw == nil { + return registry.ErrNoSuchRuntime + } + s.tree.Remove(suspendedRuntimeKeyFmt.Encode(&id)) + s.tree.Set(signedRuntimeKeyFmt.Encode(&id), raw) return nil } diff --git a/go/consensus/tendermint/apps/registry/transactions.go b/go/consensus/tendermint/apps/registry/transactions.go index fa9b24c7dd3..ef3251682e5 100644 --- a/go/consensus/tendermint/apps/registry/transactions.go +++ b/go/consensus/tendermint/apps/registry/transactions.go @@ -1,6 +1,8 @@ package registry import ( + "fmt" + "github.com/oasislabs/oasis-core/go/common/cbor" "github.com/oasislabs/oasis-core/go/common/entity" "github.com/oasislabs/oasis-core/go/common/node" @@ -58,7 +60,7 @@ func (app *registryApplication) registerEntity( return registry.ErrIncorrectTxSigner } - state.CreateEntity(ent, sigEnt) + state.SetEntity(ent, sigEnt) app.logger.Debug("RegisterEntity: registered", "entity", ent, @@ -154,7 +156,8 @@ func (app *registryApplication) registerNode( // nolint: gocyclo ) return err } - regRuntimes, err := state.Runtimes() + // TODO: Avoid loading a list of all runtimes. 
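The new `SuspendRuntime`/`ResumeRuntime` helpers above implement suspension purely as a key move: the serialized signed runtime is deleted under the active key format and re-inserted under the suspended one, and vice versa on resume. A toy sketch of that pattern, assuming a plain map as a stand-in for the IAVL-backed state and string prefixes in place of the binary key formats:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSuchRuntime = errors.New("no such runtime")

// store is a toy stand-in for the IAVL-backed registry state; the "active/"
// and "suspended/" prefixes play the role of signedRuntimeKeyFmt and
// suspendedRuntimeKeyFmt.
type store map[string][]byte

// move relocates the serialized signed-runtime blob from one key to another,
// which is all that SuspendRuntime and ResumeRuntime do.
func (s store) move(from, to string) error {
	raw, ok := s[from]
	if !ok {
		return errNoSuchRuntime
	}
	delete(s, from)
	s[to] = raw
	return nil
}

func main() {
	s := store{"active/rt1": []byte("signed runtime blob")}
	fmt.Println(s.move("active/rt1", "suspended/rt1")) // <nil> (suspend)
	fmt.Println(s.move("suspended/rt1", "active/rt1")) // <nil> (resume)
	fmt.Println(s.move("suspended/rt1", "active/rt1")) // no such runtime
}
```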
+ regRuntimes, err := state.AllRuntimes() if err != nil { app.logger.Error("RegisterNode: failed to obtain registry runtimes", "err", err, @@ -169,7 +172,7 @@ func (app *registryApplication) registerNode( // nolint: gocyclo // Charge gas for node registration if signed by entity. For node-signed // registrations, the gas charges are pre-paid by the entity. - if sigNode.Signature.PublicKey.Equal(untrustedNode.EntityID) { + if sigNode.Signature.PublicKey.Equal(newNode.EntityID) { if err = ctx.Gas().UseGas(1, registry.GasOpRegisterNode, params.GasCosts); err != nil { return err } @@ -212,12 +215,14 @@ func (app *registryApplication) registerNode( // nolint: gocyclo } } - // Ensure node is not expired. + // Ensure node is not expired. Even though the expiration in the current epoch is technically + // not yet expired, we treat it as expired as it doesn't make sense to have a new node that will + // immediately expire. epoch, err := app.state.GetEpoch(ctx.Ctx(), ctx.BlockHeight()+1) if err != nil { return err } - if newNode.IsExpired(uint64(epoch)) { + if newNode.Expiration <= uint64(epoch) { return registry.ErrNodeExpired } @@ -225,6 +230,56 @@ func (app *registryApplication) registerNode( // nolint: gocyclo existingNode, err := state.Node(newNode.ID) isNewNode := err == registry.ErrNoSuchNode isExpiredNode := err == nil && existingNode.IsExpired(uint64(epoch)) + if !isNewNode && err != nil { + // Something went horribly wrong, and we failed to query the node. + app.logger.Error("RegisterNode: failed to query node", + "err", err, + "new_node", newNode, + "existing_node", existingNode, + "entity", newNode.EntityID, + ) + return registry.ErrInvalidArgument + } + + // For each runtime the node registers for, require it to pay a maintenance fee for + // each epoch the node is registered in. + additionalEpochs := newNode.Expiration - uint64(epoch) + if !isNewNode && !isExpiredNode { + // Remaining epochs are credited so the node doesn't end up paying twice. + // NOTE: This assumes that changing runtimes is not allowed as otherwise we + // would need to account this per-runtime. + remainingEpochs := existingNode.Expiration - uint64(epoch) + if additionalEpochs > remainingEpochs { + additionalEpochs = additionalEpochs - remainingEpochs + } else { + additionalEpochs = 0 + } + } + var paidRuntimes []*registry.Runtime + for _, nodeRt := range newNode.Runtimes { + var rt *registry.Runtime + rt, err = state.AnyRuntime(nodeRt.ID) + if err != nil { + return fmt.Errorf("failed to fetch runtime: %w", err) + } + + paidRuntimes = append(paidRuntimes, rt) + } + feeCount := len(paidRuntimes) * int(additionalEpochs) + if err = ctx.Gas().UseGas(feeCount, registry.GasOpRuntimeEpochMaintenance, params.GasCosts); err != nil { + return err + } + + // Create a new state checkpoint and rollback in case we fail. + var ok bool + sc := ctx.NewStateCheckpoint() + defer func() { + if !ok { + sc.Rollback() + } + sc.Close() + }() + if isNewNode || isExpiredNode { // Check that the entity has enough stake for this node registration. if !params.DebugBypassStake { @@ -238,7 +293,7 @@ func (app *registryApplication) registerNode( // nolint: gocyclo } // Node doesn't exist (or is expired). Create node. 
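The maintenance-fee logic in `registerNode` above charges `GasOpRuntimeEpochMaintenance` once per (runtime, additional epoch) pair, crediting epochs that are already covered by a still-valid registration. A simplified model of that arithmetic; `costPerUnit` stands in for the per-operation value the real code looks up in `params.GasCosts`, and all names are illustrative:

```go
package main

import "fmt"

// maintenanceFeeGas is a simplified model of the charge computed in
// registerNode above. costPerUnit stands in for the per-operation value the
// real code looks up in params.GasCosts; all names here are illustrative.
func maintenanceFeeGas(currentEpoch, oldExpiration, newExpiration, numRuntimes, costPerUnit uint64) uint64 {
	additional := newExpiration - currentEpoch
	if oldExpiration > currentEpoch {
		// Existing, unexpired registration: credit the epochs already paid for.
		remaining := oldExpiration - currentEpoch
		if additional > remaining {
			additional -= remaining
		} else {
			additional = 0
		}
	}
	return numRuntimes * additional * costPerUnit
}

func main() {
	// A node registered until epoch 10 re-registers at epoch 6 until epoch 15
	// for 2 runtimes at 1000 gas per unit:
	// additional = (15-6) - (10-6) = 5 epochs -> 2 * 5 * 1000 = 10000 gas.
	fmt.Println(maintenanceFeeGas(6, 10, 15, 2, 1000))
}
```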
- if err = state.CreateNode(newNode, sigNode); err != nil { + if err = state.SetNode(newNode, sigNode); err != nil { app.logger.Error("RegisterNode: failed to create node", "err", err, "node", newNode, @@ -270,15 +325,6 @@ func (app *registryApplication) registerNode( // nolint: gocyclo ) return registry.ErrInvalidArgument } - } else if err != nil { - // Something went horribly wrong, and we failed to query the node. - app.logger.Error("RegisterNode: failed to query node", - "err", err, - "new_node", newNode, - "existing_node", existingNode, - "entity", newNode.EntityID, - ) - return registry.ErrInvalidArgument } else { // Check that the entity has enough stake for the existing node // registrations. @@ -302,7 +348,7 @@ func (app *registryApplication) registerNode( // nolint: gocyclo ) return err } - if err = state.CreateNode(newNode, sigNode); err != nil { + if err = state.SetNode(newNode, sigNode); err != nil { app.logger.Error("RegisterNode: failed to update node", "err", err, "node", newNode, @@ -312,6 +358,30 @@ func (app *registryApplication) registerNode( // nolint: gocyclo } } + // If a runtime was previously suspended and this node now paid maintenance + // fees for it, resume the runtime. + for _, rt := range paidRuntimes { + err := state.ResumeRuntime(rt.ID) + switch err { + case nil: + app.logger.Debug("RegisterNode: resumed runtime", + "runtime_id", rt.ID, + ) + + ctx.EmitEvent(api.NewEventBuilder(app.Name()).Attribute(KeyRuntimeRegistered, cbor.Marshal(rt))) + case registry.ErrNoSuchRuntime: + // Runtime was not suspended. + default: + app.logger.Error("RegisterNode: failed to resume suspended runtime", + "err", err, + "runtime_id", rt.ID, + ) + return fmt.Errorf("failed to resume suspended runtime %s: %w", rt.ID, err) + } + } + + ok = true + app.logger.Debug("RegisterNode: registered", "node", newNode, "roles", newNode.Roles, @@ -446,7 +516,33 @@ func (app *registryApplication) registerRuntime( } } - if err = state.CreateRuntime(rt, sigRt); err != nil { + // Make sure the runtime doesn't exist yet. + var suspended bool + existingRt, err := state.SignedRuntime(rt.ID) + switch err { + case nil: + case registry.ErrNoSuchRuntime: + // Make sure the runtime isn't suspended. + existingRt, err = state.SignedSuspendedRuntime(rt.ID) + switch err { + case nil: + suspended = true + case registry.ErrNoSuchRuntime: + default: + return fmt.Errorf("failed to fetch suspended runtime: %w", err) + } + default: + return fmt.Errorf("failed to fetch runtime: %w", err) + } + // If there is an existing runtime, verify update. 
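The node-registration path above wraps its state mutations in `ctx.NewStateCheckpoint()` and rolls back from a deferred function unless an `ok` flag was set at the end. A self-contained sketch of that rollback-unless-committed idiom, with a hypothetical map-based checkpoint standing in for `abci.StateCheckpoint`:

```go
package main

import "fmt"

// checkpoint is a hypothetical map-based stand-in for abci.StateCheckpoint;
// only the rollback-unless-committed idiom is illustrated.
type checkpoint struct{ live, saved map[string]string }

func newCheckpoint(live map[string]string) *checkpoint {
	saved := make(map[string]string, len(live))
	for k, v := range live {
		saved[k] = v
	}
	return &checkpoint{live: live, saved: saved}
}

// Rollback restores the state captured when the checkpoint was taken.
func (c *checkpoint) Rollback() {
	for k := range c.live {
		delete(c.live, k)
	}
	for k, v := range c.saved {
		c.live[k] = v
	}
}

// register mutates state and relies on the deferred rollback to undo the
// mutation on any failure path, mirroring the `var ok bool` pattern above.
func register(state map[string]string, fail bool) error {
	var ok bool
	cp := newCheckpoint(state)
	defer func() {
		if !ok {
			cp.Rollback()
		}
	}()

	state["node"] = "registered"
	if fail {
		return fmt.Errorf("registration failed")
	}

	ok = true
	return nil
}

func main() {
	state := map[string]string{}
	_ = register(state, true)
	fmt.Println(state) // map[] - rolled back
	_ = register(state, false)
	fmt.Println(state) // map[node:registered]
}
```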
+ if existingRt != nil { + err = registry.VerifyRuntimeUpdate(app.logger, existingRt, sigRt, rt) + if err != nil { + return err + } + } + + if err = state.SetRuntime(rt, sigRt, suspended); err != nil { app.logger.Error("RegisterRuntime: failed to create runtime", "err", err, "runtime", rt, @@ -455,11 +551,13 @@ func (app *registryApplication) registerRuntime( return registry.ErrBadEntityForRuntime } - app.logger.Debug("RegisterRuntime: registered", - "runtime", rt, - ) + if !suspended { + app.logger.Debug("RegisterRuntime: registered", + "runtime", rt, + ) - ctx.EmitEvent(api.NewEventBuilder(app.Name()).Attribute(KeyRuntimeRegistered, cbor.Marshal(rt))) + ctx.EmitEvent(api.NewEventBuilder(app.Name()).Attribute(KeyRuntimeRegistered, cbor.Marshal(rt))) + } return nil } diff --git a/go/consensus/tendermint/apps/roothash/genesis.go b/go/consensus/tendermint/apps/roothash/genesis.go index e80246e1f66..77e1b87bb66 100644 --- a/go/consensus/tendermint/apps/roothash/genesis.go +++ b/go/consensus/tendermint/apps/roothash/genesis.go @@ -9,6 +9,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/crypto/signature" "github.com/oasislabs/oasis-core/go/consensus/tendermint/abci" registryState "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/registry/state" + roothashState "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/roothash/state" genesisAPI "github.com/oasislabs/oasis-core/go/genesis/api" "github.com/oasislabs/oasis-core/go/registry/api" roothashAPI "github.com/oasislabs/oasis-core/go/roothash/api" @@ -18,6 +19,9 @@ import ( func (app *rootHashApplication) InitChain(ctx *abci.Context, request types.RequestInitChain, doc *genesisAPI.Document) error { st := doc.RootHash + state := roothashState.NewMutableState(ctx.State()) + state.SetConsensusParameters(&st.Parameters) + // The per-runtime roothash state is done primarily via DeliverTx, but // also needs to be done here since the genesis state can have runtime // registrations. @@ -54,7 +58,13 @@ func (rq *rootHashQuerier) Genesis(ctx context.Context) (*roothashAPI.Genesis, e rtStates[rt.Runtime.ID] = &rtState } + params, err := rq.state.ConsensusParameters() + if err != nil { + return nil, err + } + genesis := &roothashAPI.Genesis{ + Parameters: *params, RuntimeStates: rtStates, } return genesis, nil diff --git a/go/consensus/tendermint/apps/roothash/query.go b/go/consensus/tendermint/apps/roothash/query.go index 3782df27991..bedd55faa9a 100644 --- a/go/consensus/tendermint/apps/roothash/query.go +++ b/go/consensus/tendermint/apps/roothash/query.go @@ -57,10 +57,6 @@ func (rq *rootHashQuerier) LatestBlock(ctx context.Context, id common.Namespace) if err != nil { return nil, err } - if runtime == nil { - return nil, errNoSuchRuntime - } - return runtime.CurrentBlock, nil } @@ -69,10 +65,6 @@ func (rq *rootHashQuerier) GenesisBlock(ctx context.Context, id common.Namespace if err != nil { return nil, err } - if runtime == nil { - return nil, errNoSuchRuntime - } - return runtime.GenesisBlock, nil } diff --git a/go/consensus/tendermint/apps/roothash/roothash.go b/go/consensus/tendermint/apps/roothash/roothash.go index 1b05ea56fa9..6b137a0fb23 100644 --- a/go/consensus/tendermint/apps/roothash/roothash.go +++ b/go/consensus/tendermint/apps/roothash/roothash.go @@ -36,12 +36,7 @@ import ( // timerKindRound is the round timer kind. 
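`registerRuntime` above first looks for an existing active registration and only then falls back to the suspended key space, remembering which of the two matched so `SetRuntime` can write the update back into the same key space. A compact sketch of that lookup, with maps standing in for the registry state accessors:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSuchRuntime = errors.New("no such runtime")

// lookup checks the active key space first and falls back to the suspended
// one, reporting which of the two matched. The maps are stand-ins for the
// SignedRuntime / SignedSuspendedRuntime accessors.
func lookup(active, suspended map[string]string, id string) (string, bool, error) {
	if rt, ok := active[id]; ok {
		return rt, false, nil
	}
	if rt, ok := suspended[id]; ok {
		return rt, true, nil
	}
	return "", false, errNoSuchRuntime
}

func main() {
	active := map[string]string{"rt-a": "descriptor A"}
	suspended := map[string]string{"rt-b": "descriptor B"}
	fmt.Println(lookup(active, suspended, "rt-a")) // descriptor A false <nil>
	fmt.Println(lookup(active, suspended, "rt-b")) // descriptor B true <nil>
	fmt.Println(lookup(active, suspended, "rt-c")) //  false no such runtime
}
```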
const timerKindRound = 0x01 -var ( - errNoSuchRuntime = errors.New("tendermint/roothash: no such runtime") - errNoRound = errors.New("tendermint/roothash: no round in progress") - - _ abci.Application = (*rootHashApplication)(nil) -) +var _ abci.Application = (*rootHashApplication)(nil) type timerContext struct { ID common.Namespace `json:"id"` @@ -94,202 +89,234 @@ func (app *rootHashApplication) BeginBlock(ctx *abci.Context, request types.Requ return nil } -func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epochtime.EpochTime) error { // nolint: gocyclo +func (app *rootHashApplication) onCommitteeChanged(ctx *abci.Context, epoch epochtime.EpochTime) error { state := roothashState.NewMutableState(ctx.State()) - - // Query the updated runtime list. + schedState := schedulerState.NewMutableState(ctx.State()) regState := registryState.NewMutableState(ctx.State()) runtimes, _ := regState.Runtimes() - newDescriptors := make(map[common.Namespace]*registry.Runtime) - for _, v := range runtimes { - if v.Kind == registry.KindCompute { - newDescriptors[v.ID] = v - } - } - schedState := schedulerState.NewMutableState(ctx.State()) - for _, rtState := range state.Runtimes() { - rtID := rtState.Runtime.ID + params, err := state.ConsensusParameters() + if err != nil { + return fmt.Errorf("failed to get consensus parameters: %w", err) + } - if !rtState.Runtime.IsCompute() { - app.logger.Debug("checkCommittees: skipping non-compute runtime", - "runtime", rtID, + for _, rt := range runtimes { + if !rt.IsCompute() { + app.logger.Debug("skipping non-compute runtime", + "runtime", rt.ID, ) continue } - // Derive a deterministic committee identifier that depends on memberships - // of all committees. We need this to be able to quickly see if any - // committee members have changed. - // - // We first include the current epoch, then all compute committee member - // hashes and then the merge committee member hash: - // - // [little-endian epoch] - // "compute committees follow" - // [compute committe 1 members hash] - // [compute committe 2 members hash] - // ... - // [compute committe n members hash] - // "merge committee follows" - // [merge committee members hash] - // - var committeeIDParts [][]byte - var rawEpoch [8]byte - binary.LittleEndian.PutUint64(rawEpoch[:], uint64(epoch)) - committeeIDParts = append(committeeIDParts, rawEpoch[:]) - committeeIDParts = append(committeeIDParts, []byte("compute committees follow")) - - // NOTE: There will later be multiple compute committees. 
- var computeCommittees []*scheduler.Committee - cc1, err := schedState.Committee(scheduler.KindCompute, rtID) + rtState, err := state.RuntimeState(rt.ID) if err != nil { - app.logger.Error("checkCommittees: failed to get compute committee from scheduler", - "err", err, - "runtime", rtID, - ) - continue - } - if cc1 != nil { - computeCommittees = append(computeCommittees, cc1) + return fmt.Errorf("failed to fetch runtime state: %w", err) } - computePool := &commitment.MultiPool{ - Committees: make(map[hash.Hash]*commitment.Pool), - } - if len(computeCommittees) == 0 { - app.logger.Warn("checkCommittees: no compute committees", - "runtime", rtID, - ) - } - for _, computeCommittee := range computeCommittees { - computeNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) - for idx, n := range computeCommittee.Members { - var nodeRuntime *node.Runtime - node, err1 := regState.Node(n.PublicKey) - if err1 != nil { - return errors.Wrap(err1, "checkCommittees: failed to query node") - } - for _, r := range node.Runtimes { - if !r.ID.Equal(&rtID) { - continue - } - nodeRuntime = r - break - } - if nodeRuntime == nil { - // We currently prevent this case throughout the rest of the system. - // Still, it's prudent to check. - app.logger.Warn("checkCommittees: committee member not registered with this runtime", - "node", n.PublicKey, - ) - continue - } - computeNodeInfo[n.PublicKey] = commitment.NodeInfo{ - CommitteeNode: idx, - Runtime: nodeRuntime, - } - } - computeCommitteeID := computeCommittee.EncodedMembersHash() - committeeIDParts = append(committeeIDParts, computeCommitteeID[:]) - - computePool.Committees[computeCommitteeID] = &commitment.Pool{ - Runtime: rtState.Runtime, - Committee: computeCommittee, - NodeInfo: computeNodeInfo, - } - } + // Since the runtime is in the list of active runtimes in the registry we + // can safely clear the suspended flag. + rtState.Suspended = false - var mergePool commitment.Pool - committeeIDParts = append(committeeIDParts, []byte("merge committee follows")) - mergeCommittee, err := schedState.Committee(scheduler.KindMerge, rtID) + // Prepare new runtime committees based on what the scheduler did. + committeeID, computePool, mergePool, empty, err := app.prepareNewCommittees(ctx, epoch, rtState, schedState, regState) if err != nil { - app.logger.Error("checkCommittees: failed to get merge committee from scheduler", - "err", err, - "runtime", rtID, - ) - continue + return err } - if mergeCommittee == nil { - app.logger.Warn("checkCommittees: no merge committee", - "runtime", rtID, - ) - } else { - mergeNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) - for idx, n := range mergeCommittee.Members { - mergeNodeInfo[n.PublicKey] = commitment.NodeInfo{ - CommitteeNode: idx, - } - } - mergePool = commitment.Pool{ - Runtime: rtState.Runtime, - Committee: mergeCommittee, - NodeInfo: mergeNodeInfo, + + // If there are no committees for this runtime, suspend the runtime as this + // means that there is noone to pay the maintenance fees. + if empty && !params.DebugDoNotSuspendRuntimes { + if err := app.suspendUnpaidRuntime(ctx, rtState, regState); err != nil { + return err } - mergeCommitteeID := mergeCommittee.EncodedMembersHash() - committeeIDParts = append(committeeIDParts, mergeCommitteeID[:]) } - app.logger.Debug("checkCommittees: updating committee for runtime", - "runtime", rtID, - ) + // If the committee has actually changed, force a new round. 
+ if !rtState.Suspended && (rtState.Round == nil || !rtState.Round.CommitteeID.Equal(&committeeID)) { + app.logger.Debug("updating committee for runtime", + "runtime_id", rt.ID, + ) - // If the committee is the "same", ignore this. - var committeeID hash.Hash - committeeID.FromBytes(committeeIDParts...) + // Transition the round. + blk := rtState.CurrentBlock + blockNr := blk.Header.Round - round := rtState.Round - if round != nil && round.CommitteeID.Equal(&committeeID) { - app.logger.Debug("checkCommittees: duplicate committee, ignoring", - "runtime", rtID, + app.logger.Debug("new committee, transitioning round", + "runtime_id", rt.ID, "committee_id", committeeID, + "round", blockNr, ) - delete(newDescriptors, rtID) - continue + + // Emit an empty epoch transition block in the new round. This is required so that + // the clients can be sure what state is final when an epoch transition occurs. + app.emitEmptyBlock(ctx, rtState, block.EpochTransition) + + // Create a new round. + rtState.Round = roothashState.NewRound(committeeID, computePool, mergePool, rtState.CurrentBlock) } - // Transition the round. - blk := rtState.CurrentBlock - blockNr := blk.Header.Round + // Update the runtime descriptor to the latest per-epoch value. + rtState.Runtime = rt - app.logger.Debug("checkCommittees: new committee, transitioning round", - "runtime", rtID, - "committee_id", committeeID, - "round", blockNr, - ) + state.SetRuntimeState(rtState) + } - rtState.Timer.Stop(ctx) - rtState.Round = roothashState.NewRound(committeeID, computePool, &mergePool, blk) + return nil +} - // Emit an empty epoch transition block in the new round. This is required so that - // the clients can be sure what state is final when an epoch transition occurs. - app.emitEmptyBlock(ctx, rtState, block.EpochTransition) +func (app *rootHashApplication) suspendUnpaidRuntime( + ctx *abci.Context, + rtState *roothashState.RuntimeState, + regState *registryState.MutableState, +) error { + app.logger.Warn("maintenance fees not paid for runtime, suspending", + "runtime_id", rtState.Runtime.ID, + ) - if rt, ok := newDescriptors[rtID]; ok { - // Update the runtime descriptor to the latest per-epoch value. - rtState.Runtime = rt - delete(newDescriptors, rtID) - } + if err := regState.SuspendRuntime(rtState.Runtime.ID); err != nil { + return err + } - state.SetRuntimeState(rtState) + rtState.Suspended = true + rtState.Round = nil + + // Emity an empty block signalling that the runtime was suspended. + app.emitEmptyBlock(ctx, rtState, block.Suspended) + + return nil +} + +func (app *rootHashApplication) prepareNewCommittees( + ctx *abci.Context, + epoch epochtime.EpochTime, + rtState *roothashState.RuntimeState, + schedState *schedulerState.MutableState, + regState *registryState.MutableState, +) ( + committeeID hash.Hash, + computePool *commitment.MultiPool, + mergePool *commitment.Pool, + empty bool, + err error, +) { + rtID := rtState.Runtime.ID + + // Derive a deterministic committee identifier that depends on memberships + // of all committees. We need this to be able to quickly see if any + // committee members have changed. + // + // We first include the current epoch, then all compute committee member + // hashes and then the merge committee member hash: + // + // [little-endian epoch] + // "compute committees follow" + // [compute committe 1 members hash] + // [compute committe 2 members hash] + // ... 
+ // [compute committe n members hash] + // "merge committee follows" + // [merge committee members hash] + // + var committeeIDParts [][]byte + var rawEpoch [8]byte + binary.LittleEndian.PutUint64(rawEpoch[:], uint64(epoch)) + committeeIDParts = append(committeeIDParts, rawEpoch[:]) + committeeIDParts = append(committeeIDParts, []byte("compute committees follow")) + + // NOTE: There will later be multiple compute committees. + var computeCommittees []*scheduler.Committee + cc1, err := schedState.Committee(scheduler.KindCompute, rtID) + if err != nil { + app.logger.Error("checkCommittees: failed to get compute committee from scheduler", + "err", err, + "runtime", rtID, + ) + return + } + if cc1 != nil { + computeCommittees = append(computeCommittees, cc1) } - // Just because a runtime didn't have committees, it doesn't mean that - // it's state does not need to be updated. Do so now where possible. - for _, v := range newDescriptors { - rtState, err := state.RuntimeState(v.ID) - if err != nil { - app.logger.Warn("onEpochChange: unknown runtime in update pass", - "runtime", v, - ) - continue + computePool = &commitment.MultiPool{ + Committees: make(map[hash.Hash]*commitment.Pool), + } + if len(computeCommittees) == 0 { + app.logger.Warn("checkCommittees: no compute committees", + "runtime", rtID, + ) + empty = true + } + for _, computeCommittee := range computeCommittees { + computeNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) + for idx, n := range computeCommittee.Members { + var nodeRuntime *node.Runtime + node, err1 := regState.Node(n.PublicKey) + if err1 != nil { + return hash.Hash{}, nil, nil, false, errors.Wrap(err1, "checkCommittees: failed to query node") + } + for _, r := range node.Runtimes { + if !r.ID.Equal(&rtID) { + continue + } + nodeRuntime = r + break + } + if nodeRuntime == nil { + // We currently prevent this case throughout the rest of the system. + // Still, it's prudent to check. + app.logger.Warn("checkCommittees: committee member not registered with this runtime", + "node", n.PublicKey, + ) + continue + } + computeNodeInfo[n.PublicKey] = commitment.NodeInfo{ + CommitteeNode: idx, + Runtime: nodeRuntime, + } } + computeCommitteeID := computeCommittee.EncodedMembersHash() + committeeIDParts = append(committeeIDParts, computeCommitteeID[:]) - rtState.Runtime = v - state.SetRuntimeState(rtState) + computePool.Committees[computeCommitteeID] = &commitment.Pool{ + Runtime: rtState.Runtime, + Committee: computeCommittee, + NodeInfo: computeNodeInfo, + } } - return nil + mergePool = new(commitment.Pool) + committeeIDParts = append(committeeIDParts, []byte("merge committee follows")) + mergeCommittee, err := schedState.Committee(scheduler.KindMerge, rtID) + if err != nil { + app.logger.Error("checkCommittees: failed to get merge committee from scheduler", + "err", err, + "runtime", rtID, + ) + return + } + if mergeCommittee == nil { + app.logger.Warn("checkCommittees: no merge committee", + "runtime", rtID, + ) + empty = true + } else { + mergeNodeInfo := make(map[signature.PublicKey]commitment.NodeInfo) + for idx, n := range mergeCommittee.Members { + mergeNodeInfo[n.PublicKey] = commitment.NodeInfo{ + CommitteeNode: idx, + } + } + mergePool = &commitment.Pool{ + Runtime: rtState.Runtime, + Committee: mergeCommittee, + NodeInfo: mergeNodeInfo, + } + mergeCommitteeID := mergeCommittee.EncodedMembersHash() + committeeIDParts = append(committeeIDParts, mergeCommitteeID[:]) + } + + committeeID.FromBytes(committeeIDParts...) 
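The committee identifier derived in `prepareNewCommittees` above is a hash over the little-endian epoch, fixed separator strings, and the per-committee member hashes, so any membership change yields a different identifier. A standalone sketch of that derivation; SHA-512/256 is used purely as an illustrative stand-in for oasis-core's `hash.Hash.FromBytes`:

```go
package main

import (
	"crypto/sha512"
	"encoding/binary"
	"fmt"
)

// committeeID sketches the derivation above: little-endian epoch, separator
// strings and member hashes are concatenated and hashed. SHA-512/256 is an
// illustrative stand-in for oasis-core's hash.Hash.FromBytes.
func committeeID(epoch uint64, computeHashes [][]byte, mergeHash []byte) [32]byte {
	var parts [][]byte
	var rawEpoch [8]byte
	binary.LittleEndian.PutUint64(rawEpoch[:], epoch)
	parts = append(parts, rawEpoch[:])
	parts = append(parts, []byte("compute committees follow"))
	parts = append(parts, computeHashes...)
	parts = append(parts, []byte("merge committee follows"))
	parts = append(parts, mergeHash)

	h := sha512.New512_256()
	for _, p := range parts {
		_, _ = h.Write(p)
	}
	var id [32]byte
	copy(id[:], h.Sum(nil))
	return id
}

func main() {
	// Any change to the epoch or to a committee's members hash changes the
	// identifier, which is how onCommitteeChanged detects membership changes.
	fmt.Printf("%x\n", committeeID(42, [][]byte{{0x01}, {0x02}}, []byte{0x03}))
	fmt.Printf("%x\n", committeeID(43, [][]byte{{0x01}, {0x02}}, []byte{0x03}))
}
```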
+ return } func (app *rootHashApplication) emitEmptyBlock(ctx *abci.Context, runtime *roothashState.RuntimeState, hdrType block.HeaderType) { @@ -315,14 +342,14 @@ func (app *rootHashApplication) ExecuteTx(ctx *abci.Context, tx *transaction.Tra return err } - return app.commit(ctx, state, cc.ID, &cc) + return app.computeCommit(ctx, state, &cc) case roothash.MethodMergeCommit: var mc roothash.MergeCommit if err := cbor.Unmarshal(tx.Body, &mc); err != nil { return err } - return app.commit(ctx, state, mc.ID, &mc) + return app.mergeCommit(ctx, state, &mc) default: return roothash.ErrInvalidArgument } @@ -373,9 +400,7 @@ func (app *rootHashApplication) onNewRuntime(ctx *abci.Context, runtime *registr } // Check if state already exists for the given runtime. - rtState, _ := state.RuntimeState(runtime.ID) - if rtState != nil { - // Do not propagate the error as this would fail the transaction. + if _, err := state.RuntimeState(runtime.ID); err != roothash.ErrInvalidRuntime { app.logger.Warn("onNewRuntime: state for runtime already exists", "runtime", runtime, ) @@ -445,7 +470,6 @@ func (app *rootHashApplication) FireTimer(ctx *abci.Context, timer *abci.Timer) ) return err } - runtime := rtState.Runtime latestBlock := rtState.CurrentBlock if latestBlock.Header.Round != tCtx.Round { @@ -469,7 +493,7 @@ func (app *rootHashApplication) FireTimer(ctx *abci.Context, timer *abci.Timer) defer state.SetRuntimeState(rtState) if rtState.Round.MergePool.IsTimeout(ctx.Now()) { - if err := app.tryFinalizeBlock(ctx, runtime, rtState, true); err != nil { + if err := app.tryFinalizeBlock(ctx, rtState, true); err != nil { app.logger.Error("failed to finalize block", "err", err, ) @@ -477,135 +501,7 @@ func (app *rootHashApplication) FireTimer(ctx *abci.Context, timer *abci.Timer) } } for _, pool := range rtState.Round.ComputePool.GetTimeoutCommittees(ctx.Now()) { - app.tryFinalizeCompute(ctx, runtime, rtState, pool, true) - } - - return nil -} - -type roothashSignatureVerifier struct { - runtimeID common.Namespace - scheduler *schedulerState.MutableState -} - -// VerifyCommitteeSignatures verifies that the given signatures come from -// the current committee members of the given kind. -// -// Implements commitment.SignatureVerifier. -func (sv *roothashSignatureVerifier) VerifyCommitteeSignatures(kind scheduler.CommitteeKind, sigs []signature.Signature) error { - if len(sigs) == 0 { - return nil - } - - committee, err := sv.scheduler.Committee(kind, sv.runtimeID) - if err != nil { - return err - } - if committee == nil { - return errors.New("roothash: no committee with which to verify signatures") - } - - // TODO: Consider caching this set? 
- pks := make(map[signature.PublicKey]bool) - for _, m := range committee.Members { - pks[m.PublicKey] = true - } - - for _, sig := range sigs { - if !pks[sig.PublicKey] { - return errors.New("roothash: signature is not from a valid committee member") - } - } - return nil -} - -func (app *rootHashApplication) commit( - ctx *abci.Context, - state *roothashState.MutableState, - id common.Namespace, - msg interface{}, -) error { - logger := app.logger.With("is_check_only", ctx.IsCheckOnly()) - - rtState, err := state.RuntimeState(id) - if err != nil { - return errors.Wrap(err, "roothash: failed to fetch runtime state") - } - if rtState == nil { - return errNoSuchRuntime - } - runtime := rtState.Runtime - - if rtState.Round == nil { - logger.Error("commit recevied when no round in progress") - return errNoRound - } - - latestBlock := rtState.CurrentBlock - blockNr := latestBlock.Header.Round - - defer state.SetRuntimeState(rtState) - - // If the round was finalized, transition. - if rtState.Round.CurrentBlock.Header.Round != latestBlock.Header.Round { - logger.Debug("round was finalized, transitioning round", - "round", blockNr, - ) - - rtState.Round.Transition(latestBlock) - } - - // Create storage signature verifier. - sv := &roothashSignatureVerifier{ - runtimeID: id, - scheduler: schedulerState.NewMutableState(ctx.State()), - } - - // Add the commitments. - switch c := msg.(type) { - case *roothash.MergeCommit: - for _, commit := range c.Commits { - if err = rtState.Round.AddMergeCommitment(&commit, sv); err != nil { - logger.Error("failed to add merge commitment to round", - "err", err, - "round", blockNr, - ) - return err - } - } - - // Try to finalize round. - if !ctx.IsCheckOnly() { - if err = app.tryFinalizeBlock(ctx, runtime, rtState, false); err != nil { - logger.Error("failed to finalize block", - "err", err, - ) - return err - } - } - case *roothash.ComputeCommit: - pools := make(map[*commitment.Pool]bool) - for _, commit := range c.Commits { - var pool *commitment.Pool - if pool, err = rtState.Round.AddComputeCommitment(&commit, sv); err != nil { - logger.Error("failed to add compute commitment to round", - "err", err, - "round", blockNr, - ) - return err - } - - pools[pool] = true - } - - // Try to finalize compute rounds. 
- if !ctx.IsCheckOnly() { - for pool := range pools { - app.tryFinalizeCompute(ctx, runtime, rtState, pool, false) - } - } - default: - panic(fmt.Errorf("roothash: invalid type passed to commit(): %T", c)) + app.tryFinalizeCompute(ctx, rtState, pool, true) } return nil @@ -613,7 +509,6 @@ func (app *rootHashApplication) commit( func (app *rootHashApplication) updateTimer( ctx *abci.Context, - runtime *registry.Runtime, rtState *roothashState.RuntimeState, blockNr uint64, ) { @@ -632,7 +527,7 @@ func (app *rootHashApplication) updateTimer( app.logger.Debug("(re-)arming round timeout") timerCtx := &timerContext{ - ID: runtime.ID, + ID: rtState.Runtime.ID, Round: blockNr, } rtState.Timer.Reset(ctx, nextTimeout.Sub(ctx.Now()), cbor.Marshal(timerCtx)) @@ -641,17 +536,16 @@ func (app *rootHashApplication) updateTimer( func (app *rootHashApplication) tryFinalizeCompute( ctx *abci.Context, - runtime *registry.Runtime, rtState *roothashState.RuntimeState, pool *commitment.Pool, forced bool, ) { + runtime := rtState.Runtime latestBlock := rtState.CurrentBlock blockNr := latestBlock.Header.Round - //id, _ := runtime.ID.MarshalBinary() committeeID := pool.GetCommitteeID() - defer app.updateTimer(ctx, runtime, rtState, blockNr) + defer app.updateTimer(ctx, rtState, blockNr) if rtState.Round.Finalized { app.logger.Error("attempted to finalize compute when block already finalized", @@ -706,7 +600,7 @@ func (app *rootHashApplication) tryFinalizeCompute( // to abort everything even if only one committee failed to finalize as // there is otherwise no way to make progress as merge committees will // refuse to merge if there are discrepancies. - app.logger.Error("worker: round failed", + app.logger.Error("round failed", "round", blockNr, "err", err, logging.LogEvent, roothash.LogEventRoundFailed, @@ -717,14 +611,14 @@ func (app *rootHashApplication) tryFinalizeCompute( func (app *rootHashApplication) tryFinalizeMerge( ctx *abci.Context, - runtime *registry.Runtime, rtState *roothashState.RuntimeState, forced bool, ) *block.Block { + runtime := rtState.Runtime latestBlock := rtState.CurrentBlock blockNr := latestBlock.Header.Round - defer app.updateTimer(ctx, runtime, rtState, blockNr) + defer app.updateTimer(ctx, rtState, blockNr) if rtState.Round.Finalized { app.logger.Error("attempted to finalize merge when block already finalized", @@ -775,7 +669,7 @@ func (app *rootHashApplication) tryFinalizeMerge( } // Something else went wrong, emit empty error block. 
- app.logger.Error("worker: round failed", + app.logger.Error("round failed", "round", blockNr, "err", err, logging.LogEvent, roothash.LogEventRoundFailed, @@ -827,11 +721,10 @@ func (app *rootHashApplication) postProcessFinalizedBlock(ctx *abci.Context, rtS func (app *rootHashApplication) tryFinalizeBlock( ctx *abci.Context, - runtime *registry.Runtime, rtState *roothashState.RuntimeState, mergeForced bool, ) error { - finalizedBlock := app.tryFinalizeMerge(ctx, runtime, rtState, mergeForced) + finalizedBlock := app.tryFinalizeMerge(ctx, rtState, mergeForced) if finalizedBlock == nil { return nil } diff --git a/go/consensus/tendermint/apps/roothash/state/state.go b/go/consensus/tendermint/apps/roothash/state/state.go index ce59a448a4b..69360011596 100644 --- a/go/consensus/tendermint/apps/roothash/state/state.go +++ b/go/consensus/tendermint/apps/roothash/state/state.go @@ -1,6 +1,8 @@ package state import ( + "fmt" + "github.com/tendermint/iavl" "github.com/oasislabs/oasis-core/go/common" @@ -8,6 +10,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/keyformat" "github.com/oasislabs/oasis-core/go/consensus/tendermint/abci" registry "github.com/oasislabs/oasis-core/go/registry/api" + roothash "github.com/oasislabs/oasis-core/go/roothash/api" "github.com/oasislabs/oasis-core/go/roothash/api/block" ) @@ -16,14 +19,22 @@ var ( // // Value is CBOR-serialized runtime state. runtimeKeyFmt = keyformat.New(0x20, &common.Namespace{}) + // parametersKeyFmt is the key format used for consensus parameters. + // + // Value is CBOR-serialized roothash.ConsensusParameters. + parametersKeyFmt = keyformat.New(0x21) ) +// RuntimeState is the per-runtime roothash state. type RuntimeState struct { - Runtime *registry.Runtime `json:"runtime"` - CurrentBlock *block.Block `json:"current_block"` - GenesisBlock *block.Block `json:"genesis_block"` - Round *Round `json:"round"` - Timer abci.Timer `json:"timer"` + Runtime *registry.Runtime `json:"runtime"` + Suspended bool `json:"suspended,omitempty"` + + CurrentBlock *block.Block `json:"current_block"` + GenesisBlock *block.Block `json:"genesis_block"` + + Round *Round `json:"round"` + Timer abci.Timer `json:"timer"` } type ImmutableState struct { @@ -42,7 +53,7 @@ func NewImmutableState(state *abci.ApplicationState, version int64) (*ImmutableS func (s *ImmutableState) RuntimeState(id common.Namespace) (*RuntimeState, error) { _, raw := s.Snapshot.Get(runtimeKeyFmt.Encode(&id)) if raw == nil { - return nil, nil + return nil, roothash.ErrInvalidRuntime } var state RuntimeState @@ -72,6 +83,17 @@ func (s *ImmutableState) Runtimes() []*RuntimeState { return runtimes } +func (s *ImmutableState) ConsensusParameters() (*roothash.ConsensusParameters, error) { + _, raw := s.Snapshot.Get(parametersKeyFmt.Encode()) + if raw == nil { + return nil, fmt.Errorf("tendermint/roothash: expected consensus parameters to be present in app state") + } + + var params roothash.ConsensusParameters + err := cbor.Unmarshal(raw, ¶ms) + return ¶ms, err +} + type MutableState struct { *ImmutableState @@ -90,3 +112,7 @@ func NewMutableState(tree *iavl.MutableTree) *MutableState { func (s *MutableState) SetRuntimeState(state *RuntimeState) { s.tree.Set(runtimeKeyFmt.Encode(&state.Runtime.ID), cbor.Marshal(state)) } + +func (s *MutableState) SetConsensusParameters(params *roothash.ConsensusParameters) { + s.tree.Set(parametersKeyFmt.Encode(), cbor.Marshal(params)) +} diff --git a/go/consensus/tendermint/apps/roothash/transactions.go b/go/consensus/tendermint/apps/roothash/transactions.go 
new file mode 100644 index 00000000000..1dc21b12c6d --- /dev/null +++ b/go/consensus/tendermint/apps/roothash/transactions.go @@ -0,0 +1,189 @@ +package roothash + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/oasislabs/oasis-core/go/common" + "github.com/oasislabs/oasis-core/go/common/crypto/signature" + "github.com/oasislabs/oasis-core/go/consensus/tendermint/abci" + roothashState "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/roothash/state" + schedulerState "github.com/oasislabs/oasis-core/go/consensus/tendermint/apps/scheduler/state" + roothash "github.com/oasislabs/oasis-core/go/roothash/api" + "github.com/oasislabs/oasis-core/go/roothash/api/commitment" + scheduler "github.com/oasislabs/oasis-core/go/scheduler/api" +) + +var _ commitment.SignatureVerifier = (*roothashSignatureVerifier)(nil) + +type roothashSignatureVerifier struct { + runtimeID common.Namespace + scheduler *schedulerState.MutableState +} + +// VerifyCommitteeSignatures verifies that the given signatures come from +// the current committee members of the given kind. +// +// Implements commitment.SignatureVerifier. +func (sv *roothashSignatureVerifier) VerifyCommitteeSignatures(kind scheduler.CommitteeKind, sigs []signature.Signature) error { + if len(sigs) == 0 { + return nil + } + + committee, err := sv.scheduler.Committee(kind, sv.runtimeID) + if err != nil { + return err + } + if committee == nil { + return roothash.ErrInvalidRuntime + } + + // TODO: Consider caching this set? + pks := make(map[signature.PublicKey]bool) + for _, m := range committee.Members { + pks[m.PublicKey] = true + } + + for _, sig := range sigs { + if !pks[sig.PublicKey] { + return errors.New("roothash: signature is not from a valid committee member") + } + } + return nil +} + +// getRuntimeState fetches the current runtime state and performs common +// processing and error handling. +func (app *rootHashApplication) getRuntimeState( + ctx *abci.Context, + state *roothashState.MutableState, + id common.Namespace, +) (*roothashState.RuntimeState, commitment.SignatureVerifier, error) { + // Fetch current runtime state. + rtState, err := state.RuntimeState(id) + if err != nil { + return nil, nil, fmt.Errorf("roothash: failed to fetch runtime state: %w", err) + } + if rtState.Suspended { + return nil, nil, roothash.ErrRuntimeSuspended + } + if rtState.Round == nil { + return nil, nil, roothash.ErrNoRound + } + + // Create signature verifier. + sv := &roothashSignatureVerifier{ + runtimeID: id, + scheduler: schedulerState.NewMutableState(ctx.State()), + } + + // If the round was finalized, transition. + if rtState.Round.CurrentBlock.Header.Round != rtState.CurrentBlock.Header.Round { + app.logger.Debug("round was finalized, transitioning round", + "round", rtState.CurrentBlock.Header.Round, + ) + + rtState.Round.Transition(rtState.CurrentBlock) + } + + return rtState, sv, nil +} + +func (app *rootHashApplication) computeCommit( + ctx *abci.Context, + state *roothashState.MutableState, + cc *roothash.ComputeCommit, +) error { + if ctx.IsCheckOnly() { + return nil + } + + // Charge gas for this transaction. 
+ params, err := state.ConsensusParameters() + if err != nil { + app.logger.Error("ComputeCommit: failed to fetch consensus parameters", + "err", err, + ) + return err + } + if err = ctx.Gas().UseGas(1, roothash.GasOpComputeCommit, params.GasCosts); err != nil { + return err + } + + rtState, sv, err := app.getRuntimeState(ctx, state, cc.ID) + if err != nil { + return err + } + defer state.SetRuntimeState(rtState) + + pools := make(map[*commitment.Pool]bool) + for _, commit := range cc.Commits { + var pool *commitment.Pool + if pool, err = rtState.Round.AddComputeCommitment(&commit, sv); err != nil { + app.logger.Error("failed to add compute commitment to round", + "err", err, + "round", rtState.CurrentBlock.Header.Round, + ) + return err + } + + pools[pool] = true + } + + // Try to finalize compute rounds. + for pool := range pools { + app.tryFinalizeCompute(ctx, rtState, pool, false) + } + + return nil +} + +func (app *rootHashApplication) mergeCommit( + ctx *abci.Context, + state *roothashState.MutableState, + mc *roothash.MergeCommit, +) error { + if ctx.IsCheckOnly() { + return nil + } + + // Charge gas for this transaction. + params, err := state.ConsensusParameters() + if err != nil { + app.logger.Error("MergeCommit: failed to fetch consensus parameters", + "err", err, + ) + return err + } + if err = ctx.Gas().UseGas(1, roothash.GasOpMergeCommit, params.GasCosts); err != nil { + return err + } + + rtState, sv, err := app.getRuntimeState(ctx, state, mc.ID) + if err != nil { + return err + } + defer state.SetRuntimeState(rtState) + + // Add commitments. + for _, commit := range mc.Commits { + if err = rtState.Round.AddMergeCommitment(&commit, sv); err != nil { + app.logger.Error("failed to add merge commitment to round", + "err", err, + "round", rtState.CurrentBlock.Header.Round, + ) + return err + } + } + + // Try to finalize round. + if err = app.tryFinalizeBlock(ctx, rtState, false); err != nil { + app.logger.Error("failed to finalize block", + "err", err, + ) + return err + } + + return nil +} diff --git a/go/consensus/tendermint/apps/supplementarysanity/checks.go b/go/consensus/tendermint/apps/supplementarysanity/checks.go index 05007a36198..06bc47f131e 100644 --- a/go/consensus/tendermint/apps/supplementarysanity/checks.go +++ b/go/consensus/tendermint/apps/supplementarysanity/checks.go @@ -42,9 +42,9 @@ func checkRegistry(state *iavl.MutableTree, now epochtime.EpochTime) error { } // Check runtimes. 
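Both commit handlers above charge gas via `ctx.Gas().UseGas(1, <op>, params.GasCosts)` before touching any round state. A simplified model of what such a gas meter does; the struct, limit handling, and operation names are illustrative, not the oasis-core implementation:

```go
package main

import (
	"errors"
	"fmt"
)

var errOutOfGas = errors.New("out of gas")

// gasMeter is a simplified model of the accounting behind ctx.Gas().UseGas:
// each named operation has a per-unit cost taken from the consensus
// parameters, and the call fails once the budget is exhausted. The struct and
// its limit handling are illustrative, not the oasis-core code.
type gasMeter struct {
	limit, used uint64
}

func (g *gasMeter) useGas(count uint64, op string, costs map[string]uint64) error {
	amount := count * costs[op]
	if g.used+amount > g.limit {
		return errOutOfGas
	}
	g.used += amount
	return nil
}

func main() {
	costs := map[string]uint64{"compute_commit": 1000, "merge_commit": 1000}
	g := &gasMeter{limit: 1500}
	fmt.Println(g.useGas(1, "compute_commit", costs), g.used) // <nil> 1000
	fmt.Println(g.useGas(1, "merge_commit", costs), g.used)   // out of gas 1000
}
```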
- runtimes, err := st.SignedRuntimes() + runtimes, err := st.AllSignedRuntimes() if err != nil { - return fmt.Errorf("SignedRuntimes: %w", err) + return fmt.Errorf("AllSignedRuntimes: %w", err) } seenRuntimes, err := registry.SanityCheckRuntimes(runtimes) if err != nil { diff --git a/go/consensus/tendermint/tests/genesis/genesis.go b/go/consensus/tendermint/tests/genesis/genesis.go index 495965980fe..302e1ecab2a 100644 --- a/go/consensus/tendermint/tests/genesis/genesis.go +++ b/go/consensus/tendermint/tests/genesis/genesis.go @@ -16,6 +16,7 @@ import ( genesis "github.com/oasislabs/oasis-core/go/genesis/api" genesisTestHelpers "github.com/oasislabs/oasis-core/go/genesis/tests/helpers" registry "github.com/oasislabs/oasis-core/go/registry/api" + roothash "github.com/oasislabs/oasis-core/go/roothash/api" scheduler "github.com/oasislabs/oasis-core/go/scheduler/api" stakingTests "github.com/oasislabs/oasis-core/go/staking/tests/debug" ) @@ -65,6 +66,11 @@ func NewTestNodeGenesisProvider(identity *identity.Identity) (genesis.Provider, DebugStaticValidators: true, }, }, + RootHash: roothash.Genesis{ + Parameters: roothash.ConsensusParameters{ + DebugDoNotSuspendRuntimes: true, + }, + }, Consensus: consensus.Genesis{ Backend: tendermint.BackendName, Parameters: consensus.Parameters{ diff --git a/go/keymanager/client/client.go b/go/keymanager/client/client.go index 76182763d94..0bf576dff78 100644 --- a/go/keymanager/client/client.go +++ b/go/keymanager/client/client.go @@ -25,6 +25,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/identity" "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/common/node" + "github.com/oasislabs/oasis-core/go/common/pubsub" consensus "github.com/oasislabs/oasis-core/go/consensus/api" "github.com/oasislabs/oasis-core/go/keymanager/api" registry "github.com/oasislabs/oasis-core/go/registry/api" @@ -36,8 +37,11 @@ const ( maxRetries = 15 ) +// ErrKeyManagerNotAvailable is the error when a key manager is not available. var ErrKeyManagerNotAvailable = errors.New("keymanager/client: key manager not available") +// TODO: Consider making the key manager client per-runtime instead of it tracking all runtimes. + // Client is a key manager client instance. type Client struct { sync.RWMutex @@ -52,7 +56,7 @@ type Client struct { state map[common.Namespace]*clientState kmMap map[common.Namespace]common.Namespace - debugClient enclaverpc.Transport + readyNotifier *pubsub.Broker } type clientState struct { @@ -73,16 +77,42 @@ func (st *clientState) kill() { } } -// CallRemote calls a runtime-specific key manager via remote EnclaveRPC. -func (c *Client) CallRemote(ctx context.Context, runtimeID common.Namespace, data []byte) ([]byte, error) { - if c.debugClient != nil { - return c.debugClient.CallEnclave(ctx, &enclaverpc.CallEnclaveRequest{ - RuntimeID: runtimeID, - Endpoint: api.EnclaveRPCEndpoint, - Payload: data, - }) +// WaitReady waits for the key manager for the specific runtime to become ready. 
+func (c *Client) WaitReady(ctx context.Context, runtimeID common.Namespace) error { + sub := func() *pubsub.Subscription { + c.RLock() + defer c.RUnlock() + + if kmID, ok := c.kmMap[runtimeID]; ok && c.state[kmID] != nil { + return nil + } + + return c.readyNotifier.Subscribe() + }() + if sub == nil { + return nil } + defer sub.Close() + typedCh := make(chan common.Namespace) + sub.Unwrap(typedCh) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case rtID := <-typedCh: + if !rtID.Equal(&runtimeID) { + continue + } + + return nil + } + } +} + +// CallRemote calls a runtime-specific key manager via remote EnclaveRPC. +func (c *Client) CallRemote(ctx context.Context, runtimeID common.Namespace, data []byte) ([]byte, error) { c.logger.Debug("remote query", "id", runtimeID, "data", base64.StdEncoding.EncodeToString(data), @@ -185,6 +215,11 @@ func (c *Client) updateRuntime(rt *registry.Runtime) { case registry.KindCompute: if rt.KeyManager != nil { c.kmMap[rt.ID] = *rt.KeyManager + + // Notify subscribers if a key manager is now available. + if st := c.state[*rt.KeyManager]; st != nil { + c.readyNotifier.Broadcast(rt.ID) + } } c.logger.Debug("set new runtime key manager", "id", rt.ID, @@ -294,6 +329,12 @@ func (c *Client) updateState(status *api.Status, nodeList []*node.Node) { client: enclaverpc.NewTransportClient(conn), resolverCleanupFn: cleanupFn, } + + for k, v := range c.kmMap { + if v.Equal(&status.ID) { + c.readyNotifier.Broadcast(k) + } + } } func (c *Client) updateNodes(nodeList []*node.Node) { @@ -315,16 +356,14 @@ func (c *Client) updateNodes(nodeList []*node.Node) { // New creates a new key manager client instance. func New(backend api.Backend, registryBackend registry.Backend, nodeIdentity *identity.Identity) (*Client, error) { c := &Client{ - logger: logging.GetLogger("keymanager/client"), - nodeIdentity: nodeIdentity, - state: make(map[common.Namespace]*clientState), - kmMap: make(map[common.Namespace]common.Namespace), + logger: logging.GetLogger("keymanager/client"), + nodeIdentity: nodeIdentity, + state: make(map[common.Namespace]*clientState), + kmMap: make(map[common.Namespace]common.Namespace), + backend: backend, + registry: registryBackend, + readyNotifier: pubsub.NewBroker(false), } - - // Standard configuration watches the various backends. - c.backend = backend - c.registry = registryBackend - go c.worker() return c, nil diff --git a/go/oasis-node/cmd/debug/byzantine/byzantine.go b/go/oasis-node/cmd/debug/byzantine/byzantine.go index ab2cf68cf12..32ae6bc56eb 100644 --- a/go/oasis-node/cmd/debug/byzantine/byzantine.go +++ b/go/oasis-node/cmd/debug/byzantine/byzantine.go @@ -14,6 +14,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/node" "github.com/oasislabs/oasis-core/go/common/sgx/ias" "github.com/oasislabs/oasis-core/go/consensus/tendermint" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/flags" "github.com/oasislabs/oasis-core/go/roothash/api/commitment" @@ -28,6 +29,8 @@ const ( CfgFakeSGX = "fake_sgx" // CfgVersionFakeEnclaveID configures runtime's EnclaveIdentity. CfgVersionFakeEnclaveID = "runtime.version.fake_enclave_id" + // CfgActivationEpoch configures the epoch at which the Byzantine node activates. 
+ CfgActivationEpoch = "activation_epoch" ) var ( @@ -106,6 +109,10 @@ func doComputeHonest(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + var capabilities *node.Capabilities var rak signature.Signer if viper.GetBool(CfgFakeSGX) { @@ -224,6 +231,10 @@ func doComputeWrong(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + var capabilities *node.Capabilities var rak signature.Signer if viper.GetBool(CfgFakeSGX) { @@ -342,6 +353,10 @@ func doComputeStraggler(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + var capabilities *node.Capabilities if viper.GetBool(CfgFakeSGX) { if _, capabilities, err = initFakeCapabilitiesSGX(); err != nil { @@ -405,6 +420,10 @@ func doMergeHonest(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } @@ -495,6 +514,10 @@ func doMergeWrong(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } @@ -609,6 +632,10 @@ func doMergeStraggler(cmd *cobra.Command, args []string) { } }() + if err = epochtimeWaitForEpoch(ht.service, epochtime.EpochTime(viper.GetUint64(CfgActivationEpoch))); err != nil { + panic(fmt.Sprintf("epochtimeWaitForEpoch: %+v", err)) + } + if err = registryRegisterNode(ht.service, defaultIdentity, common.DataDir(), fakeAddresses, ph.service.Addresses(), defaultRuntimeID, nil, node.RoleMergeWorker); err != nil { panic(fmt.Sprintf("registryRegisterNode: %+v", err)) } @@ -651,7 +678,8 @@ func Register(parentCmd *cobra.Command) { func init() { fs := flag.NewFlagSet("", flag.ContinueOnError) fs.Bool(CfgFakeSGX, false, "register with SGX capability") - fs.String(CfgVersionFakeEnclaveID, "", "Fake runtime enclave identity") + fs.String(CfgVersionFakeEnclaveID, "", "fake runtime enclave identity") + fs.Uint64(CfgActivationEpoch, 0, "epoch at which the Byzantine node should activate") _ = viper.BindPFlags(fs) byzantineCmd.PersistentFlags().AddFlagSet(fs) diff --git a/go/oasis-node/cmd/debug/byzantine/epochtime.go b/go/oasis-node/cmd/debug/byzantine/epochtime.go new file mode 100644 index 00000000000..99e7cba4401 --- /dev/null +++ b/go/oasis-node/cmd/debug/byzantine/epochtime.go @@ -0,0 +1,18 @@ +package byzantine + +import ( + "github.com/oasislabs/oasis-core/go/consensus/tendermint/service" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" +) + +func 
epochtimeWaitForEpoch(svc service.TendermintService, epoch epochtime.EpochTime) error { + ch, sub := svc.EpochTime().WatchEpochs() + defer sub.Close() + + for { + currentEpoch := <-ch + if currentEpoch >= epoch { + return nil + } + } +} diff --git a/go/oasis-test-runner/oasis/args.go b/go/oasis-test-runner/oasis/args.go index 14b8083ee28..a57a6c196c5 100644 --- a/go/oasis-test-runner/oasis/args.go +++ b/go/oasis-test-runner/oasis/args.go @@ -11,6 +11,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/node" "github.com/oasislabs/oasis-core/go/common/sgx" "github.com/oasislabs/oasis-core/go/consensus/tendermint" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" "github.com/oasislabs/oasis-core/go/ias" cmdCommon "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/flags" @@ -34,6 +35,11 @@ type argBuilder struct { vec []string } +func (args *argBuilder) internalSocketAddress(path string) *argBuilder { + args.vec = append(args.vec, "--"+grpc.CfgAddress, "unix:"+path) + return args +} + func (args *argBuilder) debugDontBlameOasis() *argBuilder { args.vec = append(args.vec, "--"+flags.CfgDebugDontBlameOasis) return args @@ -398,6 +404,11 @@ func (args *argBuilder) byzantineVersionFakeEnclaveID(rt *Runtime) *argBuilder { return args } +func (args *argBuilder) byzantineActivationEpoch(epoch epochtime.EpochTime) *argBuilder { + args.vec = append(args.vec, "--"+byzantine.CfgActivationEpoch, strconv.FormatUint(uint64(epoch), 10)) + return args +} + func newArgBuilder() *argBuilder { return &argBuilder{} } diff --git a/go/oasis-test-runner/oasis/byzantine.go b/go/oasis-test-runner/oasis/byzantine.go index a9f90c057d5..1d9af4d866d 100644 --- a/go/oasis-test-runner/oasis/byzantine.go +++ b/go/oasis-test-runner/oasis/byzantine.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/oasislabs/oasis-core/go/common/node" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" registry "github.com/oasislabs/oasis-core/go/registry/api" ) @@ -16,8 +17,9 @@ type Byzantine struct { script string entity *Entity - consensusPort uint16 - p2pPort uint16 + consensusPort uint16 + p2pPort uint16 + activationEpoch epochtime.EpochTime } // ByzantineCfg is the Oasis byzantine node configuration. @@ -27,6 +29,8 @@ type ByzantineCfg struct { Script string IdentitySeed string Entity *Entity + + ActivationEpoch epochtime.EpochTime } func (worker *Byzantine) startNode() error { @@ -37,7 +41,8 @@ func (worker *Byzantine) startNode() error { tendermintDebugAddrBookLenient(). workerP2pPort(worker.p2pPort). appendSeedNodes(worker.net). - appendEntity(worker.entity) + appendEntity(worker.entity). 
+ byzantineActivationEpoch(worker.activationEpoch) for _, v := range worker.net.Runtimes() { if v.kind == registry.KindCompute && v.teeHardware == node.TEEHardwareIntelSGX { @@ -104,10 +109,11 @@ func (net *Network) NewByzantine(cfg *ByzantineCfg) (*Byzantine, error) { disableDefaultLogWatcherHandlerFactories: cfg.DisableDefaultLogWatcherHandlerFactories, logWatcherHandlerFactories: cfg.LogWatcherHandlerFactories, }, - script: cfg.Script, - entity: cfg.Entity, - consensusPort: net.nextNodePort, - p2pPort: net.nextNodePort + 1, + script: cfg.Script, + entity: cfg.Entity, + consensusPort: net.nextNodePort, + p2pPort: net.nextNodePort + 1, + activationEpoch: cfg.ActivationEpoch, } worker.doStartNode = worker.startNode diff --git a/go/oasis-test-runner/oasis/cli/cli.go b/go/oasis-test-runner/oasis/cli/cli.go new file mode 100644 index 00000000000..ce0cf5e1019 --- /dev/null +++ b/go/oasis-test-runner/oasis/cli/cli.go @@ -0,0 +1,103 @@ +// Package cli contains helpers for various oasis-node subcommands. +package cli + +import ( + "bytes" + "io" + "os/exec" + "strings" + + "github.com/oasislabs/oasis-core/go/common/logging" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" +) + +type helpersBase struct { + env *env.Env + net *oasis.Network + + logger *logging.Logger +} + +func (b *helpersBase) runSubCommand(name string, args []string) error { + return RunSubCommand(b.env, b.logger, name, b.net.Config().NodeBinary, args) +} + +// Helpers are the oasis-node cli helpers. +type Helpers struct { + Consensus *ConsensusHelpers + Registry *RegistryHelpers +} + +// New creates new oasis-node cli helpers. +func New(env *env.Env, net *oasis.Network, logger *logging.Logger) *Helpers { + base := &helpersBase{ + env: env, + net: net, + logger: logger, + } + + return &Helpers{ + Consensus: &ConsensusHelpers{base}, + Registry: &RegistryHelpers{base}, + } +} + +// StartSubCommand launches an oasis-node subcommand. +// +// It does not wait for the subcommand to complete. +func StartSubCommand(env *env.Env, logger *logging.Logger, name, binary string, args []string, stdout io.Writer, stderr io.Writer) (*exec.Cmd, error) { + cmd := exec.Command(binary, args...) + cmd.SysProcAttr = oasis.CmdAttrs + cmd.Stdout = stdout + cmd.Stderr = stderr + + logger.Info("launching subcommand", + "binary", binary, + "args", strings.Join(args, " "), + ) + + if err := cmd.Start(); err != nil { + return nil, err + } + return cmd, nil +} + +// RunSubCommand launches an oasis-node subcommand and waits for it to complete. +// +// Stdout and stderr are redirected into a command-specific file. +func RunSubCommand(env *env.Env, logger *logging.Logger, name, binary string, args []string) error { + d, err := env.NewSubDir(name) + if err != nil { + return err + } + + w, err := d.NewLogWriter("command.log") + if err != nil { + return err + } + + cmd, err := StartSubCommand(env, logger, name, binary, args, w, w) + if err != nil { + return err + } + if err = cmd.Wait(); err != nil { + return err + } + return nil +} + +// RunSubCommandWithOutput launches an oasis-node subcommand and waits for it to complete. +// +// Stdout and stderr are redirected into a buffer. 
+func RunSubCommandWithOutput(env *env.Env, logger *logging.Logger, name, binary string, args []string) (bytes.Buffer, error) { + var b bytes.Buffer + cmd, err := StartSubCommand(env, logger, name, binary, args, &b, &b) + if err != nil { + return b, err + } + if err = cmd.Wait(); err != nil { + return b, err + } + return b, nil +} diff --git a/go/oasis-test-runner/oasis/cli/consensus.go b/go/oasis-test-runner/oasis/cli/consensus.go new file mode 100644 index 00000000000..237c7f883c4 --- /dev/null +++ b/go/oasis-test-runner/oasis/cli/consensus.go @@ -0,0 +1,30 @@ +package cli + +import ( + "fmt" + + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/consensus" + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/grpc" +) + +// ConsensusHelpers contains the oasis-node consensus CLI helpers. +type ConsensusHelpers struct { + *helpersBase +} + +// SubmitTx is a wrapper for "consensus submit_tx" subcommand. +func (c *ConsensusHelpers) SubmitTx(txPath string) error { + c.logger.Info("submitting tx", consensus.CfgTxFile, txPath) + + args := []string{ + "consensus", "submit_tx", + "--" + consensus.CfgTxFile, txPath, + "--" + grpc.CfgAddress, "unix:" + c.net.Validators()[0].SocketPath(), + "--" + common.CfgDebugAllowTestKeys, + } + if err := c.runSubCommand("consensus-submit_tx", args); err != nil { + return fmt.Errorf("failed to submit tx: %w", err) + } + return nil +} diff --git a/go/oasis-test-runner/oasis/cli/registry.go b/go/oasis-test-runner/oasis/cli/registry.go new file mode 100644 index 00000000000..454408a0549 --- /dev/null +++ b/go/oasis-test-runner/oasis/cli/registry.go @@ -0,0 +1,91 @@ +package cli + +import ( + "fmt" + "strconv" + + "github.com/oasislabs/oasis-core/go/common/cbor" + "github.com/oasislabs/oasis-core/go/common/node" + cmdCommon "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/consensus" + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/flags" + cmdRegRt "github.com/oasislabs/oasis-core/go/oasis-node/cmd/registry/runtime" + registry "github.com/oasislabs/oasis-core/go/registry/api" +) + +// RegistryHelpers contains the oasis-node registry CLI helpers. +type RegistryHelpers struct { + *helpersBase +} + +// GenerateRegisterRuntimeTx is a wrapper for "registry runtime gen_register" subcommand. +func (r *RegistryHelpers) GenerateRegisterRuntimeTx( + nonce uint64, + runtime registry.Runtime, + txPath, genesisStateFile string, +) error { + r.logger.Info("generating register runtime tx") + + // Generate a runtime register transaction file with debug test entity. + args := []string{ + "registry", "runtime", "gen_register", + "--" + cmdRegRt.CfgID, runtime.ID.String(), + "--" + cmdRegRt.CfgTEEHardware, runtime.TEEHardware.String(), + "--" + cmdRegRt.CfgKind, runtime.Kind.String(), + "--" + cmdRegRt.CfgVersion, runtime.Version.Version.String(), + "--" + consensus.CfgTxNonce, strconv.FormatUint(nonce, 10), + "--" + consensus.CfgTxFile, txPath, + "--" + consensus.CfgTxFeeAmount, strconv.Itoa(0), // TODO: Make fee configurable. + "--" + consensus.CfgTxFeeGas, strconv.Itoa(10), // TODO: Make fee configurable. 
+ "--" + flags.CfgDebugDontBlameOasis, + "--" + cmdCommon.CfgDebugAllowTestKeys, + "--" + flags.CfgDebugTestEntity, + "--" + flags.CfgGenesisFile, r.net.GenesisPath(), + } + + switch runtime.TEEHardware { + case node.TEEHardwareInvalid: + case node.TEEHardwareIntelSGX: + var versionIntelSGX registry.VersionInfoIntelSGX + if err := cbor.Unmarshal(runtime.Version.TEE, &versionIntelSGX); err != nil { + return fmt.Errorf("failed to unmarshal Intel SGX TEE version: %w", err) + } + + for _, e := range versionIntelSGX.Enclaves { + args = append(args, + "--"+cmdRegRt.CfgVersionEnclave, e.String(), + ) + } + default: + return fmt.Errorf("unsupported TEE hardware: %s", runtime.TEEHardware) + } + + if runtime.Kind == registry.KindCompute { + args = append(args, + "--"+cmdRegRt.CfgGenesisState, genesisStateFile, + "--"+cmdRegRt.CfgGenesisRound, strconv.FormatUint(runtime.Genesis.Round, 10), + "--"+cmdRegRt.CfgComputeGroupSize, strconv.FormatUint(runtime.Compute.GroupSize, 10), + "--"+cmdRegRt.CfgComputeGroupBackupSize, strconv.FormatUint(runtime.Compute.GroupBackupSize, 10), + "--"+cmdRegRt.CfgComputeAllowedStragglers, strconv.FormatUint(runtime.Compute.AllowedStragglers, 10), + "--"+cmdRegRt.CfgComputeRoundTimeout, runtime.Compute.RoundTimeout.String(), + "--"+cmdRegRt.CfgMergeGroupSize, strconv.FormatUint(runtime.Merge.GroupSize, 10), + "--"+cmdRegRt.CfgMergeGroupBackupSize, strconv.FormatUint(runtime.Merge.GroupBackupSize, 10), + "--"+cmdRegRt.CfgMergeAllowedStragglers, strconv.FormatUint(runtime.Merge.AllowedStragglers, 10), + "--"+cmdRegRt.CfgMergeRoundTimeout, runtime.Merge.RoundTimeout.String(), + "--"+cmdRegRt.CfgStorageGroupSize, strconv.FormatUint(runtime.Storage.GroupSize, 10), + "--"+cmdRegRt.CfgTxnSchedulerGroupSize, strconv.FormatUint(runtime.TxnScheduler.GroupSize, 10), + "--"+cmdRegRt.CfgTxnSchedulerAlgorithm, runtime.TxnScheduler.Algorithm, + "--"+cmdRegRt.CfgTxnSchedulerBatchFlushTimeout, runtime.TxnScheduler.BatchFlushTimeout.String(), + "--"+cmdRegRt.CfgTxnSchedulerMaxBatchSize, strconv.FormatUint(runtime.TxnScheduler.MaxBatchSize, 10), + "--"+cmdRegRt.CfgTxnSchedulerMaxBatchSizeBytes, strconv.FormatUint(runtime.TxnScheduler.MaxBatchSizeBytes, 10), + ) + } + if runtime.KeyManager != nil { + args = append(args, "--"+cmdRegRt.CfgKeyManager, runtime.KeyManager.String()) + } + if err := r.runSubCommand("registry-runtime-gen_register", args); err != nil { + return fmt.Errorf("failed to generate register runtime tx: %w", err) + } + + return nil +} diff --git a/go/oasis-test-runner/oasis/client.go b/go/oasis-test-runner/oasis/client.go index 9d522dde993..432958ae81d 100644 --- a/go/oasis-test-runner/oasis/client.go +++ b/go/oasis-test-runner/oasis/client.go @@ -47,6 +47,11 @@ func (client *Client) startNode() error { return nil } +// Start starts an Oasis node. +func (client *Client) Start() error { + return client.startNode() +} + // NewClient provisions a new client node and adds it to the network. func (net *Network) NewClient() (*Client, error) { clientName := fmt.Sprintf("client-%d", len(net.clients)) diff --git a/go/oasis-test-runner/oasis/compute.go b/go/oasis-test-runner/oasis/compute.go index 7386d3c6f14..10d2110512a 100644 --- a/go/oasis-test-runner/oasis/compute.go +++ b/go/oasis-test-runner/oasis/compute.go @@ -64,6 +64,11 @@ func (worker *Compute) ExportsPath() string { return nodeExportsPath(worker.dir) } +// Start starts an Oasis node. +func (worker *Compute) Start() error { + return worker.startNode() +} + func (worker *Compute) startNode() error { args := newArgBuilder(). 
debugDontBlameOasis(). diff --git a/go/oasis-test-runner/oasis/controller.go b/go/oasis-test-runner/oasis/controller.go index b6bec203951..ff837058ed5 100644 --- a/go/oasis-test-runner/oasis/controller.go +++ b/go/oasis-test-runner/oasis/controller.go @@ -6,6 +6,7 @@ import ( cmnGrpc "github.com/oasislabs/oasis-core/go/common/grpc" consensus "github.com/oasislabs/oasis-core/go/consensus/api" control "github.com/oasislabs/oasis-core/go/control/api" + registry "github.com/oasislabs/oasis-core/go/registry/api" runtimeClient "github.com/oasislabs/oasis-core/go/runtime/client/api" staking "github.com/oasislabs/oasis-core/go/staking/api" ) @@ -16,9 +17,17 @@ type Controller struct { control.DebugController control.NodeController - Staking staking.Backend Consensus consensus.ClientBackend + Staking staking.Backend + Registry registry.Backend RuntimeClient runtimeClient.RuntimeClient + + conn *grpc.ClientConn +} + +// Close closes the gRPC connection with the node the controller is controlling. +func (c *Controller) Close() { + c.conn.Close() } // NewController creates a new node controller given the path to @@ -36,8 +45,11 @@ func NewController(socketPath string) (*Controller, error) { return &Controller{ DebugController: control.NewDebugControllerClient(conn), NodeController: control.NewNodeControllerClient(conn), - Staking: staking.NewStakingClient(conn), Consensus: consensus.NewConsensusClient(conn), + Staking: staking.NewStakingClient(conn), + Registry: registry.NewRegistryClient(conn), RuntimeClient: runtimeClient.NewRuntimeClient(conn), + + conn: conn, }, nil } diff --git a/go/oasis-test-runner/oasis/fixture.go b/go/oasis-test-runner/oasis/fixture.go index dddfec595e7..dc292cbcf8d 100644 --- a/go/oasis-test-runner/oasis/fixture.go +++ b/go/oasis-test-runner/oasis/fixture.go @@ -7,6 +7,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/node" "github.com/oasislabs/oasis-core/go/common/sgx" "github.com/oasislabs/oasis-core/go/common/sgx/ias" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/log" registry "github.com/oasislabs/oasis-core/go/registry/api" @@ -150,7 +151,7 @@ func (f *ValidatorFixture) Create(net *Network) (*Validator, error) { } // RuntimeFixture is a runtime fixture. -type RuntimeFixture struct { +type RuntimeFixture struct { // nolint: maligned ID common.Namespace `json:"id"` Kind registry.RuntimeKind `json:"kind"` Entity int `json:"entity"` @@ -166,6 +167,8 @@ type RuntimeFixture struct { Storage registry.StorageParameters `json:"storage"` Pruner RuntimePrunerCfg `json:"pruner,omitempty"` + + ExcludeFromGenesis bool `json:"exclude_from_genesis,omitempty"` } // Create instantiates the runtime described by the fixture. 
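Note: the Controller above now exposes the registry client and owns its gRPC connection, so callers should close it when done. A minimal usage sketch in Go (the waitNodeSynced wrapper and socketPath are hypothetical placeholders, not part of this change):

package example

import (
	"context"

	"github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis"
)

// waitNodeSynced connects to a node's internal socket, waits for the node to
// sync via the node controller, and releases the underlying gRPC connection
// with the new Close method.
func waitNodeSynced(ctx context.Context, socketPath string) error {
	ctrl, err := oasis.NewController(socketPath)
	if err != nil {
		return err
	}
	defer ctrl.Close()

	return ctrl.WaitSync(ctx)
}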
@@ -188,20 +191,21 @@ func (f *RuntimeFixture) Create(netFixture *NetworkFixture, net *Network) (*Runt } return net.NewRuntime(&RuntimeCfg{ - ID: f.ID, - Kind: f.Kind, - Entity: entity, - Keymanager: km, - TEEHardware: netFixture.TEE.Hardware, - MrSigner: netFixture.TEE.MrSigner, - Compute: f.Compute, - Merge: f.Merge, - TxnScheduler: f.TxnScheduler, - Storage: f.Storage, - Binary: f.Binary, - GenesisState: f.GenesisState, - GenesisRound: f.GenesisRound, - Pruner: f.Pruner, + ID: f.ID, + Kind: f.Kind, + Entity: entity, + Keymanager: km, + TEEHardware: netFixture.TEE.Hardware, + MrSigner: netFixture.TEE.MrSigner, + Compute: f.Compute, + Merge: f.Merge, + TxnScheduler: f.TxnScheduler, + Storage: f.Storage, + Binary: f.Binary, + GenesisState: f.GenesisState, + GenesisRound: f.GenesisRound, + Pruner: f.Pruner, + ExcludeFromGenesis: f.ExcludeFromGenesis, }) } @@ -237,10 +241,12 @@ func (f *KeymanagerFixture) Create(net *Network) (*Keymanager, error) { } // StorageWorkerFixture is a storage worker fixture. -type StorageWorkerFixture struct { +type StorageWorkerFixture struct { // nolint: maligned Backend string `json:"backend"` Entity int `json:"entity"` + Restartable bool `json:"restartable"` + LogWatcherHandlerFactories []log.WatcherHandlerFactory `json:"-"` IgnoreApplies bool `json:"ignore_applies,omitempty"` @@ -255,6 +261,7 @@ func (f *StorageWorkerFixture) Create(net *Network) (*Storage, error) { return net.NewStorage(&StorageCfg{ NodeCfg: NodeCfg{ + Restartable: f.Restartable, LogWatcherHandlerFactories: f.LogWatcherHandlerFactories, }, Backend: f.Backend, @@ -323,6 +330,8 @@ type ByzantineFixture struct { IdentitySeed string `json:"identity_seed"` Entity int `json:"entity"` + ActivationEpoch epochtime.EpochTime `json:"activation_epoch"` + EnableDefaultLogWatcherHandlerFactories bool `json:"enable_default_log_fac"` LogWatcherHandlerFactories []log.WatcherHandlerFactory `json:"-"` } @@ -339,9 +348,10 @@ func (f *ByzantineFixture) Create(net *Network) (*Byzantine, error) { DisableDefaultLogWatcherHandlerFactories: !f.EnableDefaultLogWatcherHandlerFactories, LogWatcherHandlerFactories: f.LogWatcherHandlerFactories, }, - Script: f.Script, - IdentitySeed: f.IdentitySeed, - Entity: entity, + Script: f.Script, + IdentitySeed: f.IdentitySeed, + Entity: entity, + ActivationEpoch: f.ActivationEpoch, }) } diff --git a/go/oasis-test-runner/oasis/ias.go b/go/oasis-test-runner/oasis/ias.go index 39b293ce056..cea1b605064 100644 --- a/go/oasis-test-runner/oasis/ias.go +++ b/go/oasis-test-runner/oasis/ias.go @@ -16,7 +16,8 @@ var mockSPID []byte type iasProxy struct { Node - grpcPort uint16 + useRegistry bool + grpcPort uint16 } func (ias *iasProxy) tlsCertPath() string { @@ -29,9 +30,13 @@ func (ias *iasProxy) startNode() error { debugDontBlameOasis(). debugAllowTestKeys(). grpcServerPort(ias.grpcPort). - iasUseGenesis(). iasDebugMock(). 
iasSPID(mockSPID) + if ias.useRegistry { + args = args.internalSocketAddress(ias.net.validators[0].SocketPath()) + } else { + args = args.iasUseGenesis() + } var err error if ias.cmd, ias.exitCh, err = ias.net.startOasisNode( @@ -79,7 +84,8 @@ func (net *Network) newIASProxy() (*iasProxy, error) { net: net, dir: iasDir, }, - grpcPort: net.nextNodePort, + useRegistry: net.cfg.IASUseRegistry, + grpcPort: net.nextNodePort, } net.iasProxy.doStartNode = net.iasProxy.startNode diff --git a/go/oasis-test-runner/oasis/keymanager.go b/go/oasis-test-runner/oasis/keymanager.go index 39c4390b0e7..fd38a4ee0df 100644 --- a/go/oasis-test-runner/oasis/keymanager.go +++ b/go/oasis-test-runner/oasis/keymanager.go @@ -59,11 +59,16 @@ func (km *Keymanager) TLSCertPath() string { return nodeTLSCertPath(km.dir) } -// Exports path returns the path to the node's exports data dir. +// ExportsPath returns the path to the node's exports data dir. func (km *Keymanager) ExportsPath() string { return nodeExportsPath(km.dir) } +// Start starts an Oasis node. +func (km *Keymanager) Start() error { + return km.startNode() +} + func (km *Keymanager) provisionGenesis() error { // Provision status and policy. We can only provision this here as we need // a list of runtimes allowed to query the key manager. diff --git a/go/oasis-test-runner/oasis/oasis.go b/go/oasis-test-runner/oasis/oasis.go index 78e37e95bad..104045236f3 100644 --- a/go/oasis-test-runner/oasis/oasis.go +++ b/go/oasis-test-runner/oasis/oasis.go @@ -98,6 +98,11 @@ func (n *Node) stopNode() error { return nil } +// Stop stops the node. +func (n *Node) Stop() error { + return n.stopNode() +} + // Restart kills the node, waits for it to stop, and starts it again. func (n *Node) Restart() error { if err := n.stopNode(); err != nil { @@ -179,7 +184,9 @@ type NetworkCfg struct { // nolint: maligned // DeterministicIdentities is the deterministic identities flag. DeterministicIdentities bool `json:"deterministic_identities"` - // XXX: Config for IAS proxy + // IASUseRegistry specifies whether the IAS proxy should use the registry instead of the + // genesis document for authenticating runtime IDs. + IASUseRegistry bool `json:"ias_use_registry,omitempty"` // StakingGenesis is the name of a file with a staking genesis document to use if GenesisFile isn't set. StakingGenesis string `json:"staking_genesis"` @@ -259,8 +266,13 @@ func (net *Network) ClientController() *Controller { // NumRegisterNodes returns the number of all nodes that need to register. func (net *Network) NumRegisterNodes() int { + var keyManagers int + if net.keymanager != nil { + keyManagers = 1 + } + return len(net.validators) + - 1 + // Key manager. + keyManagers + len(net.storageWorkers) + len(net.computeWorkers) + len(net.byzantine) diff --git a/go/oasis-test-runner/oasis/runtime.go b/go/oasis-test-runner/oasis/runtime.go index e7411ca0a65..cb12cf058f4 100644 --- a/go/oasis-test-runner/oasis/runtime.go +++ b/go/oasis-test-runner/oasis/runtime.go @@ -9,6 +9,7 @@ import ( "github.com/pkg/errors" "github.com/oasislabs/oasis-core/go/common" + "github.com/oasislabs/oasis-core/go/common/cbor" "github.com/oasislabs/oasis-core/go/common/node" "github.com/oasislabs/oasis-core/go/common/sgx" cmdCommon "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" @@ -32,6 +33,9 @@ type Runtime struct { // nolint: maligned mrSigner *sgx.MrSigner pruner RuntimePrunerCfg + + excludeFromGenesis bool + descriptor registry.Runtime } // RuntimeCfg is the Oasis runtime provisioning configuration. 
@@ -53,6 +57,8 @@ type RuntimeCfg struct { // nolint: maligned Storage registry.StorageParameters Pruner RuntimePrunerCfg + + ExcludeFromGenesis bool } // RuntimePrunerCfg is the pruner configuration for an Oasis runtime. @@ -69,13 +75,32 @@ func (rt *Runtime) ID() common.Namespace { } func (rt *Runtime) toGenesisArgs() []string { + if rt.excludeFromGenesis { + return []string{} + } + return []string{ "--runtime", filepath.Join(rt.dir.String(), rtDescriptorFile), } } +// ToRuntimeDescriptor returns a registry runtime descriptor for this runtime. +func (rt *Runtime) ToRuntimeDescriptor() registry.Runtime { + return rt.descriptor +} + // NewRuntime provisions a new runtime and adds it to the network. func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { + descriptor := registry.Runtime{ + ID: cfg.ID, + Kind: cfg.Kind, + TEEHardware: cfg.TEEHardware, + Compute: cfg.Compute, + Merge: cfg.Merge, + TxnScheduler: cfg.TxnScheduler, + Storage: cfg.Storage, + } + rtDir, err := net.baseDir.NewSubDir("runtime-" + cfg.ID.String()) if err != nil { net.logger.Error("failed to create runtime subdir", @@ -89,7 +114,6 @@ func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { "--" + cmdCommon.CfgDataDir, rtDir.String(), "--" + cmdRegRt.CfgID, cfg.ID.String(), "--" + cmdRegRt.CfgKind, cfg.Kind.String(), - "--" + cmdRegRt.CfgGenesisRound, strconv.FormatUint(cfg.GenesisRound, 10), } if cfg.Kind == registry.KindCompute { args = append(args, []string{ @@ -110,7 +134,13 @@ func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { }...) if cfg.GenesisState != "" { - args = append(args, "--"+cmdRegRt.CfgGenesisState, cfg.GenesisState) + args = append(args, + "--"+cmdRegRt.CfgGenesisRound, strconv.FormatUint(cfg.GenesisRound, 10), + "--"+cmdRegRt.CfgGenesisState, cfg.GenesisState, + ) + + descriptor.Genesis.Round = cfg.GenesisRound + // TODO: Support genesis state. } } var mrEnclave *sgx.MrEnclave @@ -123,11 +153,20 @@ func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { "--" + cmdRegRt.CfgTEEHardware, cfg.TEEHardware.String(), "--" + cmdRegRt.CfgVersionEnclave, mrEnclave.String() + cfg.MrSigner.String(), }...) + + descriptor.Version.TEE = cbor.Marshal(registry.VersionInfoIntelSGX{ + Enclaves: []sgx.EnclaveIdentity{ + {MrEnclave: *mrEnclave, MrSigner: *cfg.MrSigner}, + }, + }) } if cfg.Keymanager != nil { args = append(args, []string{ "--" + cmdRegRt.CfgKeyManager, cfg.Keymanager.id.String(), }...) + + descriptor.KeyManager = new(common.Namespace) + *descriptor.KeyManager = cfg.Keymanager.id } args = append(args, cfg.Entity.toGenesisArgs()...) 
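Note: together with the ExcludeFromGenesis fixture flag and the new oasis/cli helpers, the stored descriptor lets a scenario register a runtime after the network is already running. A rough sketch of that flow (registerExcludedRuntime is hypothetical; txPath and genesisStatePath are caller-chosen placeholders, and nonce 0 assumes the signing test entity has not submitted other transactions):

package example

import (
	"github.com/oasislabs/oasis-core/go/common/logging"
	"github.com/oasislabs/oasis-core/go/oasis-test-runner/env"
	"github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis"
	"github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli"
)

// registerExcludedRuntime generates a runtime register transaction from the
// runtime's descriptor and submits it through the first validator.
func registerExcludedRuntime(childEnv *env.Env, net *oasis.Network, rt *oasis.Runtime, txPath, genesisStatePath string) error {
	helpers := cli.New(childEnv, net, logging.GetLogger("example/registry"))

	if err := helpers.Registry.GenerateRegisterRuntimeTx(0, rt.ToRuntimeDescriptor(), txPath, genesisStatePath); err != nil {
		return err
	}
	return helpers.Consensus.SubmitTx(txPath)
}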
@@ -145,14 +184,16 @@ func (net *Network) NewRuntime(cfg *RuntimeCfg) (*Runtime, error) { } rt := &Runtime{ - dir: rtDir, - id: cfg.ID, - kind: cfg.Kind, - binary: cfg.Binary, - teeHardware: cfg.TEEHardware, - mrEnclave: mrEnclave, - mrSigner: cfg.MrSigner, - pruner: cfg.Pruner, + dir: rtDir, + id: cfg.ID, + kind: cfg.Kind, + binary: cfg.Binary, + teeHardware: cfg.TEEHardware, + mrEnclave: mrEnclave, + mrSigner: cfg.MrSigner, + pruner: cfg.Pruner, + excludeFromGenesis: cfg.ExcludeFromGenesis, + descriptor: descriptor, } net.runtimes = append(net.runtimes, rt) diff --git a/go/oasis-test-runner/oasis/storage.go b/go/oasis-test-runner/oasis/storage.go index 10a5d2637e0..f3c8ebbcd3d 100644 --- a/go/oasis-test-runner/oasis/storage.go +++ b/go/oasis-test-runner/oasis/storage.go @@ -67,6 +67,11 @@ func (worker *Storage) DatabasePath() string { return filepath.Join(worker.dir.String(), database.DefaultFileName(worker.backend)) } +// Start starts an Oasis node. +func (worker *Storage) Start() error { + return worker.startNode() +} + func (worker *Storage) startNode() error { args := newArgBuilder(). debugDontBlameOasis(). diff --git a/go/oasis-test-runner/oasis/validator.go b/go/oasis-test-runner/oasis/validator.go index 6d3cddcbc27..0a0cdc608f0 100644 --- a/go/oasis-test-runner/oasis/validator.go +++ b/go/oasis-test-runner/oasis/validator.go @@ -68,6 +68,11 @@ func (val *Validator) ExportsPath() string { return nodeExportsPath(val.dir) } +// Start starts an Oasis node. +func (val *Validator) Start() error { + return val.startNode() +} + func (val *Validator) startNode() error { args := newArgBuilder(). debugDontBlameOasis(). diff --git a/go/oasis-test-runner/scenario/e2e/basic.go b/go/oasis-test-runner/scenario/e2e/basic.go index 45a59c6447f..19cb1afade1 100644 --- a/go/oasis-test-runner/scenario/e2e/basic.go +++ b/go/oasis-test-runner/scenario/e2e/basic.go @@ -1,11 +1,15 @@ package e2e import ( + "context" + "fmt" "os/exec" "time" "github.com/spf13/viper" + "github.com/oasislabs/oasis-core/go/common/cbor" + "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/common/node" "github.com/oasislabs/oasis-core/go/common/sgx" "github.com/oasislabs/oasis-core/go/common/sgx/ias" @@ -14,22 +18,19 @@ import ( "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/log" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" registry "github.com/oasislabs/oasis-core/go/registry/api" + runtimeClient "github.com/oasislabs/oasis-core/go/runtime/client/api" + runtimeTransaction "github.com/oasislabs/oasis-core/go/runtime/transaction" "github.com/oasislabs/oasis-core/go/storage/database" ) var ( // Basic is the basic network + client test case. - Basic scenario.Scenario = &basicImpl{ - name: "basic", - clientBinary: "simple-keyvalue-client", - } + Basic scenario.Scenario = newBasicImpl("basic", "simple-keyvalue-client", nil) // BasicEncryption is the basic network + client with encryption test case. - BasicEncryption scenario.Scenario = &basicImpl{ - name: "basic_encryption", - clientBinary: "simple-keyvalue-enc-client", - } + BasicEncryption scenario.Scenario = newBasicImpl("basic-encryption", "simple-keyvalue-enc-client", nil) // DefaultBasicLogWatcherHandlerFactories is a list of default log watcher // handler factories for the basic scenario. 
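Note: the Start wrappers added to the worker types pair with the Node.Stop wrapper above, so scenarios can bounce individual nodes. An illustrative sketch (bounceStorageWorker is hypothetical and assumes at least one storage worker provisioned with the Restartable fixture flag):

package example

import "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis"

// bounceStorageWorker stops the first storage worker and then starts it again
// using the new Stop/Start wrappers.
func bounceStorageWorker(net *oasis.Network) error {
	worker := net.StorageWorkers()[0]
	if err := worker.Stop(); err != nil {
		return err
	}
	return worker.Start()
}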
@@ -41,12 +42,23 @@ var ( } ) +func newBasicImpl(name, clientBinary string, clientArgs []string) *basicImpl { + return &basicImpl{ + name: name, + clientBinary: clientBinary, + clientArgs: clientArgs, + logger: logging.GetLogger("scenario/e2e/" + name), + } +} + type basicImpl struct { net *oasis.Network name string clientBinary string clientArgs []string + + logger *logging.Logger } func (sc *basicImpl) Name() string { @@ -176,7 +188,7 @@ func (sc *basicImpl) cleanTendermintStorage(childEnv *env.Env) error { "--" + common.CfgDataDir, dataDir, }, cleanArgs...) - return runSubCommand(childEnv, "unsafe-reset", sc.net.Config().NodeBinary, args) + return cli.RunSubCommand(childEnv, logger, "unsafe-reset", sc.net.Config().NodeBinary, args) } for _, val := range sc.net.Validators() { @@ -242,3 +254,117 @@ func (sc *basicImpl) Run(childEnv *env.Env) error { return sc.wait(childEnv, cmd, clientErrCh) } + +func (sc *basicImpl) submitRuntimeTx(ctx context.Context, key, value string) error { + c := sc.net.ClientController().RuntimeClient + + // Submit a transaction and check the result. + var rsp runtimeTransaction.TxnOutput + rawRsp, err := c.SubmitTx(ctx, &runtimeClient.SubmitTxRequest{ + RuntimeID: runtimeID, + Data: cbor.Marshal(&runtimeTransaction.TxnCall{ + Method: "insert", + Args: struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + Key: key, + Value: value, + }, + }), + }) + if err != nil { + return fmt.Errorf("failed to submit runtime tx: %w", err) + } + if err = cbor.Unmarshal(rawRsp, &rsp); err != nil { + return fmt.Errorf("malformed tx output from runtime: %w", err) + } + if rsp.Error != nil { + return fmt.Errorf("runtime tx failed: %s", *rsp.Error) + } + return nil +} + +func (sc *basicImpl) waitNodesSynced() error { + ctx := context.Background() + + checkSynced := func(n *oasis.Node) error { + c, err := oasis.NewController(n.SocketPath()) + if err != nil { + return fmt.Errorf("failed to create node controller: %w", err) + } + defer c.Close() + + if err = c.WaitSync(ctx); err != nil { + return fmt.Errorf("failed to wait for node to sync: %w", err) + } + return nil + } + + sc.logger.Info("waiting for all nodes to be synced") + + for _, n := range sc.net.Validators() { + if err := checkSynced(&n.Node); err != nil { + return err + } + } + for _, n := range sc.net.StorageWorkers() { + if err := checkSynced(&n.Node); err != nil { + return err + } + } + for _, n := range sc.net.ComputeWorkers() { + if err := checkSynced(&n.Node); err != nil { + return err + } + } + for _, n := range sc.net.Clients() { + if err := checkSynced(&n.Node); err != nil { + return err + } + } + + sc.logger.Info("nodes synced") + return nil +} + +func (sc *basicImpl) initialEpochTransitions() error { + ctx := context.Background() + + if sc.net.Keymanager() != nil { + // First wait for validator and key manager nodes to register. Then + // perform an epoch transition which will cause the compute nodes to + // register. + numNodes := len(sc.net.Validators()) + len(sc.net.StorageWorkers()) + 1 + sc.logger.Info("waiting for (some) nodes to register", + "num_nodes", numNodes, + ) + + if err := sc.net.Controller().WaitNodesRegistered(ctx, numNodes); err != nil { + return fmt.Errorf("failed to wait for nodes: %w", err) + } + + sc.logger.Info("triggering epoch transition") + if err := sc.net.Controller().SetEpoch(ctx, 1); err != nil { + return fmt.Errorf("failed to set epoch: %w", err) + } + sc.logger.Info("epoch transition done") + } + + // Wait for all nodes to register. 
+ sc.logger.Info("waiting for (all) nodes to register", + "num_nodes", sc.net.NumRegisterNodes(), + ) + + if err := sc.net.Controller().WaitNodesRegistered(ctx, sc.net.NumRegisterNodes()); err != nil { + return fmt.Errorf("failed to wait for nodes: %w", err) + } + + // Then perform another epoch transition to elect the committees. + sc.logger.Info("triggering epoch transition") + if err := sc.net.Controller().SetEpoch(ctx, 2); err != nil { + return fmt.Errorf("failed to set epoch: %w", err) + } + sc.logger.Info("epoch transition done") + return nil +} diff --git a/go/oasis-test-runner/scenario/e2e/byzantine.go b/go/oasis-test-runner/scenario/e2e/byzantine.go index 7e4ffba2f9e..0075f153e7b 100644 --- a/go/oasis-test-runner/scenario/e2e/byzantine.go +++ b/go/oasis-test-runner/scenario/e2e/byzantine.go @@ -1,10 +1,6 @@ package e2e import ( - "context" - "fmt" - - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/log" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" @@ -13,7 +9,7 @@ import ( // TODO: Consider referencing script names directly from the Byzantine node. -const byzantineDefaultIdentitySeed = "ekiden byzantine node worker" +const byzantineDefaultIdentitySeed = "ekiden byzantine node worker, luck=1" var ( // ByzantineComputeHonest is the byzantine compute honest scenario. @@ -57,26 +53,19 @@ type byzantineImpl struct { script string identitySeed string logWatcherHandlerFactories []log.WatcherHandlerFactory - - logger *logging.Logger } func newByzantineImpl(script string, logWatcherHandlerFactories []log.WatcherHandlerFactory) scenario.Scenario { - sc := &byzantineImpl{ - basicImpl: basicImpl{ - clientBinary: "simple-keyvalue-ops-client", - clientArgs: []string{"set", "hello_key", "hello_value"}, - }, + return &byzantineImpl{ + basicImpl: *newBasicImpl( + "byzantine/"+script, + "simple-keyvalue-ops-client", + []string{"set", "hello_key", "hello_value"}, + ), script: script, identitySeed: byzantineDefaultIdentitySeed, logWatcherHandlerFactories: logWatcherHandlerFactories, - logger: logging.GetLogger("scenario/e2e/byzantine/" + script), } - return sc -} - -func (sc *byzantineImpl) Name() string { - return "byzantine/" + sc.script } func (sc *byzantineImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -97,9 +86,10 @@ func (sc *byzantineImpl) Fixture() (*oasis.NetworkFixture, error) { // Provision a Byzantine node. f.ByzantineNodes = []oasis.ByzantineFixture{ oasis.ByzantineFixture{ - Script: sc.script, - IdentitySeed: sc.identitySeed, - Entity: 1, + Script: sc.script, + IdentitySeed: sc.identitySeed, + Entity: 1, + ActivationEpoch: 1, }, } return f, nil @@ -111,23 +101,9 @@ func (sc *byzantineImpl) Run(childEnv *env.Env) error { return err } - // Wait for the nodes to register and then perform an epoch transition - // as the byzantine node cannot handle intermediate epochs in which it - // is not elected. 
- sc.logger.Info("waiting for nodes to register", - "num_nodes", sc.net.NumRegisterNodes(), - ) - - ctx := context.Background() - if err = sc.net.Controller().WaitNodesRegistered(ctx, sc.net.NumRegisterNodes()); err != nil { - return fmt.Errorf("failed to wait for nodes: %w", err) - } - - sc.logger.Info("triggering epoch transition") - if err = sc.net.Controller().SetEpoch(ctx, 1); err != nil { - return fmt.Errorf("failed to set epoch: %w", err) + if err = sc.initialEpochTransitions(); err != nil { + return err } - sc.logger.Info("epoch transition done") return sc.wait(childEnv, cmd, clientErrCh) } diff --git a/go/oasis-test-runner/scenario/e2e/common.go b/go/oasis-test-runner/scenario/e2e/common.go index b31b4df0362..89bc6b41651 100644 --- a/go/oasis-test-runner/scenario/e2e/common.go +++ b/go/oasis-test-runner/scenario/e2e/common.go @@ -2,9 +2,7 @@ package e2e import ( - "bytes" "fmt" - "io" "math" "os/exec" "path/filepath" @@ -105,56 +103,6 @@ func startClient(env *env.Env, net *oasis.Network, binary string, clientArgs []s return cmd, nil } -func startSubCommand(env *env.Env, name, binary string, args []string, stdout io.Writer, stderr io.Writer) (*exec.Cmd, error) { - cmd := exec.Command(binary, args...) - cmd.SysProcAttr = oasis.CmdAttrs - cmd.Stdout = stdout - cmd.Stderr = stderr - - logger.Info("launching subcommand", - "binary", binary, - "args", strings.Join(args, " "), - ) - - if err := cmd.Start(); err != nil { - return nil, err - } - return cmd, nil -} - -func runSubCommand(env *env.Env, name, binary string, args []string) error { - d, err := env.NewSubDir(name) - if err != nil { - return err - } - - w, err := d.NewLogWriter("command.log") - if err != nil { - return err - } - - cmd, err := startSubCommand(env, name, binary, args, w, w) - if err != nil { - return err - } - if err = cmd.Wait(); err != nil { - return err - } - return nil -} - -func runSubCommandWithOutput(env *env.Env, name, binary string, args []string) (bytes.Buffer, error) { - var b bytes.Buffer - cmd, err := startSubCommand(env, name, binary, args, &b, &b) - if err != nil { - return b, err - } - if err = cmd.Wait(); err != nil { - return b, err - } - return b, nil -} - func init() { Flags.String(cfgNodeBinary, "oasis-node", "path to the node binary") Flags.String(cfgClientBinaryDir, "", "path to the client binaries directory") diff --git a/go/oasis-test-runner/scenario/e2e/consensus_cli.go b/go/oasis-test-runner/scenario/e2e/consensus_cli.go deleted file mode 100644 index 809ae84e75b..00000000000 --- a/go/oasis-test-runner/scenario/e2e/consensus_cli.go +++ /dev/null @@ -1,26 +0,0 @@ -package e2e - -import ( - "fmt" - - "github.com/oasislabs/oasis-core/go/common/logging" - "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" - "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/consensus" - "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/grpc" - "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" -) - -// submitTx is a wrapper for consensus submit_tx command. 
-func submitTx(childEnv *env.Env, txPath string, logger *logging.Logger, socketPath string, nodeBinary string) error { - logger.Info("submitting tx", consensus.CfgTxFile, txPath) - args := []string{ - "consensus", "submit_tx", - "--" + consensus.CfgTxFile, txPath, - "--" + grpc.CfgAddress, "unix:" + socketPath, - "--" + common.CfgDebugAllowTestKeys, - } - if err := runSubCommand(childEnv, "submit", nodeBinary, args); err != nil { - return fmt.Errorf("failed to submit tx: %w", err) - } - return nil -} diff --git a/go/oasis-test-runner/scenario/e2e/dump_restore.go b/go/oasis-test-runner/scenario/e2e/dump_restore.go index 1d81beb7685..ca2c58dae6b 100644 --- a/go/oasis-test-runner/scenario/e2e/dump_restore.go +++ b/go/oasis-test-runner/scenario/e2e/dump_restore.go @@ -4,8 +4,8 @@ import ( "fmt" "path/filepath" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" ) @@ -16,25 +16,19 @@ var ( type dumpRestoreImpl struct { basicImpl - - logger *logging.Logger } func newDumpRestoreImpl() scenario.Scenario { sc := &dumpRestoreImpl{ - basicImpl: basicImpl{ - clientBinary: "test-long-term-client", - clientArgs: []string{"--mode", "part1"}, - }, - logger: logging.GetLogger("scenario/e2e/dump_restore"), + basicImpl: *newBasicImpl( + "dump-restore", + "test-long-term-client", + []string{"--mode", "part1"}, + ), } return sc } -func (sc *dumpRestoreImpl) Name() string { - return "dump-restore" -} - func (sc *dumpRestoreImpl) Run(childEnv *env.Env) error { clientErrCh, cmd, err := sc.basicImpl.start(childEnv) if err != nil { @@ -61,7 +55,7 @@ func (sc *dumpRestoreImpl) Run(childEnv *env.Env) error { "--genesis.file", dumpPath, "--address", "unix:" + sc.basicImpl.net.Validators()[0].SocketPath(), } - if err = runSubCommand(childEnv, "genesis-dump", sc.basicImpl.net.Config().NodeBinary, args); err != nil { + if err = cli.RunSubCommand(childEnv, sc.logger, "genesis-dump", sc.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("scenario/e2e/dump_restore: failed to dump state: %w", err) } @@ -78,7 +72,7 @@ func (sc *dumpRestoreImpl) Run(childEnv *env.Env) error { "--debug.dont_blame_oasis", "--debug.allow_test_keys", } - if err = runSubCommand(childEnv, "storage-dump", sc.basicImpl.net.Config().NodeBinary, args); err != nil { + if err = cli.RunSubCommand(childEnv, sc.logger, "storage-dump", sc.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("scenario/e2e/dump_restore: failed to dump storage: %w", err) } diff --git a/go/oasis-test-runner/scenario/e2e/halt_restore.go b/go/oasis-test-runner/scenario/e2e/halt_restore.go index dc2ffd3972f..000df4bebe4 100644 --- a/go/oasis-test-runner/scenario/e2e/halt_restore.go +++ b/go/oasis-test-runner/scenario/e2e/halt_restore.go @@ -11,7 +11,6 @@ import ( "path/filepath" "reflect" - "github.com/oasislabs/oasis-core/go/common/logging" genesis "github.com/oasislabs/oasis-core/go/genesis/file" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" @@ -29,19 +28,16 @@ const haltEpoch = 3 type haltRestoreImpl struct { basicImpl - - logger *logging.Logger } func newHaltRestoreImpl() scenario.Scenario { - sc := &haltRestoreImpl{ - basicImpl: basicImpl{ - clientBinary: "test-long-term-client", - clientArgs: []string{"--mode", "part1"}, - }, - logger: 
logging.GetLogger("scenario/e2e/halt_restore"), - } - return sc + return &haltRestoreImpl{ + basicImpl: *newBasicImpl( + "halt-restore", + "test-long-term-client", + []string{"--mode", "part1"}, + ), + } } func (sc *haltRestoreImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -56,10 +52,6 @@ func (sc *haltRestoreImpl) Fixture() (*oasis.NetworkFixture, error) { return f, nil } -func (sc *haltRestoreImpl) Name() string { - return "halt-restore" -} - func (sc *haltRestoreImpl) getExportedGenesisFiles() ([]string, error) { // Gather all nodes. var nodes []interface { diff --git a/go/oasis-test-runner/scenario/e2e/identity_cli.go b/go/oasis-test-runner/scenario/e2e/identity_cli.go index 5c96cd199ec..7432304759c 100644 --- a/go/oasis-test-runner/scenario/e2e/identity_cli.go +++ b/go/oasis-test-runner/scenario/e2e/identity_cli.go @@ -12,6 +12,7 @@ import ( "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" ) @@ -29,38 +30,38 @@ type identityCLIImpl struct { logger *logging.Logger } -func (i *identityCLIImpl) Name() string { +func (ident *identityCLIImpl) Name() string { return "identity-cli" } -func (i *identityCLIImpl) Init(childEnv *env.Env, net *oasis.Network) error { - i.nodeBinary = viper.GetString(cfgNodeBinary) +func (ident *identityCLIImpl) Init(childEnv *env.Env, net *oasis.Network) error { + ident.nodeBinary = viper.GetString(cfgNodeBinary) dataDir, err := childEnv.NewSubDir("test-identity") if err != nil { return fmt.Errorf("scenario/e2e/identity_cli: init failed to create subdir: %w", err) } - i.dataDir = dataDir.String() + ident.dataDir = dataDir.String() return nil } -func (i *identityCLIImpl) Fixture() (*oasis.NetworkFixture, error) { +func (ident *identityCLIImpl) Fixture() (*oasis.NetworkFixture, error) { return nil, nil } -func (i *identityCLIImpl) Run(childEnv *env.Env) error { +func (ident *identityCLIImpl) Run(childEnv *env.Env) error { args := []string{ "identity", "init", - "--" + common.CfgDataDir, i.dataDir, + "--" + common.CfgDataDir, ident.dataDir, } - if err := runSubCommand(childEnv, "identity-init", i.nodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, ident.logger, "identity-init", ident.nodeBinary, args); err != nil { return fmt.Errorf("scenario/e2e/identity_cli: failed provision node identity: %w", err) } // Load created identity. 
- factory := fileSigner.NewFactory(i.dataDir, signature.SignerNode, signature.SignerP2P, signature.SignerConsensus) - if _, err := identity.Load(i.dataDir, factory); err != nil { + factory := fileSigner.NewFactory(ident.dataDir, signature.SignerNode, signature.SignerP2P, signature.SignerConsensus) + if _, err := identity.Load(ident.dataDir, factory); err != nil { return fmt.Errorf("scenario/e2e/identity_cli: failed to load node initialized identity: %w", err) } diff --git a/go/oasis-test-runner/scenario/e2e/keymanager_restart.go b/go/oasis-test-runner/scenario/e2e/keymanager_restart.go index 372011531fb..a661d7d9ad8 100644 --- a/go/oasis-test-runner/scenario/e2e/keymanager_restart.go +++ b/go/oasis-test-runner/scenario/e2e/keymanager_restart.go @@ -3,7 +3,6 @@ package e2e import ( "context" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" @@ -16,23 +15,16 @@ var ( type kmRestartImpl struct { basicImpl - - logger *logging.Logger } func newKmRestartImpl() scenario.Scenario { - sc := &kmRestartImpl{ - basicImpl: basicImpl{ - clientBinary: "simple-keyvalue-enc-client", - clientArgs: []string{"--key", "key1"}, - }, - logger: logging.GetLogger("scenario/e2e/keymanager_restart"), + return &kmRestartImpl{ + basicImpl: *newBasicImpl( + "keymanager-restart", + "simple-keyvalue-enc-client", + []string{"--key", "key1"}, + ), } - return sc -} - -func (sc *kmRestartImpl) Name() string { - return "keymanager-restart" } func (sc *kmRestartImpl) Fixture() (*oasis.NetworkFixture, error) { diff --git a/go/oasis-test-runner/scenario/e2e/node_shutdown.go b/go/oasis-test-runner/scenario/e2e/node_shutdown.go index fcb7f56d2ff..3854a9f664a 100644 --- a/go/oasis-test-runner/scenario/e2e/node_shutdown.go +++ b/go/oasis-test-runner/scenario/e2e/node_shutdown.go @@ -5,9 +5,9 @@ import ( "github.com/pkg/errors" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" ) @@ -18,14 +18,11 @@ var ( type nodeShutdownImpl struct { basicImpl - - logger *logging.Logger } func newNodeShutdownImpl() scenario.Scenario { sc := &nodeShutdownImpl{ - basicImpl: basicImpl{}, - logger: logging.GetLogger("scenario/e2e/node_shutdown"), + basicImpl: *newBasicImpl("node-shutdown", "", nil), } return sc } @@ -69,7 +66,7 @@ func (sc *nodeShutdownImpl) Run(childEnv *env.Env) error { "--log.level", "debug", "--address", "unix:" + computeWorker.SocketPath(), } - if err = runSubCommand(childEnv, "control-shutdown", sc.basicImpl.net.Config().NodeBinary, args); err != nil { + if err = cli.RunSubCommand(childEnv, sc.logger, "control-shutdown", sc.basicImpl.net.Config().NodeBinary, args); err != nil { return errors.Wrap(err, "scenario/e2e/node_shutdown: send request failed") } diff --git a/go/oasis-test-runner/scenario/e2e/registry_cli.go b/go/oasis-test-runner/scenario/e2e/registry_cli.go index 273eaea7502..79cabbb462c 100644 --- a/go/oasis-test-runner/scenario/e2e/registry_cli.go +++ b/go/oasis-test-runner/scenario/e2e/registry_cli.go @@ -17,7 +17,6 @@ import ( "github.com/oasislabs/oasis-core/go/common/crypto/signature" fileSigner "github.com/oasislabs/oasis-core/go/common/crypto/signature/signers/file" 
"github.com/oasislabs/oasis-core/go/common/entity" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/common/node" cmdCommon "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/consensus" @@ -25,31 +24,22 @@ import ( "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/grpc" cmdRegEnt "github.com/oasislabs/oasis-core/go/oasis-node/cmd/registry/entity" cmdRegNode "github.com/oasislabs/oasis-core/go/oasis-node/cmd/registry/node" - cmdRegRt "github.com/oasislabs/oasis-core/go/oasis-node/cmd/registry/runtime" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" registry "github.com/oasislabs/oasis-core/go/registry/api" ) -const () - var ( // RegistryCLI is the staking scenario. RegistryCLI scenario.Scenario = ®istryCLIImpl{ - basicImpl: basicImpl{}, - logger: logging.GetLogger("scenario/e2e/registry"), + basicImpl: *newBasicImpl("registry-cli", "", nil), } ) type registryCLIImpl struct { basicImpl - - logger *logging.Logger -} - -func (r *registryCLIImpl) Name() string { - return "registry-cli" } func (r *registryCLIImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -79,14 +69,16 @@ func (r *registryCLIImpl) Run(childEnv *env.Env) error { } logger.Info("nodes registered") + cli := cli.New(childEnv, r.net, r.logger) + // Run the tests // registry entity and registry node subcommands - if err := r.testEntityAndNode(childEnv); err != nil { + if err := r.testEntityAndNode(childEnv, cli); err != nil { return fmt.Errorf("scenario/e2e/registry: error while running registry entity and node test: %w", err) } // registry runtime subcommands - if err := r.testRuntime(childEnv); err != nil { + if err := r.testRuntime(childEnv, cli); err != nil { return fmt.Errorf("scenario/e2e/registry: error while running registry runtime test: %w", err) } @@ -98,7 +90,7 @@ func (r *registryCLIImpl) Run(childEnv *env.Env) error { } // testEntity tests registry entity subcommands. -func (r *registryCLIImpl) testEntityAndNode(childEnv *env.Env) error { +func (r *registryCLIImpl) testEntityAndNode(childEnv *env.Env, cli *cli.Helpers) error { // List entities. entities, err := r.listEntities(childEnv) if err != nil { @@ -172,7 +164,7 @@ func (r *registryCLIImpl) testEntityAndNode(childEnv *env.Env) error { } // Submit register entity transaction. - if err = r.submitTx(childEnv, registerTxPath); err != nil { + if err = cli.Consensus.SubmitTx(registerTxPath); err != nil { return fmt.Errorf("scenario/e2e/registry/entity: failed to submit entity register tx: %w", err) } @@ -193,7 +185,7 @@ func (r *registryCLIImpl) testEntityAndNode(childEnv *env.Env) error { } // Submit deregister entity transaction. 
- if err = r.submitTx(childEnv, deregisterTxPath); err != nil { + if err = cli.Consensus.SubmitTx(deregisterTxPath); err != nil { return fmt.Errorf("scenario/e2e/registry/entity: failed to submit entity deregister tx: %w", err) } @@ -217,7 +209,7 @@ func (r *registryCLIImpl) listEntities(childEnv *env.Env) ([]signature.PublicKey "registry", "entity", "list", "--" + grpc.CfgAddress, "unix:" + r.basicImpl.net.Validators()[0].SocketPath(), } - b, err := runSubCommandWithOutput(childEnv, "list", r.basicImpl.net.Config().NodeBinary, args) + b, err := cli.RunSubCommandWithOutput(childEnv, r.logger, "list", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry/entity: failed to list entities: %s error: %w", b.String(), err) } @@ -260,7 +252,7 @@ func (r *registryCLIImpl) initEntity(childEnv *env.Env, entDir string) (*entity. "--" + flags.CfgSigner, fileSigner.SignerName, "--" + flags.CfgSignerDir, entDir, } - _, err := runSubCommandWithOutput(childEnv, "entity-init", r.basicImpl.net.Config().NodeBinary, args) + _, err := cli.RunSubCommandWithOutput(childEnv, r.logger, "entity-init", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry/entity: failed to init entity: %w", err) } @@ -284,7 +276,7 @@ func (r *registryCLIImpl) updateEntity(childEnv *env.Env, nodes []*node.Node, no "--" + cmdRegEnt.CfgNodeID, strings.Join(nodeIDs, ","), "--" + cmdRegEnt.CfgNodeDescriptor, strings.Join(nodeGenesisFiles, ","), } - _, err := runSubCommandWithOutput(childEnv, "entity-update", r.basicImpl.net.Config().NodeBinary, args) + _, err := cli.RunSubCommandWithOutput(childEnv, r.logger, "entity-update", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry/entity: failed to update entity: %w", err) } @@ -299,7 +291,7 @@ func (r *registryCLIImpl) listNodes(childEnv *env.Env) ([]signature.PublicKey, e "registry", "node", "list", "--" + grpc.CfgAddress, "unix:" + r.basicImpl.net.Validators()[0].SocketPath(), } - b, err := runSubCommandWithOutput(childEnv, "node-list", r.basicImpl.net.Config().NodeBinary, args) + b, err := cli.RunSubCommandWithOutput(childEnv, r.logger, "node-list", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry/entity: failed to list nodes: %s error: %w", b.String(), err) } @@ -418,7 +410,7 @@ func (r *registryCLIImpl) initNode(childEnv *env.Env, ent *entity.Entity, entDir "--" + flags.CfgSignerDir, entDir, "--" + cmdCommon.CfgDataDir, dataDir, } - _, err = runSubCommandWithOutput(childEnv, "init-node", r.basicImpl.net.Config().NodeBinary, args) + _, err = cli.RunSubCommandWithOutput(childEnv, r.logger, "init-node", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry: failed to init node: %w", err) } @@ -490,11 +482,6 @@ func (r *registryCLIImpl) initNode(childEnv *env.Env, ent *entity.Entity, entDir return n, nil } -// submitTx is a wrapper for consensus submit_tx command. -func (r *registryCLIImpl) submitTx(childEnv *env.Env, txPath string) error { - return submitTx(childEnv, txPath, r.logger, r.basicImpl.net.Validators()[0].SocketPath(), r.basicImpl.net.Config().NodeBinary) -} - // genRegisterEntityTx calls registry entity gen_register. 
func (r *registryCLIImpl) genRegisterEntityTx(childEnv *env.Env, nonce int, txPath string, entDir string) error { r.logger.Info("generating register entity tx") @@ -511,7 +498,7 @@ func (r *registryCLIImpl) genRegisterEntityTx(childEnv *env.Env, nonce int, txPa "--" + flags.CfgSignerDir, entDir, "--" + flags.CfgGenesisFile, r.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_register", r.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, r.logger, "gen_register", r.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genRegisterEntityTx: failed to generate register entity tx: %w", err) } @@ -534,7 +521,7 @@ func (r *registryCLIImpl) genDeregisterEntityTx(childEnv *env.Env, nonce int, tx "--" + flags.CfgSignerDir, entDir, "--" + flags.CfgGenesisFile, r.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_deregister", r.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, r.logger, "gen_deregister", r.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genDeregisterEntityTx: failed to generate deregister entity tx: %w", err) } @@ -542,7 +529,7 @@ func (r *registryCLIImpl) genDeregisterEntityTx(childEnv *env.Env, nonce int, tx } // testRuntime tests registry runtime subcommands. -func (r *registryCLIImpl) testRuntime(childEnv *env.Env) error { +func (r *registryCLIImpl) testRuntime(childEnv *env.Env, cli *cli.Helpers) error { // List runtimes. runtimes, err := r.listRuntimes(childEnv) if err != nil { @@ -593,12 +580,12 @@ func (r *registryCLIImpl) testRuntime(childEnv *env.Env) error { if err = ioutil.WriteFile(genesisStatePath, genesisStateStr, 0600); err != nil { return err } - if err = r.genRegisterRuntimeTx(childEnv, testRuntime, registerTxPath, genesisStatePath); err != nil { + if err = cli.Registry.GenerateRegisterRuntimeTx(0, testRuntime, registerTxPath, genesisStatePath); err != nil { return fmt.Errorf("scenario/e2e/registry/runtime: failed to generate runtime register tx: %w", err) } // Submit register runtime transaction. - if err = r.submitTx(childEnv, registerTxPath); err != nil { + if err = cli.Consensus.SubmitTx(registerTxPath); err != nil { return fmt.Errorf("scenario/e2e/registry/runtime: failed to submit runtime register tx: %w", err) } @@ -631,7 +618,7 @@ func (r *registryCLIImpl) listRuntimes(childEnv *env.Env) (map[common.Namespace] "-v", "--" + grpc.CfgAddress, "unix:" + r.basicImpl.net.Validators()[0].SocketPath(), } - b, err := runSubCommandWithOutput(childEnv, "list", r.basicImpl.net.Config().NodeBinary, args) + b, err := cli.RunSubCommandWithOutput(childEnv, r.logger, "list", r.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/registry/runtime: failed to list runtimes: %s error: %w", b.String(), err) } @@ -653,50 +640,3 @@ func (r *registryCLIImpl) listRuntimes(childEnv *env.Env) (map[common.Namespace] return runtimes, nil } - -// genRegisterRuntimeTx calls registry entity gen_register. -func (r *registryCLIImpl) genRegisterRuntimeTx(childEnv *env.Env, runtime registry.Runtime, txPath string, genesisStateFile string) error { - r.logger.Info("generating register entity tx") - - // Generate a runtime register transaction file with debug test entity. 
- args := []string{ - "registry", "runtime", "gen_register", - "--" + cmdRegRt.CfgID, runtime.ID.String(), - "--" + cmdRegRt.CfgTEEHardware, runtime.TEEHardware.String(), - "--" + cmdRegRt.CfgGenesisState, genesisStateFile, - "--" + cmdRegRt.CfgGenesisRound, strconv.FormatUint(runtime.Genesis.Round, 10), - "--" + cmdRegRt.CfgKind, runtime.Kind.String(), - "--" + cmdRegRt.CfgVersion, runtime.Version.Version.String(), - "--" + cmdRegRt.CfgVersionEnclave, string(runtime.Version.TEE), - "--" + cmdRegRt.CfgComputeGroupSize, strconv.FormatUint(runtime.Compute.GroupSize, 10), - "--" + cmdRegRt.CfgComputeGroupBackupSize, strconv.FormatUint(runtime.Compute.GroupBackupSize, 10), - "--" + cmdRegRt.CfgComputeAllowedStragglers, strconv.FormatUint(runtime.Compute.AllowedStragglers, 10), - "--" + cmdRegRt.CfgComputeRoundTimeout, runtime.Compute.RoundTimeout.String(), - "--" + cmdRegRt.CfgMergeGroupSize, strconv.FormatUint(runtime.Merge.GroupSize, 10), - "--" + cmdRegRt.CfgMergeGroupBackupSize, strconv.FormatUint(runtime.Merge.GroupBackupSize, 10), - "--" + cmdRegRt.CfgMergeAllowedStragglers, strconv.FormatUint(runtime.Merge.AllowedStragglers, 10), - "--" + cmdRegRt.CfgMergeRoundTimeout, runtime.Merge.RoundTimeout.String(), - "--" + cmdRegRt.CfgStorageGroupSize, strconv.FormatUint(runtime.Storage.GroupSize, 10), - "--" + cmdRegRt.CfgTxnSchedulerGroupSize, strconv.FormatUint(runtime.TxnScheduler.GroupSize, 10), - "--" + cmdRegRt.CfgTxnSchedulerAlgorithm, runtime.TxnScheduler.Algorithm, - "--" + cmdRegRt.CfgTxnSchedulerBatchFlushTimeout, runtime.TxnScheduler.BatchFlushTimeout.String(), - "--" + cmdRegRt.CfgTxnSchedulerMaxBatchSize, strconv.FormatUint(runtime.TxnScheduler.MaxBatchSize, 10), - "--" + cmdRegRt.CfgTxnSchedulerMaxBatchSizeBytes, strconv.FormatUint(runtime.TxnScheduler.MaxBatchSizeBytes, 10), - "--" + consensus.CfgTxNonce, strconv.Itoa(0), - "--" + consensus.CfgTxFile, txPath, - "--" + consensus.CfgTxFeeAmount, strconv.Itoa(0), - "--" + consensus.CfgTxFeeGas, strconv.Itoa(feeGas), - "--" + flags.CfgDebugDontBlameOasis, - "--" + cmdCommon.CfgDebugAllowTestKeys, - "--" + flags.CfgDebugTestEntity, - "--" + flags.CfgGenesisFile, r.basicImpl.net.GenesisPath(), - } - if runtime.KeyManager != nil { - args = append(args, "--"+cmdRegRt.CfgKeyManager, runtime.KeyManager.String()) - } - if err := runSubCommand(childEnv, "gen_register", r.basicImpl.net.Config().NodeBinary, args); err != nil { - return fmt.Errorf("genRegisterRuntimeTx: failed to generate register runtime tx: %w", err) - } - - return nil -} diff --git a/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go b/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go new file mode 100644 index 00000000000..86c440304a3 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime_dynamic.go @@ -0,0 +1,224 @@ +package e2e + +import ( + "context" + "fmt" + "path/filepath" + "time" + + consensus "github.com/oasislabs/oasis-core/go/consensus/api" + epochtime "github.com/oasislabs/oasis-core/go/epochtime/api" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" + registry "github.com/oasislabs/oasis-core/go/registry/api" +) + +var ( + // RuntimeDynamic is the dynamic runtime registration scenario. 
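+	//
+	// The scenario dynamically registers a compute runtime, stops the runtime nodes so
+	// their registrations expire and the runtime is suspended, and then restarts them to
+	// verify that the runtime can be resumed.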
+ RuntimeDynamic scenario.Scenario = newRuntimeDynamicImpl() +) + +type runtimeDynamicImpl struct { + basicImpl + + epoch epochtime.EpochTime +} + +func newRuntimeDynamicImpl() scenario.Scenario { + return &runtimeDynamicImpl{ + basicImpl: *newBasicImpl("runtime-dynamic", "", nil), + } +} + +func (sc *runtimeDynamicImpl) Fixture() (*oasis.NetworkFixture, error) { + f, err := sc.basicImpl.Fixture() + if err != nil { + return nil, err + } + + // We need IAS proxy to use the registry as we are registering runtimes dynamically. + f.Network.IASUseRegistry = true + // Avoid unexpected blocks. + f.Network.EpochtimeMock = true + // We need runtime registration to be enabled. + // TODO: This should not be needed once this is the default (no longer debug). + f.Network.RegistryDebugAllowRuntimeRegistration = true + // Exclude all runtimes from genesis as we will register those dynamically. + for i, rt := range f.Runtimes { + // TODO: This should not be needed once dynamic keymanager policy document registration + // is supported (see oasis-core#2516). + if rt.Kind != registry.KindCompute { + continue + } + f.Runtimes[i].ExcludeFromGenesis = true + } + // All runtime nodes should be restartable as we are going to restart them. + for i := range f.StorageWorkers { + f.StorageWorkers[i].Restartable = true + } + for i := range f.ComputeWorkers { + f.ComputeWorkers[i].Restartable = true + } + + return f, nil +} + +func (sc *runtimeDynamicImpl) epochTransition(ctx context.Context) error { + sc.epoch++ + + sc.logger.Info("triggering epoch transition", + "epoch", sc.epoch, + ) + if err := sc.net.Controller().SetEpoch(ctx, sc.epoch); err != nil { + return fmt.Errorf("failed to set epoch: %w", err) + } + sc.logger.Info("epoch transition done") + return nil +} + +func (sc *runtimeDynamicImpl) Run(childEnv *env.Env) error { + if err := sc.net.Start(); err != nil { + return err + } + + ctx := context.Background() + cli := cli.New(childEnv, sc.net, sc.logger) + + // Wait for all nodes to be synced before we proceed. + if err := sc.waitNodesSynced(); err != nil { + return err + } + + // NOTE: We also wait for storage workers as they can currently register even before the + // runtime is registered in the registry. If this changes, node count needs update. + numNodes := len(sc.net.Validators()) + len(sc.net.StorageWorkers()) + if sc.net.Keymanager() != nil { + numNodes++ + } + sc.logger.Info("waiting for (some) nodes to register", + "num_nodes", numNodes, + ) + if err := sc.net.Controller().WaitNodesRegistered(ctx, numNodes); err != nil { + return err + } + + // Perform an initial epoch transition to make sure that the nodes can handle it even though + // there are no runtimes registered yet. + if err := sc.epochTransition(ctx); err != nil { + return err + } + + // TODO: Register a new key manager runtime and status (see oasis-core#2516). + + // Register a new compute runtime. + compRt := sc.net.Runtimes()[1].ToRuntimeDescriptor() + txPath := filepath.Join(childEnv.Dir(), "register_compute_runtime.json") + if err := cli.Registry.GenerateRegisterRuntimeTx(0, compRt, txPath, ""); err != nil { + return fmt.Errorf("failed to generate register runtime tx: %w", err) + } + if err := cli.Consensus.SubmitTx(txPath); err != nil { + return fmt.Errorf("failed to register compute runtime: %w", err) + } + + // Wait for all nodes to register. 
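+	//
+	// With the compute runtime registered, the compute workers can now register as well,
+	// so we expect the full node count reported by NumRegisterNodes().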
+ sc.logger.Info("waiting for runtime nodes to register", + "num_nodes", sc.net.NumRegisterNodes(), + ) + if err := sc.net.Controller().WaitNodesRegistered(ctx, sc.net.NumRegisterNodes()); err != nil { + return err + } + + for i := 0; i < 5; i++ { + // Perform another epoch transition to elect compute runtime committees. + if err := sc.epochTransition(ctx); err != nil { + return err + } + + // Wait a bit after epoch transitions. + time.Sleep(1 * time.Second) + + // Submit a runtime transaction. + sc.logger.Info("submitting transaction to runtime", + "seq", i, + ) + if err := sc.submitRuntimeTx(ctx, "hello", fmt.Sprintf("world %d", i)); err != nil { + return err + } + } + + // Stop all runtime nodes, so they will not re-register, causing the nodes to expire. + sc.logger.Info("stopping storage nodes") + for _, n := range sc.net.StorageWorkers() { + if err := n.Stop(); err != nil { + return fmt.Errorf("failed to stop node: %w", err) + } + } + sc.logger.Info("stopping compute nodes") + for _, n := range sc.net.ComputeWorkers() { + if err := n.Stop(); err != nil { + return fmt.Errorf("failed to stop node: %w", err) + } + } + + // Epoch transitions so nodes expire. + sc.logger.Info("performing epoch transitions so nodes expire") + for i := 0; i < 3; i++ { + if err := sc.epochTransition(ctx); err != nil { + return err + } + + // Wait a bit between epoch transitions. + time.Sleep(1 * time.Second) + } + + // Ensure that runtime got suspended. + sc.logger.Info("checking that runtime got suspended") + _, err := sc.net.Controller().Registry.GetRuntime(ctx, ®istry.NamespaceQuery{ + Height: consensus.HeightLatest, + ID: compRt.ID, + }) + switch err { + case nil: + return fmt.Errorf("runtime should be suspended but it is not") + case registry.ErrNoSuchRuntime: + // Runtime is suspended. + default: + return fmt.Errorf("unexpected error while fetching runtime: %w", err) + } + + // Start runtime nodes, make sure they register. + sc.logger.Info("starting storage nodes") + for _, n := range sc.net.StorageWorkers() { + if err := n.Start(); err != nil { + return fmt.Errorf("failed to start node: %w", err) + } + } + sc.logger.Info("starting compute nodes") + for _, n := range sc.net.ComputeWorkers() { + if err := n.Start(); err != nil { + return fmt.Errorf("failed to start node: %w", err) + } + } + + sc.logger.Info("waiting for runtime nodes to register", + "num_nodes", sc.net.NumRegisterNodes(), + ) + if err := sc.net.Controller().WaitNodesRegistered(ctx, sc.net.NumRegisterNodes()); err != nil { + return err + } + + // Epoch transition. + if err := sc.epochTransition(ctx); err != nil { + return err + } + + // Submit a runtime transaction to check whether the runtimes got resumed. 
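+	//
+	// A suspended runtime has no committees elected for it, so a successful round here
+	// confirms that re-registering the nodes resumed the runtime.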
+ sc.logger.Info("submitting transaction to runtime") + if err := sc.submitRuntimeTx(ctx, "hello", "final world"); err != nil { + return err + } + + return nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime_prune.go b/go/oasis-test-runner/scenario/e2e/runtime_prune.go index 352c1f73b77..89e4bc43b66 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime_prune.go +++ b/go/oasis-test-runner/scenario/e2e/runtime_prune.go @@ -5,14 +5,11 @@ import ( "fmt" "time" - "github.com/oasislabs/oasis-core/go/common/cbor" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" "github.com/oasislabs/oasis-core/go/runtime/client/api" "github.com/oasislabs/oasis-core/go/runtime/history" - "github.com/oasislabs/oasis-core/go/runtime/transaction" ) var ( @@ -32,22 +29,12 @@ const ( type runtimePruneImpl struct { basicImpl - - logger *logging.Logger } func newRuntimePruneImpl() scenario.Scenario { - sc := &runtimePruneImpl{ - basicImpl: basicImpl{ - clientBinary: "", // We use a Go client. - }, - logger: logging.GetLogger("scenario/e2e/runtime_prune"), + return &runtimePruneImpl{ + basicImpl: *newBasicImpl("runtime-prune", "", nil), } - return sc -} - -func (sc *runtimePruneImpl) Name() string { - return "runtime-prune" } func (sc *runtimePruneImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -68,32 +55,16 @@ func (sc *runtimePruneImpl) Fixture() (*oasis.NetworkFixture, error) { return f, nil } -// keyValue is a key/value argument for the simple-keyvalue runtime. -type keyValue struct { - Key string `json:"key"` - Value string `json:"value"` -} - func (sc *runtimePruneImpl) Run(childEnv *env.Env) error { if err := sc.net.Start(); err != nil { return err } - ctx := context.Background() - - sc.logger.Info("waiting for nodes to register", - "num_nodes", sc.net.NumRegisterNodes(), - ) - if err := sc.net.Controller().WaitNodesRegistered(ctx, sc.net.NumRegisterNodes()); err != nil { + if err := sc.initialEpochTransitions(); err != nil { return err } - sc.logger.Info("triggering epoch transition") - if err := sc.net.Controller().SetEpoch(ctx, 1); err != nil { - return fmt.Errorf("failed to set epoch: %w", err) - } - sc.logger.Info("epoch transition done") - + ctx := context.Background() c := sc.net.ClientController().RuntimeClient // Submit transactions. @@ -102,26 +73,8 @@ func (sc *runtimePruneImpl) Run(childEnv *env.Env) error { "seq", i, ) - // Submit a transaction and check the result. 
- var rsp transaction.TxnOutput - rawRsp, err := c.SubmitTx(ctx, &api.SubmitTxRequest{ - RuntimeID: runtimeID, - Data: cbor.Marshal(&transaction.TxnCall{ - Method: "insert", - Args: keyValue{ - Key: "hello", - Value: "world", - }, - }), - }) - if err != nil { - return fmt.Errorf("failed to submit runtime tx: %w", err) - } - if err = cbor.Unmarshal(rawRsp, &rsp); err != nil { - return fmt.Errorf("malformed tx output from runtime: %w", err) - } - if rsp.Error != nil { - return fmt.Errorf("runtime tx failed: %s", *rsp.Error) + if err := sc.submitRuntimeTx(ctx, "hello", fmt.Sprintf("world %d", i)); err != nil { + return err } } diff --git a/go/oasis-test-runner/scenario/e2e/sentry.go b/go/oasis-test-runner/scenario/e2e/sentry.go index e4ca526a2e6..9d136c43b36 100644 --- a/go/oasis-test-runner/scenario/e2e/sentry.go +++ b/go/oasis-test-runner/scenario/e2e/sentry.go @@ -1,7 +1,6 @@ package e2e import ( - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/log" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" @@ -18,22 +17,12 @@ var ( type sentryImpl struct { basicImpl - - logger *logging.Logger } func newSentryImpl() scenario.Scenario { - s := &sentryImpl{ - basicImpl: basicImpl{ - clientBinary: "simple-keyvalue-client", - }, - logger: logging.GetLogger("scenario/e2e/sentry"), + return &sentryImpl{ + basicImpl: *newBasicImpl("sentry", "simple-keyvalue-client", nil), } - return s -} - -func (s *sentryImpl) Name() string { - return "sentry" } func (s *sentryImpl) Fixture() (*oasis.NetworkFixture, error) { diff --git a/go/oasis-test-runner/scenario/e2e/stake_cli.go b/go/oasis-test-runner/scenario/e2e/stake_cli.go index f1e79efced1..337a5be09a1 100644 --- a/go/oasis-test-runner/scenario/e2e/stake_cli.go +++ b/go/oasis-test-runner/scenario/e2e/stake_cli.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/oasislabs/oasis-core/go/common/crypto/signature" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/common/quantity" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common" "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/consensus" @@ -20,6 +19,7 @@ import ( "github.com/oasislabs/oasis-core/go/oasis-node/cmd/stake" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" "github.com/oasislabs/oasis-core/go/staking/api" ) @@ -56,19 +56,12 @@ const ( var ( // StakeCLI is the staking scenario. StakeCLI scenario.Scenario = &stakeCLIImpl{ - basicImpl: basicImpl{}, - logger: logging.GetLogger("scenario/e2e/stake"), + basicImpl: *newBasicImpl("stake-cli", "", nil), } ) type stakeCLIImpl struct { basicImpl - - logger *logging.Logger -} - -func (s *stakeCLIImpl) Name() string { - return "stake-cli" } func (s *stakeCLIImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -98,6 +91,8 @@ func (s *stakeCLIImpl) Run(childEnv *env.Env) error { } logger.Info("nodes registered") + cli := cli.New(childEnv, s.net, s.logger) + // Account list. 
accounts, err := s.listAccounts(childEnv) if err != nil { @@ -136,27 +131,27 @@ func (s *stakeCLIImpl) Run(childEnv *env.Env) error { // Run the tests // Transfer - if err = s.testTransfer(childEnv, src, dst); err != nil { + if err = s.testTransfer(childEnv, cli, src, dst); err != nil { return fmt.Errorf("scenario/e2e/stake: error while running Transfer test: %w", err) } // Burn - if err = s.testBurn(childEnv, src); err != nil { + if err = s.testBurn(childEnv, cli, src); err != nil { return fmt.Errorf("scenario/e2e/stake: error while running Burn test: %w", err) } // Escrow - if err = s.testEscrow(childEnv, src, escrow); err != nil { + if err = s.testEscrow(childEnv, cli, src, escrow); err != nil { return fmt.Errorf("scenario/e2e/stake: error while running Escrow test: %w", err) } // ReclaimEscrow - if err = s.testReclaimEscrow(childEnv, src, escrow); err != nil { + if err = s.testReclaimEscrow(childEnv, cli, src, escrow); err != nil { return fmt.Errorf("scenario/e2e/stake: error while running ReclaimEscrow test: %w", err) } // AmendCommissionSchedule - if err = s.testAmendCommissionSchedule(childEnv, src); err != nil { + if err = s.testAmendCommissionSchedule(childEnv, cli, src); err != nil { return fmt.Errorf("scenario/e2e/stake: error while running AmendCommissionSchedule: %w", err) } @@ -168,7 +163,7 @@ func (s *stakeCLIImpl) Run(childEnv *env.Env) error { } // testTransfer tests transfer of 1000 tokens from src to dst. -func (s *stakeCLIImpl) testTransfer(childEnv *env.Env, src signature.PublicKey, dst signature.PublicKey) error { +func (s *stakeCLIImpl) testTransfer(childEnv *env.Env, cli *cli.Helpers, src signature.PublicKey, dst signature.PublicKey) error { transferTxPath := filepath.Join(childEnv.Dir(), "stake_transfer.json") if err := s.genTransferTx(childEnv, transferAmount, 0, dst, transferTxPath); err != nil { return err @@ -183,7 +178,7 @@ func (s *stakeCLIImpl) testTransfer(childEnv *env.Env, src signature.PublicKey, return err } - if err := s.submitTx(childEnv, transferTxPath); err != nil { + if err := cli.Consensus.SubmitTx(transferTxPath); err != nil { return err } @@ -205,7 +200,7 @@ func (s *stakeCLIImpl) testTransfer(childEnv *env.Env, src signature.PublicKey, } // testBurn tests burning of 2000 tokens owned by src. -func (s *stakeCLIImpl) testBurn(childEnv *env.Env, src signature.PublicKey) error { +func (s *stakeCLIImpl) testBurn(childEnv *env.Env, cli *cli.Helpers, src signature.PublicKey) error { burnTxPath := filepath.Join(childEnv.Dir(), "stake_burn.json") if err := s.genBurnTx(childEnv, burnAmount, 1, burnTxPath); err != nil { return err @@ -214,7 +209,7 @@ func (s *stakeCLIImpl) testBurn(childEnv *env.Env, src signature.PublicKey) erro return err } - if err := s.submitTx(childEnv, burnTxPath); err != nil { + if err := cli.Consensus.SubmitTx(burnTxPath); err != nil { return err } @@ -233,7 +228,7 @@ func (s *stakeCLIImpl) testBurn(childEnv *env.Env, src signature.PublicKey) erro } // testEscrow tests escrowing of 3000 tokens from src to dst. 
-func (s *stakeCLIImpl) testEscrow(childEnv *env.Env, src signature.PublicKey, escrow signature.PublicKey) error { +func (s *stakeCLIImpl) testEscrow(childEnv *env.Env, cli *cli.Helpers, src signature.PublicKey, escrow signature.PublicKey) error { escrowTxPath := filepath.Join(childEnv.Dir(), "stake_escrow.json") if err := s.genEscrowTx(childEnv, escrowAmount, 2, escrow, escrowTxPath); err != nil { return err @@ -242,7 +237,7 @@ func (s *stakeCLIImpl) testEscrow(childEnv *env.Env, src signature.PublicKey, es return err } - if err := s.submitTx(childEnv, escrowTxPath); err != nil { + if err := cli.Consensus.SubmitTx(escrowTxPath); err != nil { return err } @@ -264,7 +259,7 @@ func (s *stakeCLIImpl) testEscrow(childEnv *env.Env, src signature.PublicKey, es } // testReclaimEscrow test reclaiming an escrow of 3000 tokens from escrow account. -func (s *stakeCLIImpl) testReclaimEscrow(childEnv *env.Env, src signature.PublicKey, escrow signature.PublicKey) error { +func (s *stakeCLIImpl) testReclaimEscrow(childEnv *env.Env, cli *cli.Helpers, src signature.PublicKey, escrow signature.PublicKey) error { reclaimEscrowTxPath := filepath.Join(childEnv.Dir(), "stake_reclaim_escrow.json") if err := s.genReclaimEscrowTx(childEnv, escrowAmount, 3, escrow, reclaimEscrowTxPath); err != nil { return err @@ -273,7 +268,7 @@ func (s *stakeCLIImpl) testReclaimEscrow(childEnv *env.Env, src signature.Public return err } - if err := s.submitTx(childEnv, reclaimEscrowTxPath); err != nil { + if err := cli.Consensus.SubmitTx(reclaimEscrowTxPath); err != nil { return err } @@ -306,7 +301,7 @@ func mustInitQuantity(i int64) (q quantity.Quantity) { return } -func (s *stakeCLIImpl) testAmendCommissionSchedule(childEnv *env.Env, src signature.PublicKey) error { +func (s *stakeCLIImpl) testAmendCommissionSchedule(childEnv *env.Env, cli *cli.Helpers, src signature.PublicKey) error { amendCommissionScheduleTxPath := filepath.Join(childEnv.Dir(), "amend_commission_schedule.json") if err := s.genAmendCommissionScheduleTx(childEnv, 4, &api.CommissionSchedule{ Rates: []api.CommissionRateStep{ @@ -329,7 +324,7 @@ func (s *stakeCLIImpl) testAmendCommissionSchedule(childEnv *env.Env, src signat return err } - if err := s.submitTx(childEnv, amendCommissionScheduleTxPath); err != nil { + if err := cli.Consensus.SubmitTx(amendCommissionScheduleTxPath); err != nil { return err } @@ -344,7 +339,7 @@ func (s *stakeCLIImpl) listAccounts(childEnv *env.Env) ([]signature.PublicKey, e "stake", "list", "--" + grpc.CfgAddress, "unix:" + s.basicImpl.net.Validators()[0].SocketPath(), } - b, err := runSubCommandWithOutput(childEnv, "list", s.basicImpl.net.Config().NodeBinary, args) + b, err := cli.RunSubCommandWithOutput(childEnv, s.logger, "list", s.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/stake: failed to list accounts: %s error: %w", b.String(), err) } @@ -367,11 +362,6 @@ func (s *stakeCLIImpl) listAccounts(childEnv *env.Env) ([]signature.PublicKey, e return accounts, nil } -// submitTx is a wrapper for consensus submit_tx command. 
-func (s *stakeCLIImpl) submitTx(childEnv *env.Env, txPath string) error { - return submitTx(childEnv, txPath, s.logger, s.basicImpl.net.Validators()[0].SocketPath(), s.basicImpl.net.Config().NodeBinary) -} - func (s *stakeCLIImpl) getAccountInfo(childEnv *env.Env, src signature.PublicKey) (*api.Account, error) { s.logger.Info("checking account balance", stake.CfgAccountID, src.String()) args := []string{ @@ -380,7 +370,7 @@ func (s *stakeCLIImpl) getAccountInfo(childEnv *env.Env, src signature.PublicKey "--" + grpc.CfgAddress, "unix:" + s.basicImpl.net.Validators()[0].SocketPath(), } - b, err := runSubCommandWithOutput(childEnv, "info", s.basicImpl.net.Config().NodeBinary, args) + b, err := cli.RunSubCommandWithOutput(childEnv, s.logger, "info", s.basicImpl.net.Config().NodeBinary, args) if err != nil { return nil, fmt.Errorf("scenario/e2e/stake: failed to check account info: %w", err) } @@ -437,7 +427,7 @@ func (s *stakeCLIImpl) showTx(childEnv *env.Env, txPath string) error { "--" + flags.CfgDebugDontBlameOasis, "--" + flags.CfgGenesisFile, s.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "show_tx", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "show_tx", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("showTx: failed to show tx: %w", err) } return nil @@ -459,7 +449,7 @@ func (s *stakeCLIImpl) genTransferTx(childEnv *env.Env, amount int, nonce int, d "--" + common.CfgDebugAllowTestKeys, "--" + flags.CfgGenesisFile, s.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_transfer", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "gen_transfer", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genTransferTx: failed to generate transfer tx: %w", err) } return nil @@ -480,7 +470,7 @@ func (s *stakeCLIImpl) genBurnTx(childEnv *env.Env, amount int, nonce int, txPat "--" + common.CfgDebugAllowTestKeys, "--" + flags.CfgGenesisFile, s.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_burn", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "gen_burn", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genBurnTx: failed to generate burn tx: %w", err) } return nil @@ -502,7 +492,7 @@ func (s *stakeCLIImpl) genEscrowTx(childEnv *env.Env, amount int, nonce int, esc "--" + common.CfgDebugAllowTestKeys, "--" + flags.CfgGenesisFile, s.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_escrow", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "gen_escrow", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genEscrowTx: failed to generate escrow tx: %w", err) } return nil @@ -524,7 +514,7 @@ func (s *stakeCLIImpl) genReclaimEscrowTx(childEnv *env.Env, amount int, nonce i "--" + common.CfgDebugAllowTestKeys, "--" + flags.CfgGenesisFile, s.basicImpl.net.GenesisPath(), } - if err := runSubCommand(childEnv, "gen_reclaim_escrow", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "gen_reclaim_escrow", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genReclaimEscrowTx: failed to generate reclaim escrow tx: %w", err) } return nil @@ -550,7 +540,7 @@ func (s *stakeCLIImpl) genAmendCommissionScheduleTx(childEnv 
*env.Env, nonce int for _, step := range cs.Bounds { args = append(args, "--"+stake.CfgCommissionScheduleBounds, fmt.Sprintf("%d/%d/%d", step.Start, step.RateMin.ToBigInt(), step.RateMax.ToBigInt())) } - if err := runSubCommand(childEnv, "gen_amend_commission_schedule", s.basicImpl.net.Config().NodeBinary, args); err != nil { + if err := cli.RunSubCommand(childEnv, s.logger, "gen_amend_commission_schedule", s.basicImpl.net.Config().NodeBinary, args); err != nil { return fmt.Errorf("genAmendCommissionScheduleTx: failed to generate amend commission schedule tx: %w", err) } return nil diff --git a/go/oasis-test-runner/scenario/e2e/storage_sync.go b/go/oasis-test-runner/scenario/e2e/storage_sync.go index 4ea866fb04e..9fe4beb66d7 100644 --- a/go/oasis-test-runner/scenario/e2e/storage_sync.go +++ b/go/oasis-test-runner/scenario/e2e/storage_sync.go @@ -3,9 +3,9 @@ package e2e import ( "github.com/pkg/errors" - "github.com/oasislabs/oasis-core/go/common/logging" "github.com/oasislabs/oasis-core/go/oasis-test-runner/env" "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasislabs/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasislabs/oasis-core/go/oasis-test-runner/scenario" "github.com/oasislabs/oasis-core/go/storage/database" ) @@ -17,22 +17,12 @@ var ( type storageSyncImpl struct { basicImpl - - logger *logging.Logger } func newStorageSyncImpl() scenario.Scenario { - sc := &storageSyncImpl{ - basicImpl: basicImpl{ - clientBinary: "simple-keyvalue-client", - }, - logger: logging.GetLogger("scenario/e2e/storage_sync"), + return &storageSyncImpl{ + basicImpl: *newBasicImpl("storage-sync", "simple-keyvalue-client", nil), } - return sc -} - -func (sc *storageSyncImpl) Name() string { - return "storage-sync" } func (sc *storageSyncImpl) Fixture() (*oasis.NetworkFixture, error) { @@ -73,7 +63,7 @@ func (sc *storageSyncImpl) Run(childEnv *env.Env) error { "--address", "unix:" + storageNode.SocketPath(), sc.basicImpl.net.Runtimes()[1].ID().String(), } - if err = runSubCommand(childEnv, "storage-check-roots", sc.basicImpl.net.Config().NodeBinary, args); err != nil { + if err = cli.RunSubCommand(childEnv, sc.logger, "storage-check-roots", sc.basicImpl.net.Config().NodeBinary, args); err != nil { return errors.Wrap(err, "scenario/e2e/storage_sync: root check failed after sync") } diff --git a/go/oasis-test-runner/scenario/e2e/txsource.go b/go/oasis-test-runner/scenario/e2e/txsource.go index c6aaa55864d..96b4943d4ac 100644 --- a/go/oasis-test-runner/scenario/e2e/txsource.go +++ b/go/oasis-test-runner/scenario/e2e/txsource.go @@ -21,18 +21,14 @@ const ( // TxSourceTransferShort uses the transfer workload for a short time. var TxSourceTransferShort scenario.Scenario = &txSourceImpl{ - basicImpl: basicImpl{ - name: "txsource-transfer-short", - }, + basicImpl: *newBasicImpl("txsource-transfer-short", "", nil), workload: workload.NameTransfer, timeLimit: timeLimitShort, } // TxSourceTransfer uses the transfer workload. var TxSourceTransfer scenario.Scenario = &txSourceImpl{ - basicImpl: basicImpl{ - name: "txsource-transfer", - }, + basicImpl: *newBasicImpl("txsource-transfer", "", nil), workload: workload.NameTransfer, timeLimit: timeLimitLong, } diff --git a/go/oasis-test-runner/test-runner.go b/go/oasis-test-runner/test-runner.go index 771b38bb330..f0afcfc57f0 100644 --- a/go/oasis-test-runner/test-runner.go +++ b/go/oasis-test-runner/test-runner.go @@ -51,6 +51,8 @@ func main() { _ = cmd.Register(e2e.IdentityCLI) // Runtime prune test. 
_ = cmd.Register(e2e.RuntimePrune) + // Runtime dynamic registration test. + _ = cmd.Register(e2e.RuntimeDynamic) // Transaction source test. _ = cmd.Register(e2e.TxSourceTransferShort) _ = cmd.RegisterNondefault(e2e.TxSourceTransfer) diff --git a/go/registry/api/api.go b/go/registry/api/api.go index 93ed71c0c9b..1dfe0a70dbc 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -118,9 +118,13 @@ var ( // policy. ErrForbidden = errors.New(ModuleName, 16, "registry: forbidden by policy") - // ErrNodeUpdateNotAllowed is the error returned when trying to update an existing node with unallowed changes. + // ErrNodeUpdateNotAllowed is the error returned when trying to update an existing node with + // disallowed changes. ErrNodeUpdateNotAllowed = errors.New(ModuleName, 17, "registry: node update not allowed") + // ErrRuntimeUpdateNotAllowed is the error returned when trying to update an existing runtime. + ErrRuntimeUpdateNotAllowed = errors.New(ModuleName, 18, "registry: runtime update not allowed") + // MethodRegisterEntity is the method name for entity registrations. MethodRegisterEntity = transaction.NewMethodName(ModuleName, "RegisterEntity", entity.SignedEntity{}) // MethodDeregisterEntity is the method name for entity deregistrations. @@ -998,6 +1002,64 @@ func VerifyRegisterComputeRuntimeArgs(logger *logging.Logger, rt *Runtime, runti return nil } +// VerifyRuntimeUpdate verifies changes while updating the runtime. +// +// The function assumes that the signature on the current runtime is valid and thus does not perform +// re-verification. In case the passed current runtime descriptor is corrupted, this method will +// panic as this indicates state corruption. +func VerifyRuntimeUpdate(logger *logging.Logger, currentSigRt, newSigRt *SignedRuntime, newRt *Runtime) error { + if !currentSigRt.Signature.PublicKey.Equal(newSigRt.Signature.PublicKey) { + logger.Error("RegisterRuntime: trying to change runtime owner", + "current_owner", currentSigRt.Signature.PublicKey, + "new_owner", newSigRt.Signature.PublicKey, + ) + return ErrRuntimeUpdateNotAllowed + } + + var currentRt Runtime + if err := cbor.Unmarshal(currentSigRt.Blob, ¤tRt); err != nil { + logger.Error("RegisterRuntime: corrupted current runtime descriptor", + "err", err, + ) + panic("registry: current runtime state is corrupted") + } + + if !currentRt.ID.Equal(&newRt.ID) { + logger.Error("RegisterRuntime: trying to update runtime ID", + "current_id", currentRt.ID.String(), + "new_id", newRt.ID.String(), + ) + return ErrRuntimeUpdateNotAllowed + } + if currentRt.Kind != newRt.Kind { + logger.Error("RegisterRuntime: trying to update runtime kind", + "current_kind", currentRt.Kind, + "new_kind", newRt.Kind, + ) + return ErrRuntimeUpdateNotAllowed + } + if !currentRt.Genesis.Equal(&newRt.Genesis) { + logger.Error("RegisterRuntime: trying to update genesis") + return ErrRuntimeUpdateNotAllowed + } + if (currentRt.KeyManager == nil) != (newRt.KeyManager == nil) { + logger.Error("RegisterRuntime: trying to change key manager", + "current_km", currentRt.KeyManager, + "new_km", newRt.KeyManager, + ) + return ErrRuntimeUpdateNotAllowed + } + // Both descriptors must either have the key manager set or not. 
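+	// The check above guarantees this invariant, so it suffices to compare the key
+	// manager IDs only when they are set.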
+ if currentRt.KeyManager != nil && !currentRt.KeyManager.Equal(newRt.KeyManager) { + logger.Error("RegisterRuntime: trying to change key manager", + "current_km", currentRt.KeyManager, + "new_km", newRt.KeyManager, + ) + return ErrRuntimeUpdateNotAllowed + } + return nil +} + // SortNodeList sorts the given node list to ensure a canonical order. func SortNodeList(nodes []*node.Node) { sort.Slice(nodes, func(i, j int) bool { @@ -1015,6 +1077,8 @@ type Genesis struct { // Runtimes is the initial list of runtimes. Runtimes []*SignedRuntime `json:"runtimes,omitempty"` + // SuspendedRuntimes is the list of suspended runtimes. + SuspendedRuntimes []*SignedRuntime `json:"suspended_runtimes,omitempty"` // Nodes is the initial list of nodes. Nodes []*node.SignedNode `json:"nodes,omitempty"` @@ -1031,11 +1095,11 @@ type ConsensusParameters struct { // DebugAllowUnroutableAddresses is true iff node registration should // allow unroutable addreses. - DebugAllowUnroutableAddresses bool `json:"debug_allow_unroutable_addresses"` + DebugAllowUnroutableAddresses bool `json:"debug_allow_unroutable_addresses,omitempty"` // DebugAllowRuntimeRegistration is true iff runtime registration should be // allowed outside of the genesis block. - DebugAllowRuntimeRegistration bool `json:"debug_allow_runtime_registration"` + DebugAllowRuntimeRegistration bool `json:"debug_allow_runtime_registration,omitempty"` // DebugAllowTestRuntimes is true iff test runtimes should be allowed to // be registered. @@ -1043,7 +1107,7 @@ type ConsensusParameters struct { // DebugBypassStake is true iff the registry should bypass all of the staking // related checks and operations. - DebugBypassStake bool `json:"debug_bypass_stake"` + DebugBypassStake bool `json:"debug_bypass_stake,omitempty"` // GasCosts are the registry transaction gas costs. GasCosts transaction.Costs `json:"gas_costs,omitempty"` @@ -1060,6 +1124,9 @@ const ( GasOpUnfreezeNode transaction.Op = "unfreeze_node" // GasOpRegisterRuntime is the gas operation identifier for runtime registration. GasOpRegisterRuntime transaction.Op = "register_runtime" + // GasOpRuntimeEpochMaintenance is the gas operation identifier for per-epoch + // runtime maintenance costs. + GasOpRuntimeEpochMaintenance transaction.Op = "runtime_epoch_maintenance" ) // SanityCheckEntities examines the entities table. @@ -1312,7 +1379,9 @@ func (g *Genesis) SanityCheck() error { } // Check runtimes. - seenRuntimes, err := SanityCheckRuntimes(g.Runtimes) + runtimes := append([]*SignedRuntime{}, g.Runtimes...) + runtimes = append(runtimes, g.SuspendedRuntimes...) + seenRuntimes, err := SanityCheckRuntimes(runtimes) if err != nil { return err } diff --git a/go/registry/api/runtime.go b/go/registry/api/runtime.go index a43cc06883a..6997cbe4fb8 100644 --- a/go/registry/api/runtime.go +++ b/go/registry/api/runtime.go @@ -246,6 +246,28 @@ type RuntimeGenesis struct { Round uint64 `json:"round"` } +// Equal compares vs another RuntimeGenesis for equality. +func (rtg *RuntimeGenesis) Equal(cmp *RuntimeGenesis) bool { + if !rtg.StateRoot.Equal(&cmp.StateRoot) { + return false + } + if rtg.Round != cmp.Round { + return false + } + if !rtg.State.Equal(cmp.State) { + return false + } + if len(rtg.StorageReceipts) != len(cmp.StorageReceipts) { + return false + } + for k, v := range rtg.StorageReceipts { + if !v.Equal(&cmp.StorageReceipts[k]) { + return false + } + } + return true +} + // SanityCheck does basic sanity checking of RuntimeGenesis. // isGenesis is true, if it is called during consensus chain init. 
func (rtg *RuntimeGenesis) SanityCheck(isGenesis bool) error { diff --git a/go/roothash/api/api.go b/go/roothash/api/api.go index a2dbd0ee512..aaac3edaae0 100644 --- a/go/roothash/api/api.go +++ b/go/roothash/api/api.go @@ -11,6 +11,7 @@ import ( "github.com/oasislabs/oasis-core/go/common/errors" "github.com/oasislabs/oasis-core/go/common/pubsub" "github.com/oasislabs/oasis-core/go/consensus/api/transaction" + "github.com/oasislabs/oasis-core/go/oasis-node/cmd/common/flags" "github.com/oasislabs/oasis-core/go/registry/api" "github.com/oasislabs/oasis-core/go/roothash/api/block" "github.com/oasislabs/oasis-core/go/roothash/api/commitment" @@ -41,6 +42,15 @@ var ( // ErrNotFound is the error returned when a block is not found. ErrNotFound = errors.New(ModuleName, 2, "roothash: block not found") + // ErrInvalidRuntime is the error returned when the passed runtime is invalid. + ErrInvalidRuntime = errors.New(ModuleName, 3, "roothash: invalid runtime") + + // ErrNoRound is the error returned when no round is in progress. + ErrNoRound = errors.New(ModuleName, 4, "roothash: no round is in progress") + + // ErrRuntimeSuspended is the error returned when the passed runtime is suspended. + ErrRuntimeSuspended = errors.New(ModuleName, 5, "roothash: runtime is suspended") + // MethodComputeCommit is the method name for compute commit submission. MethodComputeCommit = transaction.NewMethodName(ModuleName, "ComputeCommit", ComputeCommit{}) // MethodMergeCommit is the method name for merge commit submission. @@ -154,10 +164,30 @@ type MetricsMonitorable interface { // Genesis is the roothash genesis state. type Genesis struct { + // Parameters are the roothash consensus parameters. + Parameters ConsensusParameters `json:"params"` + // RuntimeStates is the per-runtime map of genesis blocks. RuntimeStates map[common.Namespace]*api.RuntimeGenesis `json:"runtime_states,omitempty"` } +// ConsensusParameters are the roothash consensus parameters. +type ConsensusParameters struct { + // GasCosts are the roothash transaction gas costs. + GasCosts transaction.Costs `json:"gas_costs,omitempty"` + + // DebugDoNotSuspendRuntimes is true iff runtimes should not be suspended + // for lack of paying maintenance fees. + DebugDoNotSuspendRuntimes bool `json:"debug_do_not_suspend_runtimes,omitempty"` +} + +const ( + // GasOpComputeCommit is the gas operation identifier for compute commits. + GasOpComputeCommit transaction.Op = "compute_commit" + // GasOpMergeCommit is the gas operation identifier for merge commits. + GasOpMergeCommit transaction.Op = "merge_commit" +) + // SanityCheckBlocks examines the blocks table. func SanityCheckBlocks(blocks map[common.Namespace]*block.Block) error { for _, blk := range blocks { @@ -172,6 +202,11 @@ func SanityCheckBlocks(blocks map[common.Namespace]*block.Block) error { // SanityCheck does basic sanity checking on the genesis state. func (g *Genesis) SanityCheck() error { + unsafeFlags := g.Parameters.DebugDoNotSuspendRuntimes + if unsafeFlags && !flags.DebugDontBlameOasis() { + return fmt.Errorf("roothash: sanity check failed: one or more unsafe debug flags set") + } + // Check blocks. for _, rtg := range g.RuntimeStates { if err := rtg.SanityCheck(true); err != nil { diff --git a/go/roothash/api/block/header.go b/go/roothash/api/block/header.go index 4442e2ee6b2..09841d56ce0 100644 --- a/go/roothash/api/block/header.go +++ b/go/roothash/api/block/header.go @@ -27,10 +27,17 @@ const ( RoundFailed HeaderType = 1 // EpochTransition is a header resulting from an epoch transition. 
+ // // Such a header contains no transactions but advances the round as // normal. // TODO: Consider renaming this to CommitteeTransition. EpochTransition HeaderType = 2 + + // Suspended is a header resulting from the runtime being suspended. + // + // Such a header contains no transactions but advances the round as + // normal. + Suspended HeaderType = 3 ) // Header is a block header. diff --git a/go/storage/client/client.go b/go/storage/client/client.go index df737c3980a..6b34329ee82 100644 --- a/go/storage/client/client.go +++ b/go/storage/client/client.go @@ -221,7 +221,10 @@ func (b *storageClientBackend) writeWithClient( if successes == 0 { return nil, errors.New("storage client: failed to write to any storage node") } else if successes < n { - b.logger.Warn("write operation was only successfully applied to %d out of %d connected nodes", successes, n) + b.logger.Warn("write operation only partially applied", + "connected_nodes", n, + "successful_writes", successes, + ) } return receipts, nil diff --git a/go/storage/mkvs/urkel/writelog/writelog.go b/go/storage/mkvs/urkel/writelog/writelog.go index 29e51d85229..838da9e6911 100644 --- a/go/storage/mkvs/urkel/writelog/writelog.go +++ b/go/storage/mkvs/urkel/writelog/writelog.go @@ -1,6 +1,7 @@ package writelog import ( + "bytes" "encoding/json" "github.com/oasislabs/oasis-core/go/storage/mkvs/urkel/node" @@ -11,6 +12,19 @@ import ( // The keys in the write log must be unique. type WriteLog []LogEntry +// Equal compares vs another write log for equality. +func (wl WriteLog) Equal(cmp WriteLog) bool { + if len(wl) != len(cmp) { + return false + } + for k, v := range wl { + if !v.Equal(&cmp[k]) { + return false + } + } + return true +} + // LogEntry is a write log entry. type LogEntry struct { _ struct{} `cbor:",toarray"` //nolint @@ -19,6 +33,17 @@ type LogEntry struct { Value []byte } +// Equal compares vs another log entry for equality. +func (k *LogEntry) Equal(cmp *LogEntry) bool { + if !bytes.Equal(k.Key, cmp.Key) { + return false + } + if !bytes.Equal(k.Value, cmp.Value) { + return false + } + return true +} + func (k *LogEntry) UnmarshalJSON(src []byte) error { var kv [2][]byte if err := json.Unmarshal(src, &kv); err != nil { diff --git a/go/worker/common/committee/group.go b/go/worker/common/committee/group.go index 99fda1f1118..0fa2dca01a1 100644 --- a/go/worker/common/committee/group.go +++ b/go/worker/common/committee/group.go @@ -243,6 +243,23 @@ func (g *Group) RoundTransition(ctx context.Context) { g.activeEpoch.cancelRoundCtx = cancel } +// Suspend processes a runtime suspension that just happened. +// +// Resumption will be processed as a regular epoch transition. +func (g *Group) Suspend(ctx context.Context) { + g.Lock() + defer g.Unlock() + + if g.activeEpoch == nil { + return + } + + // Cancel context for the previous epoch. + (g.activeEpoch.cancelRoundCtx)() + // Invalidate current epoch. + g.activeEpoch = nil +} + // EpochTransition processes an epoch transition that just happened. func (g *Group) EpochTransition(ctx context.Context, height int64) error { g.Lock() diff --git a/go/worker/common/committee/node.go b/go/worker/common/committee/node.go index f6d4d2af4c2..eaa37bc6ee1 100644 --- a/go/worker/common/committee/node.go +++ b/go/worker/common/committee/node.go @@ -182,6 +182,19 @@ func (n *Node) handleEpochTransitionLocked(height int64) { } } +// Guarded by n.CrossNode. +func (n *Node) handleSuspendLocked(height int64) { + n.logger.Warn("runtime has been suspended") + + // Suspend group. 
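+	// This cancels the current round context and invalidates the active epoch; committee
+	// membership is re-established on the next regular epoch transition.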
+ n.Group.Suspend(n.ctx) + + epoch := n.Group.GetEpochSnapshot() + for _, hooks := range n.hooks { + hooks.HandleEpochTransitionLocked(epoch) + } +} + // Guarded by n.CrossNode. func (n *Node) handleNewBlockLocked(blk *block.Block, height int64) { processedBlockCount.With(n.getMetricLabels()).Inc() @@ -225,6 +238,9 @@ func (n *Node) handleNewBlockLocked(blk *block.Block, height int64) { case block.EpochTransition: // Process an epoch transition. n.handleEpochTransitionLocked(height) + case block.Suspended: + // Process runtime being suspended. + n.handleSuspendLocked(height) default: n.logger.Error("invalid block type", "block", blk, @@ -253,13 +269,31 @@ func (n *Node) worker() { defer (n.cancelCtx)() // Wait for the runtime. - if _, err := n.Runtime.RegistryDescriptor(n.ctx); err != nil { + rt, err := n.Runtime.RegistryDescriptor(n.ctx) + if err != nil { n.logger.Error("failed to wait for registry descriptor", "err", err, ) return } + n.logger.Info("runtime is registered with the registry") + + // If the runtime requires a key manager, wait for the key manager to actually become available + // before processing any requests. + if rt.KeyManager != nil { + n.logger.Info("runtime indicates a key manager is required, waiting for it to be ready") + + if err = n.KeyManagerClient.WaitReady(n.ctx, rt.ID); err != nil { + n.logger.Error("failed to wait for key manager", + "err", err, + ) + return + } + + n.logger.Info("runtime has a key manager available") + } + // Start watching roothash blocks. blocks, blocksSub, err := n.Roothash.WatchBlocks(n.Runtime.ID()) if err != nil { diff --git a/runtime/src/common/bytes.rs b/runtime/src/common/bytes.rs index d382023da2d..af063c18fea 100644 --- a/runtime/src/common/bytes.rs +++ b/runtime/src/common/bytes.rs @@ -226,8 +226,6 @@ macro_rules! impl_bytes { #[cfg(test)] mod tests { - use super::*; - // Use hash of an empty string as a test key. const TEST_KEY_BYTES: [u8; 32] = [ 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, 0x14, @@ -235,10 +233,16 @@ mod tests { 0x96, 0x7a, ]; + impl_bytes!(TestKey, 32, "test key"); + + #[test] + fn test_length() { + assert_eq!(TestKey::len(), 32); + } + #[test] fn test_serde_base64() { // Serialize. - impl_bytes!(TestKey, 32, "test key"); let test_key = TestKey(TEST_KEY_BYTES); let test_key_str = serde_json::to_string(&test_key).unwrap(); assert_eq!( @@ -254,8 +258,6 @@ mod tests { #[test] fn test_serde_cbor() { // Serialize. - impl_bytes!(TestKey, 32, "test key"); - let test_key = TestKey(TEST_KEY_BYTES); let test_key_vec = serde_cbor::to_vec(&test_key).unwrap();