diff --git a/.changelog/4694.feature.md b/.changelog/4694.feature.md index e5ee282aaae..e3613874566 100644 --- a/.changelog/4694.feature.md +++ b/.changelog/4694.feature.md @@ -1 +1 @@ -runtime: verify consensus state integrity for queries +runtime: Enable dispatcher to verify state integrity for queries diff --git a/.changelog/4830.bugfix.md b/.changelog/4830.bugfix.md deleted file mode 100644 index fba1b48b53f..00000000000 --- a/.changelog/4830.bugfix.md +++ /dev/null @@ -1 +0,0 @@ -runtime: fix trusted state verification for queries diff --git a/.changelog/4830.feature.md b/.changelog/4830.feature.md new file mode 100644 index 00000000000..e3613874566 --- /dev/null +++ b/.changelog/4830.feature.md @@ -0,0 +1 @@ +runtime: Enable dispatcher to verify state integrity for queries diff --git a/.changelog/4904.feature.2.md b/.changelog/4904.feature.2.md new file mode 100644 index 00000000000..e3613874566 --- /dev/null +++ b/.changelog/4904.feature.2.md @@ -0,0 +1 @@ +runtime: Enable dispatcher to verify state integrity for queries diff --git a/.changelog/4904.feature.md b/.changelog/4904.feature.md new file mode 100644 index 00000000000..05f87538460 --- /dev/null +++ b/.changelog/4904.feature.md @@ -0,0 +1 @@ +runtime: Support consensus event queries diff --git a/go/common/version/version.go b/go/common/version/version.go index 10b25a37272..6103b1c5259 100644 --- a/go/common/version/version.go +++ b/go/common/version/version.go @@ -131,7 +131,7 @@ var ( // the runtime. // // NOTE: This version must be synced with runtime/src/common/version.rs. - RuntimeHostProtocol = Version{Major: 5, Minor: 0, Patch: 0} + RuntimeHostProtocol = Version{Major: 5, Minor: 1, Patch: 0} // RuntimeCommitteeProtocol versions the P2P protocol used by the runtime // committee members. diff --git a/go/consensus/api/grpc.go b/go/consensus/api/grpc.go index c1da213ea87..8c1dcf795aa 100644 --- a/go/consensus/api/grpc.go +++ b/go/consensus/api/grpc.go @@ -54,6 +54,8 @@ var ( // methodGetLightBlock is the GetLightBlock method. methodGetLightBlock = lightServiceName.NewMethod("GetLightBlock", int64(0)) + // methodGetLightBlockForState is the GetLightBlockForState method. + methodGetLightBlockForState = lightServiceName.NewMethod("GetLightBlockForState", int64(0)) // methodGetParameters is the GetParameters method. methodGetParameters = lightServiceName.NewMethod("GetParameters", int64(0)) // methodStateSyncGet is the StateSyncGet method. 
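> Annotation (not part of the patch): the retry contract for the new method is documented in `go/consensus/api/light.go` below — callers receive `ErrVersionNotFound` until the backing block is available. A minimal usage sketch; the helper name, polling loop, and one-second backoff are illustrative assumptions, not part of this change:

```go
package example

import (
	"context"
	"errors"
	"time"

	consensus "github.com/oasisprotocol/oasis-core/go/consensus/api"
)

// lightBlockForState polls until the light block committing to the state
// after executing the block at the given height becomes available.
func lightBlockForState(ctx context.Context, lb consensus.LightClientBackend, height int64) (*consensus.LightBlock, error) {
	for {
		blk, err := lb.GetLightBlockForState(ctx, height)
		switch {
		case err == nil:
			return blk, nil
		case errors.Is(err, consensus.ErrVersionNotFound):
			// Not yet available; back off and retry (interval is an assumption).
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(time.Second):
			}
		default:
			return nil, err
		}
	}
}
```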
@@ -139,6 +141,10 @@ var (
 			MethodName: methodGetLightBlock.ShortName(),
 			Handler:    handlerGetLightBlock,
 		},
+		{
+			MethodName: methodGetLightBlockForState.ShortName(),
+			Handler:    handlerGetLightBlockForState,
+		},
 		{
 			MethodName: methodGetParameters.ShortName(),
 			Handler:    handlerGetParameters,
@@ -474,6 +480,29 @@ func handlerGetLightBlock(
 	return interceptor(ctx, height, info, handler)
 }
 
+func handlerGetLightBlockForState(
+	srv interface{},
+	ctx context.Context,
+	dec func(interface{}) error,
+	interceptor grpc.UnaryServerInterceptor,
+) (interface{}, error) {
+	var height int64
+	if err := dec(&height); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LightClientBackend).GetLightBlockForState(ctx, height)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: methodGetLightBlockForState.FullName(),
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LightClientBackend).GetLightBlockForState(ctx, req.(int64))
+	}
+	return interceptor(ctx, height, info, handler)
+}
+
 func handlerGetParameters(
 	srv interface{},
 	ctx context.Context,
@@ -636,6 +665,15 @@ func (c *consensusLightClient) GetLightBlock(ctx context.Context, height int64)
 	return &rsp, nil
 }
 
+// Implements LightClientBackend.
+func (c *consensusLightClient) GetLightBlockForState(ctx context.Context, height int64) (*LightBlock, error) {
+	var rsp LightBlock
+	if err := c.conn.Invoke(ctx, methodGetLightBlockForState.FullName(), height, &rsp); err != nil {
+		return nil, err
+	}
+	return &rsp, nil
+}
+
 // Implements LightClientBackend.
 func (c *consensusLightClient) GetParameters(ctx context.Context, height int64) (*Parameters, error) {
 	var rsp Parameters
diff --git a/go/consensus/api/light.go b/go/consensus/api/light.go
index 2a65da3905d..a9b3b374050 100644
--- a/go/consensus/api/light.go
+++ b/go/consensus/api/light.go
@@ -14,6 +14,13 @@ type LightClientBackend interface {
 	// client verification.
 	GetLightBlock(ctx context.Context, height int64) (*LightBlock, error)
 
+	// GetLightBlockForState returns a light block for the state as of executing the consensus layer
+	// block at the specified height. Note that the height of the returned block may differ
+	// depending on consensus layer implementation details.
+	//
+	// In case a light block for the given height is not yet available, it returns ErrVersionNotFound.
+	GetLightBlockForState(ctx context.Context, height int64) (*LightBlock, error)
+
 	// GetParameters returns the consensus parameters for a specific height.
 	GetParameters(ctx context.Context, height int64) (*Parameters, error)
 
diff --git a/go/consensus/tendermint/full/light.go b/go/consensus/tendermint/full/light.go
index 4b5ed3b8ad3..8f7df101cf0 100644
--- a/go/consensus/tendermint/full/light.go
+++ b/go/consensus/tendermint/full/light.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	tmcore "github.com/tendermint/tendermint/rpc/core"
+	tmstate "github.com/tendermint/tendermint/state"
 	tmtypes "github.com/tendermint/tendermint/types"
 
 	"github.com/oasisprotocol/oasis-core/go/common/cbor"
@@ -14,8 +15,7 @@ import (
 	"github.com/oasisprotocol/oasis-core/go/storage/mkvs/syncer"
 )
 
-// Implements LightClientBackend.
-func (n *commonNode) GetLightBlock(ctx context.Context, height int64) (*consensusAPI.LightBlock, error) { +func (n *commonNode) getLightBlock(ctx context.Context, height int64, allowPending bool) (*consensusAPI.LightBlock, error) { if err := n.ensureStarted(ctx); err != nil { return nil, err } @@ -33,9 +33,27 @@ func (n *commonNode) GetLightBlock(ctx context.Context, height int64) (*consensu return nil, consensusAPI.ErrVersionNotFound } - if commit, cerr := tmcore.Commit(n.rpcCtx, &tmHeight); cerr == nil && commit.Header != nil { + commit, err := tmcore.Commit(n.rpcCtx, &tmHeight) + if err == nil && commit.Header != nil { lb.SignedHeader = &commit.SignedHeader tmHeight = commit.Header.Height + } else if allowPending { + // The specified height seems to be for the "next" block that has not yet been finalized. We + // construct a "pending" block instead (this block cannot be verified by a light client as + // it doesn't have any commits). + var state tmstate.State + state, err = n.stateStore.Load() + if err != nil { + return nil, fmt.Errorf("tendermint: failed to fetch latest blockchain state: %w", err) + } + + commit := tmtypes.NewCommit(height, 0, tmtypes.BlockID{}, nil) + var proposerAddr [20]byte + blk, _ := state.MakeBlock(height, nil, commit, nil, proposerAddr[:]) + lb.SignedHeader = &tmtypes.SignedHeader{ + Header: &blk.Header, + Commit: commit, + } } protoLb, err := lb.ToProto() if err != nil { @@ -59,6 +77,16 @@ func (n *commonNode) GetLightBlock(ctx context.Context, height int64) (*consensu }, nil } +// Implements LightClientBackend. +func (n *commonNode) GetLightBlock(ctx context.Context, height int64) (*consensusAPI.LightBlock, error) { + return n.getLightBlock(ctx, height, false) +} + +// Implements LightClientBackend. +func (n *commonNode) GetLightBlockForState(ctx context.Context, height int64) (*consensusAPI.LightBlock, error) { + return n.getLightBlock(ctx, height+1, true) +} + // Implements LightClientBackend. func (n *commonNode) GetParameters(ctx context.Context, height int64) (*consensusAPI.Parameters, error) { if err := n.ensureStarted(ctx); err != nil { diff --git a/go/consensus/tendermint/light/client.go b/go/consensus/tendermint/light/client.go index 56b71a314dd..24cbdfd292f 100644 --- a/go/consensus/tendermint/light/client.go +++ b/go/consensus/tendermint/light/client.go @@ -134,6 +134,11 @@ func (lc *lightClient) GetLightBlock(ctx context.Context, height int64) (*consen return lc.getPrimary().GetLightBlock(ctx, height) } +// Implements consensus.LightClientBackend. +func (lc *lightClient) GetLightBlockForState(ctx context.Context, height int64) (*consensus.LightBlock, error) { + return lc.getPrimary().GetLightBlockForState(ctx, height) +} + // Implements consensus.LightClientBackend. func (lc *lightClient) GetParameters(ctx context.Context, height int64) (*consensus.Parameters, error) { return lc.getPrimary().GetParameters(ctx, height) diff --git a/go/consensus/tendermint/seed/seed.go b/go/consensus/tendermint/seed/seed.go index 1f0b633b7a5..5aef4e55618 100644 --- a/go/consensus/tendermint/seed/seed.go +++ b/go/consensus/tendermint/seed/seed.go @@ -246,6 +246,11 @@ func (srv *seedService) GetLightBlock(ctx context.Context, height int64) (*conse return nil, consensus.ErrUnsupported } +// Implements consensus.Backend. +func (srv *seedService) GetLightBlockForState(ctx context.Context, height int64) (*consensus.LightBlock, error) { + return nil, consensus.ErrUnsupported +} + // Implements consensus.Backend. 
func (srv *seedService) GetParameters(ctx context.Context, height int64) (*consensus.Parameters, error) { return nil, consensus.ErrUnsupported diff --git a/go/runtime/host/protocol/types.go b/go/runtime/host/protocol/types.go index e3b279a7984..db8de15208d 100644 --- a/go/runtime/host/protocol/types.go +++ b/go/runtime/host/protocol/types.go @@ -14,6 +14,7 @@ import ( "github.com/oasisprotocol/oasis-core/go/common/sgx/quote" "github.com/oasisprotocol/oasis-core/go/common/version" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" + consensusResults "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction/results" roothash "github.com/oasisprotocol/oasis-core/go/roothash/api" "github.com/oasisprotocol/oasis-core/go/roothash/api/block" "github.com/oasisprotocol/oasis-core/go/roothash/api/commitment" @@ -95,20 +96,22 @@ type Body struct { RuntimeConsensusSyncResponse *Empty `json:",omitempty"` // Host interface. - HostRPCCallRequest *HostRPCCallRequest `json:",omitempty"` - HostRPCCallResponse *HostRPCCallResponse `json:",omitempty"` - HostStorageSyncRequest *HostStorageSyncRequest `json:",omitempty"` - HostStorageSyncResponse *HostStorageSyncResponse `json:",omitempty"` - HostLocalStorageGetRequest *HostLocalStorageGetRequest `json:",omitempty"` - HostLocalStorageGetResponse *HostLocalStorageGetResponse `json:",omitempty"` - HostLocalStorageSetRequest *HostLocalStorageSetRequest `json:",omitempty"` - HostLocalStorageSetResponse *Empty `json:",omitempty"` - HostFetchConsensusBlockRequest *HostFetchConsensusBlockRequest `json:",omitempty"` - HostFetchConsensusBlockResponse *HostFetchConsensusBlockResponse `json:",omitempty"` - HostFetchTxBatchRequest *HostFetchTxBatchRequest `json:",omitempty"` - HostFetchTxBatchResponse *HostFetchTxBatchResponse `json:",omitempty"` - HostFetchGenesisHeightRequest *HostFetchGenesisHeightRequest `json:",omitempty"` - HostFetchGenesisHeightResponse *HostFetchGenesisHeightResponse `json:",omitempty"` + HostRPCCallRequest *HostRPCCallRequest `json:",omitempty"` + HostRPCCallResponse *HostRPCCallResponse `json:",omitempty"` + HostStorageSyncRequest *HostStorageSyncRequest `json:",omitempty"` + HostStorageSyncResponse *HostStorageSyncResponse `json:",omitempty"` + HostLocalStorageGetRequest *HostLocalStorageGetRequest `json:",omitempty"` + HostLocalStorageGetResponse *HostLocalStorageGetResponse `json:",omitempty"` + HostLocalStorageSetRequest *HostLocalStorageSetRequest `json:",omitempty"` + HostLocalStorageSetResponse *Empty `json:",omitempty"` + HostFetchConsensusBlockRequest *HostFetchConsensusBlockRequest `json:",omitempty"` + HostFetchConsensusBlockResponse *HostFetchConsensusBlockResponse `json:",omitempty"` + HostFetchConsensusEventsRequest *HostFetchConsensusEventsRequest `json:",omitempty"` + HostFetchConsensusEventsResponse *HostFetchConsensusEventsResponse `json:",omitempty"` + HostFetchTxBatchRequest *HostFetchTxBatchRequest `json:",omitempty"` + HostFetchTxBatchResponse *HostFetchTxBatchResponse `json:",omitempty"` + HostFetchGenesisHeightRequest *HostFetchGenesisHeightRequest `json:",omitempty"` + HostFetchGenesisHeightResponse *HostFetchGenesisHeightResponse `json:",omitempty"` } // Type returns the message type by determining the name of the first non-nil member. @@ -480,6 +483,30 @@ type HostFetchConsensusBlockResponse struct { Block consensus.LightBlock `json:"block"` } +// EventKind is the consensus event kind. +type EventKind uint8 + +// Supported consensus event kinds. 
+const ( + EventKindStaking EventKind = 1 + EventKindRegistry EventKind = 2 + EventKindRootHash EventKind = 3 + EventKindGovernance EventKind = 4 +) + +// HostFetchConsensusEventsRequest is a request to host to fetch the consensus events for the given +// height. +type HostFetchConsensusEventsRequest struct { + Height uint64 `json:"height"` + Kind EventKind `json:"kind"` +} + +// HostFetchConsensusEventsResponse is a response from host fetching the consensus events for the +// given height. +type HostFetchConsensusEventsResponse struct { + Events []*consensusResults.Event `json:"events,omitempty"` +} + // HostFetchGenesisHeightRequest is a request to host to fetch the consensus genesis height. type HostFetchGenesisHeightRequest struct{} diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go index 4c51a61a890..163243d8db7 100644 --- a/go/runtime/registry/host.go +++ b/go/runtime/registry/host.go @@ -13,6 +13,7 @@ import ( "github.com/oasisprotocol/oasis-core/go/common/logging" "github.com/oasisprotocol/oasis-core/go/common/version" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" + consensusResults "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction/results" keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" registry "github.com/oasisprotocol/oasis-core/go/registry/api" "github.com/oasisprotocol/oasis-core/go/roothash/api/block" @@ -162,114 +163,208 @@ type runtimeHostHandler struct { consensus consensus.Backend } -// Implements protocol.Handler. -func (h *runtimeHostHandler) Handle(ctx context.Context, body *protocol.Body) (*protocol.Body, error) { - // RPC. - if body.HostRPCCallRequest != nil { - switch body.HostRPCCallRequest.Endpoint { - case runtimeKeymanager.EnclaveRPCEndpoint: - // Call into the remote key manager. - kmCli, err := h.env.GetKeyManagerClient(ctx) - if err != nil { - return nil, err - } - res, err := kmCli.CallEnclave(ctx, body.HostRPCCallRequest.Request, body.HostRPCCallRequest.PeerFeedback) - if err != nil { - return nil, err - } - return &protocol.Body{HostRPCCallResponse: &protocol.HostRPCCallResponse{ - Response: cbor.FixSliceForSerde(res), - }}, nil - default: - return nil, errEndpointNotSupported - } - } - // Storage. - if body.HostStorageSyncRequest != nil { - rq := body.HostStorageSyncRequest - - var rs syncer.ReadSyncer - switch rq.Endpoint { - case protocol.HostStorageEndpointRuntime: - // Runtime storage. - rs = h.runtime.Storage() - case protocol.HostStorageEndpointConsensus: - // Consensus state storage. - rs = h.consensus.State() - default: - return nil, errEndpointNotSupported - } - - var rsp *storage.ProofResponse - var err error - switch { - case rq.SyncGet != nil: - rsp, err = rs.SyncGet(ctx, rq.SyncGet) - case rq.SyncGetPrefixes != nil: - rsp, err = rs.SyncGetPrefixes(ctx, rq.SyncGetPrefixes) - case rq.SyncIterate != nil: - rsp, err = rs.SyncIterate(ctx, rq.SyncIterate) - default: - return nil, errMethodNotSupported +func (h *runtimeHostHandler) handleHostRPCCall( + ctx context.Context, + request *protocol.HostRPCCallRequest, +) (*protocol.Body, error) { + switch request.Endpoint { + case runtimeKeymanager.EnclaveRPCEndpoint: + // Call into the remote key manager. 
+ kmCli, err := h.env.GetKeyManagerClient(ctx) + if err != nil { + return nil, err } + res, err := kmCli.CallEnclave(ctx, request.Request, request.PeerFeedback) if err != nil { return nil, err } + return &protocol.Body{HostRPCCallResponse: &protocol.HostRPCCallResponse{ + Response: cbor.FixSliceForSerde(res), + }}, nil + default: + return nil, errEndpointNotSupported + } +} - return &protocol.Body{HostStorageSyncResponse: &protocol.HostStorageSyncResponse{ProofResponse: rsp}}, nil +func (h *runtimeHostHandler) handleHostStorageSync( + ctx context.Context, + request *protocol.HostStorageSyncRequest, +) (*protocol.Body, error) { + var rs syncer.ReadSyncer + switch request.Endpoint { + case protocol.HostStorageEndpointRuntime: + // Runtime storage. + rs = h.runtime.Storage() + case protocol.HostStorageEndpointConsensus: + // Consensus state storage. + rs = h.consensus.State() + default: + return nil, errEndpointNotSupported + } + + var rsp *storage.ProofResponse + var err error + switch { + case request.SyncGet != nil: + rsp, err = rs.SyncGet(ctx, request.SyncGet) + case request.SyncGetPrefixes != nil: + rsp, err = rs.SyncGetPrefixes(ctx, request.SyncGetPrefixes) + case request.SyncIterate != nil: + rsp, err = rs.SyncIterate(ctx, request.SyncIterate) + default: + return nil, errMethodNotSupported + } + if err != nil { + return nil, err } - // Local storage. - if body.HostLocalStorageGetRequest != nil { + + return &protocol.Body{HostStorageSyncResponse: &protocol.HostStorageSyncResponse{ProofResponse: rsp}}, nil +} + +func (h *runtimeHostHandler) handleHostLocalStorage( + ctx context.Context, + body *protocol.Body, +) (*protocol.Body, error) { + switch { + case body.HostLocalStorageGetRequest != nil: value, err := h.runtime.LocalStorage().Get(body.HostLocalStorageGetRequest.Key) if err != nil { return nil, err } return &protocol.Body{HostLocalStorageGetResponse: &protocol.HostLocalStorageGetResponse{Value: value}}, nil - } - if body.HostLocalStorageSetRequest != nil { + case body.HostLocalStorageSetRequest != nil: if err := h.runtime.LocalStorage().Set(body.HostLocalStorageSetRequest.Key, body.HostLocalStorageSetRequest.Value); err != nil { return nil, err } return &protocol.Body{HostLocalStorageSetResponse: &protocol.Empty{}}, nil + default: + return nil, errMethodNotSupported + } +} + +func (h *runtimeHostHandler) handleHostFetchConsensusBlock( + ctx context.Context, + request *protocol.HostFetchConsensusBlockRequest, +) (*protocol.Body, error) { + lb, err := h.consensus.GetLightBlock(ctx, int64(request.Height)) + if err != nil { + return nil, err } - // Consensus light client. 
- if body.HostFetchConsensusBlockRequest != nil { - lb, err := h.consensus.GetLightBlock(ctx, int64(body.HostFetchConsensusBlockRequest.Height)) + return &protocol.Body{HostFetchConsensusBlockResponse: &protocol.HostFetchConsensusBlockResponse{ + Block: *lb, + }}, nil +} + +func (h *runtimeHostHandler) handleHostFetchConsensusEvents( + ctx context.Context, + request *protocol.HostFetchConsensusEventsRequest, +) (*protocol.Body, error) { + var evs []*consensusResults.Event + switch request.Kind { + case protocol.EventKindStaking: + sevs, err := h.consensus.Staking().GetEvents(ctx, int64(request.Height)) if err != nil { return nil, err } - return &protocol.Body{HostFetchConsensusBlockResponse: &protocol.HostFetchConsensusBlockResponse{ - Block: *lb, - }}, nil - } - if body.HostFetchGenesisHeightRequest != nil { - doc, err := h.consensus.GetGenesisDocument(ctx) + evs = make([]*consensusResults.Event, 0, len(sevs)) + for _, sev := range sevs { + evs = append(evs, &consensusResults.Event{Staking: sev}) + } + case protocol.EventKindRegistry: + revs, err := h.consensus.Registry().GetEvents(ctx, int64(request.Height)) if err != nil { return nil, err } - return &protocol.Body{HostFetchGenesisHeightResponse: &protocol.HostFetchGenesisHeightResponse{ - Height: uint64(doc.Height), - }}, nil - } - // Transaction pool. - if rq := body.HostFetchTxBatchRequest; rq != nil { - txPool, err := h.env.GetTxPool(ctx) + evs = make([]*consensusResults.Event, 0, len(revs)) + for _, rev := range revs { + evs = append(evs, &consensusResults.Event{Registry: rev}) + } + case protocol.EventKindRootHash: + revs, err := h.consensus.RootHash().GetEvents(ctx, int64(request.Height)) if err != nil { return nil, err } - - batch := txPool.GetSchedulingExtra(rq.Offset, rq.Limit) - raw := make([][]byte, 0, len(batch)) - for _, tx := range batch { - raw = append(raw, tx.Raw()) + evs = make([]*consensusResults.Event, 0, len(revs)) + for _, rev := range revs { + evs = append(evs, &consensusResults.Event{RootHash: rev}) + } + case protocol.EventKindGovernance: + gevs, err := h.consensus.Governance().GetEvents(ctx, int64(request.Height)) + if err != nil { + return nil, err + } + evs = make([]*consensusResults.Event, 0, len(gevs)) + for _, gev := range gevs { + evs = append(evs, &consensusResults.Event{Governance: gev}) } + default: + return nil, errMethodNotSupported + } + return &protocol.Body{HostFetchConsensusEventsResponse: &protocol.HostFetchConsensusEventsResponse{ + Events: evs, + }}, nil +} - return &protocol.Body{HostFetchTxBatchResponse: &protocol.HostFetchTxBatchResponse{ - Batch: raw, - }}, nil +func (h *runtimeHostHandler) handleHostFetchGenesisHeight( + ctx context.Context, + request *protocol.HostFetchGenesisHeightRequest, +) (*protocol.Body, error) { + doc, err := h.consensus.GetGenesisDocument(ctx) + if err != nil { + return nil, err + } + return &protocol.Body{HostFetchGenesisHeightResponse: &protocol.HostFetchGenesisHeightResponse{ + Height: uint64(doc.Height), + }}, nil +} + +func (h *runtimeHostHandler) handleHostFetchTxBatch( + ctx context.Context, + request *protocol.HostFetchTxBatchRequest, +) (*protocol.Body, error) { + txPool, err := h.env.GetTxPool(ctx) + if err != nil { + return nil, err } - return nil, errMethodNotSupported + batch := txPool.GetSchedulingExtra(request.Offset, request.Limit) + raw := make([][]byte, 0, len(batch)) + for _, tx := range batch { + raw = append(raw, tx.Raw()) + } + + return &protocol.Body{HostFetchTxBatchResponse: &protocol.HostFetchTxBatchResponse{ + Batch: raw, + }}, nil +} + 
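> Annotation (not part of the patch): with the per-request helpers factored out above, the rewritten `Handle` below reduces to a flat dispatch switch. A hedged sketch of the new consensus-events message pair in action — `handler` stands in for any `protocol.Handler` such as the runtime host handler; the standalone helper and its reporting are illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/oasisprotocol/oasis-core/go/runtime/host/protocol"
)

// fetchStakingTransfers shows the shape of the HostFetchConsensusEvents
// round-trip: request events of one kind at a height, then inspect the
// per-backend fields of each returned event.
func fetchStakingTransfers(ctx context.Context, handler protocol.Handler, height uint64) error {
	rsp, err := handler.Handle(ctx, &protocol.Body{
		HostFetchConsensusEventsRequest: &protocol.HostFetchConsensusEventsRequest{
			Height: height,
			Kind:   protocol.EventKindStaking,
		},
	})
	if err != nil {
		return err
	}
	for _, ev := range rsp.HostFetchConsensusEventsResponse.Events {
		// Each result event wraps exactly one backend-specific event.
		if ev.Staking != nil && ev.Staking.Transfer != nil {
			fmt.Printf("staking transfer at height %d: %v\n", height, ev.Staking.Transfer.Amount)
		}
	}
	return nil
}
```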
+// Implements protocol.Handler. +func (h *runtimeHostHandler) Handle(ctx context.Context, body *protocol.Body) (*protocol.Body, error) { + switch { + case body.HostRPCCallRequest != nil: + // RPC. + return h.handleHostRPCCall(ctx, body.HostRPCCallRequest) + case body.HostStorageSyncRequest != nil: + // Storage. + return h.handleHostStorageSync(ctx, body.HostStorageSyncRequest) + case body.HostLocalStorageGetRequest != nil, body.HostLocalStorageSetRequest != nil: + // Local storage. + return h.handleHostLocalStorage(ctx, body) + case body.HostFetchConsensusBlockRequest != nil: + // Consensus light client. + return h.handleHostFetchConsensusBlock(ctx, body.HostFetchConsensusBlockRequest) + case body.HostFetchConsensusEventsRequest != nil: + // Consensus events. + return h.handleHostFetchConsensusEvents(ctx, body.HostFetchConsensusEventsRequest) + case body.HostFetchGenesisHeightRequest != nil: + // Consensus genesis height. + return h.handleHostFetchGenesisHeight(ctx, body.HostFetchGenesisHeightRequest) + case body.HostFetchTxBatchRequest != nil: + // Transaction pool. + return h.handleHostFetchTxBatch(ctx, body.HostFetchTxBatchRequest) + default: + return nil, errMethodNotSupported + } } // runtimeHostNotifier is a runtime host notifier suitable for compute runtimes. It handles things diff --git a/go/staking/api/api_test.go b/go/staking/api/api_test.go index b6b38635ab9..83a3c903713 100644 --- a/go/staking/api/api_test.go +++ b/go/staking/api/api_test.go @@ -8,6 +8,7 @@ import ( "github.com/oasisprotocol/oasis-core/go/beacon/api" "github.com/oasisprotocol/oasis-core/go/common/cbor" + "github.com/oasisprotocol/oasis-core/go/common/crypto/hash" "github.com/oasisprotocol/oasis-core/go/common/crypto/signature" "github.com/oasisprotocol/oasis-core/go/common/quantity" ) @@ -506,3 +507,151 @@ func TestReclaimEscrowResultsSerialization(t *testing.T) { require.EqualValues(tc.rr, dec, "ReclaimEscrow serialization should round-trip") } } + +func TestEventsSerialization(t *testing.T) { + require := require.New(t) + + pk1 := signature.NewPublicKey("aaafffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + addr1 := NewAddress(pk1) + pk2 := signature.NewPublicKey("bbbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + addr2 := NewAddress(pk2) + var txHash hash.Hash + txHash.Empty() + + // NOTE: These cases should be synced with tests in runtime/src/consensus/staking.rs. + for _, tc := range []struct { + ev Event + expectedBase64 string + }{ + {Event{}, "oWd0eF9oYXNoWCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="}, + {Event{Height: 42}, "omZoZWlnaHQYKmd0eF9oYXNoWCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="}, + {Event{Height: 42, TxHash: txHash}, "omZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg=="}, + + // Transfer. + { + Event{ + Height: 42, + TxHash: txHash, + Transfer: &TransferEvent{ + From: addr1, + To: addr2, + Amount: mustInitQuantity(t, 100), + }, + }, + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWemh0cmFuc2ZlcqNidG9VALkSOXiV5kcMUfq+zJ3cg/cK7YahZGZyb21VACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFk", + }, + + // Burn. + { + Event{ + Height: 42, + TxHash: txHash, + Burn: &BurnEvent{ + Owner: addr1, + Amount: mustInitQuantity(t, 100), + }, + }, + "o2RidXJuomVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNmYW1vdW50QWRmaGVpZ2h0GCpndHhfaGFzaFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlno=", + }, + + // Escrow. 
+ { + Event{ + Height: 42, + TxHash: txHash, + Escrow: &EscrowEvent{ + Add: &AddEscrowEvent{ + Owner: addr1, + Escrow: addr2, + Amount: mustInitQuantity(t, 100), + NewShares: mustInitQuantity(t, 50), + }, + }, + }, + "o2Zlc2Nyb3ehY2FkZKRlb3duZXJVACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFkZmVzY3Jvd1UAuRI5eJXmRwxR+r7MndyD9wrthqFqbmV3X3NoYXJlc0EyZmhlaWdodBgqZ3R4X2hhc2hYIMZyuNHvVu0oq4fDYixRFAab3TrXuPlzdJjQwB7O8JZ6", + }, + { + Event{ + Height: 42, + TxHash: txHash, + Escrow: &EscrowEvent{ + Take: &TakeEscrowEvent{ + Owner: addr1, + Amount: mustInitQuantity(t, 100), + }, + }, + }, + "o2Zlc2Nyb3ehZHRha2WiZW93bmVyVQAgchQ0iT9hbAmBSOLPn5nj4oJuE2ZhbW91bnRBZGZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg==", + }, + { + Event{ + Height: 42, + TxHash: txHash, + Escrow: &EscrowEvent{ + DebondingStart: &DebondingStartEscrowEvent{ + Owner: addr1, + Escrow: addr2, + Amount: mustInitQuantity(t, 100), + ActiveShares: mustInitQuantity(t, 50), + DebondingShares: mustInitQuantity(t, 25), + DebondEndTime: 42, + }, + }, + }, + "o2Zlc2Nyb3ehb2RlYm9uZGluZ19zdGFydKZlb3duZXJVACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFkZmVzY3Jvd1UAuRI5eJXmRwxR+r7MndyD9wrthqFtYWN0aXZlX3NoYXJlc0Eyb2RlYm9uZF9lbmRfdGltZRgqcGRlYm9uZGluZ19zaGFyZXNBGWZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg==", + }, + { + Event{ + Height: 42, + TxHash: txHash, + Escrow: &EscrowEvent{ + Reclaim: &ReclaimEscrowEvent{ + Owner: addr1, + Escrow: addr2, + Amount: mustInitQuantity(t, 100), + Shares: mustInitQuantity(t, 25), + }, + }, + }, + "o2Zlc2Nyb3ehZ3JlY2xhaW2kZW93bmVyVQAgchQ0iT9hbAmBSOLPn5nj4oJuE2ZhbW91bnRBZGZlc2Nyb3dVALkSOXiV5kcMUfq+zJ3cg/cK7YahZnNoYXJlc0EZZmhlaWdodBgqZ3R4X2hhc2hYIMZyuNHvVu0oq4fDYixRFAab3TrXuPlzdJjQwB7O8JZ6", + }, + + // Allowance change. + { + Event{ + Height: 42, + TxHash: txHash, + AllowanceChange: &AllowanceChangeEvent{ + Owner: addr1, + Beneficiary: addr2, + Allowance: mustInitQuantity(t, 100), + Negative: false, + AmountChange: mustInitQuantity(t, 50), + }, + }, + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWenBhbGxvd2FuY2VfY2hhbmdlpGVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNpYWxsb3dhbmNlQWRrYmVuZWZpY2lhcnlVALkSOXiV5kcMUfq+zJ3cg/cK7YahbWFtb3VudF9jaGFuZ2VBMg==", + }, + { + Event{ + Height: 42, + TxHash: txHash, + AllowanceChange: &AllowanceChangeEvent{ + Owner: addr1, + Beneficiary: addr2, + Allowance: mustInitQuantity(t, 100), + Negative: true, + AmountChange: mustInitQuantity(t, 50), + }, + }, + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWenBhbGxvd2FuY2VfY2hhbmdlpWVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNobmVnYXRpdmX1aWFsbG93YW5jZUFka2JlbmVmaWNpYXJ5VQC5Ejl4leZHDFH6vsyd3IP3Cu2GoW1hbW91bnRfY2hhbmdlQTI=", + }, + } { + enc := cbor.Marshal(tc.ev) + require.Equal(tc.expectedBase64, base64.StdEncoding.EncodeToString(enc), "serialization should match") + + var dec Event + err := cbor.Unmarshal(enc, &dec) + require.NoError(err, "Unmarshal") + require.EqualValues(tc.ev, dec, "Event serialization should round-trip") + } +} diff --git a/go/worker/client/committee/node.go b/go/worker/client/committee/node.go index 78e19b95157..28114e3a5a8 100644 --- a/go/worker/client/committee/node.go +++ b/go/worker/client/committee/node.go @@ -154,8 +154,8 @@ func (n *Node) Query(ctx context.Context, round uint64, method string, args []by return nil, fmt.Errorf("client: failed to fetch annotated block from history: %w", err) } - // Get consensus state at queried round + 1 (tendermint light clients are a block behind). 
-	lb, err := n.commonNode.Consensus.GetLightBlock(ctx, annBlk.Height+1)
+	// Get the consensus light block for the state after executing the block at the given height.
+	lb, err := n.commonNode.Consensus.GetLightBlockForState(ctx, annBlk.Height)
 	if err != nil {
 		return nil, fmt.Errorf("client: failed to get light block at height %d: %w", annBlk.Height, err)
 	}
diff --git a/runtime/src/common/version.rs b/runtime/src/common/version.rs
index 4c243b59726..7212efc518e 100644
--- a/runtime/src/common/version.rs
+++ b/runtime/src/common/version.rs
@@ -64,7 +64,7 @@ impl From<u64> for Version {
 // the worker host.
 pub const PROTOCOL_VERSION: Version = Version {
     major: 5,
-    minor: 0,
+    minor: 1,
     patch: 0,
 };
 
diff --git a/runtime/src/consensus/mod.rs b/runtime/src/consensus/mod.rs
index baccb7a589d..9f2d0cc9cbc 100644
--- a/runtime/src/consensus/mod.rs
+++ b/runtime/src/consensus/mod.rs
@@ -19,3 +19,11 @@ pub struct LightBlock {
     pub height: u64,
     pub meta: Vec<u8>,
 }
+
+/// An event emitted by the consensus layer.
+#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
+pub enum Event {
+    #[cbor(rename = "staking")]
+    Staking(staking::Event),
+    // TODO: Add support for other kinds of events.
+}
diff --git a/runtime/src/consensus/staking.rs b/runtime/src/consensus/staking.rs
index d377c5897e8..37d4545bd6b 100644
--- a/runtime/src/consensus/staking.rs
+++ b/runtime/src/consensus/staking.rs
@@ -7,7 +7,7 @@ use std::collections::BTreeMap;
 
 use crate::{
-    common::quantity::Quantity,
+    common::{crypto::hash::Hash, quantity::Quantity},
     consensus::{address::Address, beacon::EpochTime},
 };
 
@@ -228,6 +228,93 @@ pub struct WithdrawResult {
     pub amount_change: Quantity,
 }
 
+/// A staking-related event.
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, cbor::Encode, cbor::Decode)]
+pub struct Event {
+    #[cbor(optional)]
+    pub height: i64,
+    #[cbor(optional)]
+    pub tx_hash: Hash,
+
+    // TODO: Consider refactoring this to be an enum.
+    #[cbor(optional)]
+    pub transfer: Option<TransferEvent>,
+    #[cbor(optional)]
+    pub burn: Option<BurnEvent>,
+    #[cbor(optional)]
+    pub escrow: Option<EscrowEvent>,
+    #[cbor(optional)]
+    pub allowance_change: Option<AllowanceChangeEvent>,
+}
+
+/// Event emitted when stake is transferred, either by a call to Transfer or Withdraw.
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, cbor::Encode, cbor::Decode)]
+pub struct TransferEvent {
+    pub from: Address,
+    pub to: Address,
+    pub amount: Quantity,
+}
+
+/// Event emitted when stake is destroyed via a call to Burn.
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, cbor::Encode, cbor::Decode)]
+pub struct BurnEvent {
+    pub owner: Address,
+    pub amount: Quantity,
+}
+
+/// Escrow-related events.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, cbor::Encode, cbor::Decode)]
+pub enum EscrowEvent {
+    /// Event emitted when stake is transferred into an escrow account.
+    #[cbor(rename = "add")]
+    Add {
+        owner: Address,
+        escrow: Address,
+        amount: Quantity,
+        new_shares: Quantity,
+    },
+
+    /// Event emitted when stake is taken from an escrow account (i.e. stake is slashed).
+    #[cbor(rename = "take")]
+    Take { owner: Address, amount: Quantity },
+
+    /// Event emitted when the debonding process has started and the given number of active shares
+    /// have been moved into the debonding pool and started debonding.
+    ///
+    /// Note that the given amount is valid at the time of debonding start and may not correspond to
+    /// the final debonded amount in case any escrowed stake is subject to slashing.
+ #[cbor(rename = "debonding_start")] + DebondingStart { + owner: Address, + escrow: Address, + amount: Quantity, + active_shares: Quantity, + debonding_shares: Quantity, + debond_end_time: EpochTime, + }, + + /// Event emitted when stake is reclaimed from an escrow account back into owner's general + /// account. + #[cbor(rename = "reclaim")] + Reclaim { + owner: Address, + escrow: Address, + amount: Quantity, + shares: Quantity, + }, +} + +/// Event emitted when allowance is changed for a beneficiary. +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, cbor::Encode, cbor::Decode)] +pub struct AllowanceChangeEvent { + pub owner: Address, + pub beneficiary: Address, + pub allowance: Quantity, + #[cbor(optional)] + pub negative: bool, + pub amount_change: Quantity, +} + #[cfg(test)] mod tests { use super::*; @@ -466,4 +553,161 @@ mod tests { assert_eq!(dec, rr, "decoded result should match the expected value"); } } + + #[test] + fn test_consistent_events() { + let addr1 = Address::from_pk(&PublicKey::from( + "aaafffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + )); + let addr2 = Address::from_pk(&PublicKey::from( + "bbbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + )); + let tx_hash = Hash::empty_hash(); + + let tcs = vec![ + ( + "oWd0eF9oYXNoWCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + Event::default(), + ), + ( + "omZoZWlnaHQYKmd0eF9oYXNoWCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + Event { + height: 42, + ..Default::default() + }, + ), + ( + "omZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg==", + Event { + height: 42, + tx_hash, + ..Default::default() + }, + ), + + // Transfer. + ( + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWemh0cmFuc2ZlcqNidG9VALkSOXiV5kcMUfq+zJ3cg/cK7YahZGZyb21VACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFk", + Event { + height: 42, + tx_hash, + transfer: Some(TransferEvent { + from: addr1.clone(), + to: addr2.clone(), + amount: 100u32.into(), + }), + ..Default::default() + }, + ), + + // Burn. + ( + "o2RidXJuomVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNmYW1vdW50QWRmaGVpZ2h0GCpndHhfaGFzaFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlno=", + Event { + height: 42, + tx_hash, + burn: Some(BurnEvent { + owner: addr1.clone(), + amount: 100u32.into(), + }), + ..Default::default() + }, + ), + + // Escrow. 
+ ( + "o2Zlc2Nyb3ehY2FkZKRlb3duZXJVACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFkZmVzY3Jvd1UAuRI5eJXmRwxR+r7MndyD9wrthqFqbmV3X3NoYXJlc0EyZmhlaWdodBgqZ3R4X2hhc2hYIMZyuNHvVu0oq4fDYixRFAab3TrXuPlzdJjQwB7O8JZ6", + Event { + height: 42, + tx_hash, + escrow: Some(EscrowEvent::Add { + owner: addr1.clone(), + escrow: addr2.clone(), + amount: 100u32.into(), + new_shares: 50u32.into(), + }), + ..Default::default() + }, + ), + ( + "o2Zlc2Nyb3ehZHRha2WiZW93bmVyVQAgchQ0iT9hbAmBSOLPn5nj4oJuE2ZhbW91bnRBZGZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg==", + Event { + height: 42, + tx_hash, + escrow: Some(EscrowEvent::Take { + owner: addr1.clone(), + amount: 100u32.into(), + }), + ..Default::default() + }, + ), + ( + "o2Zlc2Nyb3ehb2RlYm9uZGluZ19zdGFydKZlb3duZXJVACByFDSJP2FsCYFI4s+fmePigm4TZmFtb3VudEFkZmVzY3Jvd1UAuRI5eJXmRwxR+r7MndyD9wrthqFtYWN0aXZlX3NoYXJlc0Eyb2RlYm9uZF9lbmRfdGltZRgqcGRlYm9uZGluZ19zaGFyZXNBGWZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWeg==", + Event { + height: 42, + tx_hash, + escrow: Some(EscrowEvent::DebondingStart { + owner: addr1.clone(), + escrow: addr2.clone(), + amount: 100u32.into(), + active_shares: 50u32.into(), + debonding_shares: 25u32.into(), + debond_end_time: 42, + }), + ..Default::default() + }, + ), + ( + "o2Zlc2Nyb3ehZ3JlY2xhaW2kZW93bmVyVQAgchQ0iT9hbAmBSOLPn5nj4oJuE2ZhbW91bnRBZGZlc2Nyb3dVALkSOXiV5kcMUfq+zJ3cg/cK7YahZnNoYXJlc0EZZmhlaWdodBgqZ3R4X2hhc2hYIMZyuNHvVu0oq4fDYixRFAab3TrXuPlzdJjQwB7O8JZ6", + Event { + height: 42, + tx_hash, + escrow: Some(EscrowEvent::Reclaim { + owner: addr1.clone(), + escrow: addr2.clone(), + amount: 100u32.into(), + shares: 25u32.into(), + }), + ..Default::default() + }, + ), + + // Allowance change. + ( + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWenBhbGxvd2FuY2VfY2hhbmdlpGVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNpYWxsb3dhbmNlQWRrYmVuZWZpY2lhcnlVALkSOXiV5kcMUfq+zJ3cg/cK7YahbWFtb3VudF9jaGFuZ2VBMg==", + Event { + height: 42, + tx_hash, + allowance_change: Some(AllowanceChangeEvent { + owner: addr1.clone(), + beneficiary: addr2.clone(), + allowance: 100u32.into(), + negative: false, + amount_change: 50u32.into(), + }), + ..Default::default() + }, + ), + ( + "o2ZoZWlnaHQYKmd0eF9oYXNoWCDGcrjR71btKKuHw2IsURQGm90617j5c3SY0MAezvCWenBhbGxvd2FuY2VfY2hhbmdlpWVvd25lclUAIHIUNIk/YWwJgUjiz5+Z4+KCbhNobmVnYXRpdmX1aWFsbG93YW5jZUFka2JlbmVmaWNpYXJ5VQC5Ejl4leZHDFH6vsyd3IP3Cu2GoW1hbW91bnRfY2hhbmdlQTI=", + Event { + height: 42, + tx_hash, + allowance_change: Some(AllowanceChangeEvent { + owner: addr1.clone(), + beneficiary: addr2.clone(), + allowance: 100u32.into(), + negative: true, + amount_change: 50u32.into(), + }), + ..Default::default() + }, + ), + ]; + for (encoded_base64, ev) in tcs { + let dec: Event = cbor::from_slice(&base64::decode(encoded_base64).unwrap()) + .expect("event should deserialize correctly"); + assert_eq!(dec, ev, "decoded event should match the expected value"); + } + } } diff --git a/runtime/src/consensus/state/beacon.rs b/runtime/src/consensus/state/beacon.rs index cf96d4b2f94..e71250df619 100644 --- a/runtime/src/consensus/state/beacon.rs +++ b/runtime/src/consensus/state/beacon.rs @@ -9,7 +9,7 @@ use crate::{ state::StateError, }, key_format, - storage::mkvs::ImmutableMKVS, + storage::mkvs::{FallibleMKVS, ImmutableMKVS}, }; /// Consensus beacon state wrapper. @@ -48,18 +48,56 @@ impl<'a, T: ImmutableMKVS> ImmutableState<'a, T> { /// Returns the future epoch number. 
 pub fn future_epoch(&self, ctx: Context) -> Result<EpochTime, StateError> {
+        self.future_epoch_state(ctx).map(|es| es.epoch)
+    }
+
+    /// Returns the future epoch state.
+    pub fn future_epoch_state(&self, ctx: Context) -> Result<EpochTimeState, StateError> {
         match self.mkvs.get(ctx, &FutureEpochKeyFmt(()).encode()) {
             Ok(Some(b)) => {
                 let state: EpochTimeState =
                     cbor::from_slice(&b).map_err(|err| StateError::Unavailable(anyhow!(err)))?;
-                Ok(state.epoch)
+                Ok(state)
             }
-            Ok(None) => Ok(EpochTime::default()),
+            Ok(None) => Ok(EpochTimeState::default()),
             Err(err) => Err(StateError::Unavailable(anyhow!(err))),
         }
     }
 }
 
+/// Mutable consensus beacon state wrapper.
+pub struct MutableState;
+
+impl MutableState {
+    /// Set current epoch state.
+    pub fn set_epoch_state<S: FallibleMKVS>(
+        mkvs: &mut S,
+        ctx: Context,
+        epoch_state: EpochTimeState,
+    ) -> Result<(), StateError> {
+        mkvs.insert(
+            ctx,
+            &CurrentEpochKeyFmt(()).encode(),
+            &cbor::to_vec(epoch_state),
+        )?;
+        Ok(())
+    }
+
+    /// Set future epoch state.
+    pub fn set_future_epoch_state<S: FallibleMKVS>(
+        mkvs: &mut S,
+        ctx: Context,
+        epoch_state: EpochTimeState,
+    ) -> Result<(), StateError> {
+        mkvs.insert(
+            ctx,
+            &FutureEpochKeyFmt(()).encode(),
+            &cbor::to_vec(epoch_state),
+        )?;
+        Ok(())
+    }
+}
+
 #[cfg(test)]
 mod test {
     use std::sync::Arc;
@@ -68,12 +106,58 @@ mod test {
         common::crypto::hash::Hash,
         storage::mkvs::{
             interop::{Fixture, ProtocolServer},
+            sync::NoopReadSyncer,
             Root, RootType, Tree,
         },
     };
 
     use super::*;
 
+    #[test]
+    fn test_mutable_state() {
+        let mut mkvs = Tree::builder()
+            .with_root_type(RootType::State)
+            .build(Box::new(NoopReadSyncer));
+
+        let ctx = Arc::new(Context::background());
+
+        MutableState::set_epoch_state(
+            &mut mkvs,
+            Context::create_child(&ctx),
+            EpochTimeState {
+                epoch: 10,
+                height: 100,
+            },
+        )
+        .unwrap();
+
+        MutableState::set_future_epoch_state(
+            &mut mkvs,
+            Context::create_child(&ctx),
+            EpochTimeState {
+                epoch: 11,
+                height: 110,
+            },
+        )
+        .unwrap();
+
+        let beacon_state = ImmutableState::new(&mkvs);
+
+        // Test current epoch state.
+        let epoch_state = beacon_state
+            .epoch_state(Context::create_child(&ctx))
+            .expect("epoch state query should work");
+        assert_eq!(10u64, epoch_state.epoch, "expected epoch should match");
+        assert_eq!(100i64, epoch_state.height, "expected height should match");
+
+        // Test future epoch state.
+        let epoch_state = beacon_state
+            .future_epoch_state(Context::create_child(&ctx))
+            .expect("future epoch state query should work");
+        assert_eq!(11u64, epoch_state.epoch, "expected epoch should match");
+        assert_eq!(110i64, epoch_state.height, "expected height should match");
+    }
+
     #[test]
     fn test_beacon_state_interop() {
         // Keep in sync with go/consensus/tendermint/apps/beacon/state/interop/interop.go.
@@ -114,5 +198,12 @@
             .future_epoch(Context::create_child(&ctx))
             .expect("future epoch query should work");
         assert_eq!(43u64, epoch, "expected future epoch should match");
+
+        // Test future epoch state.
+        let epoch_state = beacon_state
+            .future_epoch_state(Context::create_child(&ctx))
+            .expect("future epoch state query should work");
+        assert_eq!(43u64, epoch_state.epoch, "expected epoch should match");
+        assert_eq!(15i64, epoch_state.height, "expected height should match");
     }
 }
diff --git a/runtime/src/consensus/state/mod.rs b/runtime/src/consensus/state/mod.rs
index c6617a205ac..f615e13eb52 100644
--- a/runtime/src/consensus/state/mod.rs
+++ b/runtime/src/consensus/state/mod.rs
@@ -34,25 +34,34 @@ impl From<StateError> for types::Error {
 
 /// Provides consensus state tree from the host.
 pub struct ConsensusState {
+    // An explicit height field is needed because the relationship between the underlying consensus
+    // height and the corresponding state root is a consensus backend implementation detail.
+    height: u64,
     mkvs: Tree,
 }
 
 impl ConsensusState {
     /// Creates a consensus state wrapping the provided tree.
-    pub fn new(tree: Tree) -> Self {
-        Self { mkvs: tree }
+    pub fn new(height: u64, tree: Tree) -> Self {
+        Self { height, mkvs: tree }
     }
 
     /// Creates consensus state using host protocol.
-    pub fn from_protocol(protocol: Arc<Protocol>, root: Root) -> Self {
+    pub fn from_protocol(protocol: Arc<Protocol>, height: u64, root: Root) -> Self {
         let read_syncer = HostReadSyncer::new(protocol, HostStorageEndpoint::Consensus);
         Self {
+            height,
             mkvs: Tree::builder()
                 .with_capacity(100_000, 10_000_000)
                 .with_root(root)
                 .build(Box::new(read_syncer)),
         }
     }
+
+    /// Consensus layer height that this data is for.
+    pub fn height(&self) -> u64 {
+        self.height
+    }
 }
 
 impl ImmutableMKVS for ConsensusState {
diff --git a/runtime/src/consensus/state/roothash.rs b/runtime/src/consensus/state/roothash.rs
index 79d299aff54..8c89e0d277c 100644
--- a/runtime/src/consensus/state/roothash.rs
+++ b/runtime/src/consensus/state/roothash.rs
@@ -10,7 +10,10 @@ use crate::{
         key_format::{KeyFormat, KeyFormatAtom},
         namespace::Namespace,
     },
-    consensus::{roothash::Error, state::StateError},
+    consensus::{
+        roothash::{Error, RoundResults},
+        state::StateError,
+    },
     key_format,
     storage::mkvs::ImmutableMKVS,
 };
@@ -28,6 +31,7 @@ impl<'a, T: ImmutableMKVS> ImmutableState<'a, T> {
 }
 
 key_format!(StateRootKeyFmt, 0x25, Hash);
+key_format!(LastRoundResultsKeyFmt, 0x27, Hash);
 
 impl<'a, T: ImmutableMKVS> ImmutableState<'a, T> {
     /// Returns the state root for a specific runtime.
@@ -43,4 +47,18 @@ impl<'a, T: ImmutableMKVS> ImmutableState<'a, T> {
             Err(err) => Err(StateError::Unavailable(anyhow!(err)).into()),
         }
     }
+
+    /// Returns the last round results for a specific runtime.
+    pub fn last_round_results(&self, ctx: Context, id: Namespace) -> Result<RoundResults, Error> {
+        match self.mkvs.get(
+            ctx,
+            &LastRoundResultsKeyFmt(Hash::digest_bytes(id.as_ref())).encode(),
+        ) {
+            Ok(Some(b)) => {
+                cbor::from_slice(&b).map_err(|err| StateError::Unavailable(anyhow!(err)).into())
+            }
+            Ok(None) => Err(Error::InvalidRuntime(id)),
+            Err(err) => Err(StateError::Unavailable(anyhow!(err)).into()),
+        }
+    }
 }
diff --git a/runtime/src/consensus/tendermint/verifier.rs b/runtime/src/consensus/tendermint/verifier.rs
index cd42e356189..c1ab09b95af 100644
--- a/runtime/src/consensus/tendermint/verifier.rs
+++ b/runtime/src/consensus/tendermint/verifier.rs
@@ -55,11 +55,11 @@ use crate::{
             decode_light_block, state_root_from_header, LightBlockMeta, TENDERMINT_CONTEXT,
         },
         verifier::{self, verify_state_freshness, Error, TrustRoot, TrustedState},
-        LightBlock, HEIGHT_LATEST,
+        Event, LightBlock, HEIGHT_LATEST,
     },
     protocol::{Protocol, ProtocolUntrustedLocalStorage},
    storage::KeyValue,
-    types::Body,
+    types::{Body, EventKind, HostFetchConsensusEventsRequest, HostFetchConsensusEventsResponse},
 };
 
 use super::{encode_light_block, store::LruStore};
@@ -133,6 +133,7 @@ impl verifier::Verifier for NopVerifier {
         let state_root = untrusted_block.get_state_root();
         Ok(ConsensusState::from_protocol(
             self.protocol.clone(),
+            state_root.version + 1,
             state_root,
         ))
     }
@@ -146,6 +147,26 @@ impl verifier::Verifier for NopVerifier {
         self.unverified_state(block)
     }
 
+    fn events_at(&self, height: u64, kind: EventKind) -> Result<Vec<Event>, Error> {
+        let result = self
+            .protocol
+            .call_host(
+                Context::background(),
+                Body::HostFetchConsensusEventsRequest(HostFetchConsensusEventsRequest {
+                    height,
+                    kind,
+                }),
+            )
+            .map_err(|err| Error::VerificationFailed(err.into()))?;
+
+        match result {
+            Body::HostFetchConsensusEventsResponse(HostFetchConsensusEventsResponse { events }) => {
+                Ok(events)
+            }
+            _ => Err(Error::VerificationFailed(anyhow!("bad response from host"))),
+        }
+    }
+
     fn latest_height(&self) -> Result<u64, Error> {
         Ok(self.fetch_light_block(HEIGHT_LATEST)?.height)
     }
@@ -168,6 +189,7 @@ enum Command {
     LatestState(channel::Sender<Result<ConsensusState, Error>>),
     LatestHeight(channel::Sender<Result<u64, Error>>),
     StateAt(u64, channel::Sender<Result<ConsensusState, Error>>),
+    EventsAt(u64, EventKind, channel::Sender<Result<Vec<Event>, Error>>),
 }
 
 /// Tendermint consensus layer verifier.
@@ -290,6 +312,7 @@ impl Verifier {
         Ok(ConsensusState::from_protocol(
             self.protocol.clone(),
+            state_root.version + 1,
             state_root,
         ))
     }
@@ -393,7 +416,11 @@ impl Verifier {
         // Verify the consensus layer block first to obtain an authoritative state root.
         let consensus_block = self.verify_consensus_only(cache, instance, consensus_block)?;
         let state_root = consensus_block.get_state_root();
-        let state = ConsensusState::from_protocol(self.protocol.clone(), state_root);
+        let state = ConsensusState::from_protocol(
+            self.protocol.clone(),
+            state_root.version + 1,
+            state_root,
+        );
 
         // Check if we have already verified this runtime header to avoid re-verification.
         if let Some(state_root) = cache.verified_state_roots.get(&runtime_header.round) {
@@ -498,8 +525,12 @@ impl Verifier {
 
         let consensus_block = untrusted_block;
 
-        let state =
-            ConsensusState::from_protocol(self.protocol.clone(), consensus_block.get_state_root());
+        let state_root = consensus_block.get_state_root();
+        let state = ConsensusState::from_protocol(
+            self.protocol.clone(),
+            state_root.version + 1,
+            state_root,
+        );
 
         // Check if we have already verified this runtime header to avoid re-verification.
 if let Some((state_root, state_epoch)) = cache
@@ -560,6 +591,27 @@ impl Verifier {
         Ok(state)
     }
 
+    fn events_at(&self, height: u64, kind: EventKind) -> Result<Vec<Event>, Error> {
+        let result = self
+            .protocol
+            .call_host(
+                Context::background(),
+                Body::HostFetchConsensusEventsRequest(HostFetchConsensusEventsRequest {
+                    height,
+                    kind,
+                }),
+            )
+            .map_err(|err| Error::VerificationFailed(err.into()))?;
+        // TODO: Perform event verification once this becomes possible.
+
+        match result {
+            Body::HostFetchConsensusEventsResponse(HostFetchConsensusEventsResponse { events }) => {
+                Ok(events)
+            }
+            _ => Err(Error::VerificationFailed(anyhow!("bad response from host"))),
+        }
+    }
+
     fn update_insecure_posix_time(&self, verified_block: &TMLightBlock) {
         // Update untrusted time if ahead. This makes sure that the enclave's sense of time is
         // synced with consensus sense of time based on the fact that consensus time is harder to
@@ -847,6 +899,11 @@ impl Verifier {
                         .send(self.latest_consensus_height(&cache))
                         .map_err(|_| Error::Internal)?;
                 }
+                Command::EventsAt(height, kind, sender) => {
+                    sender
+                        .send(self.events_at(height, kind))
+                        .map_err(|_| Error::Internal)?;
+                }
             }
 
             // Persist last verified block once in a while.
@@ -1041,6 +1098,7 @@ impl verifier::Verifier for Handle {
         let state_root = untrusted_block.get_state_root();
         Ok(ConsensusState::from_protocol(
             self.protocol.clone(),
+            state_root.version + 1,
             state_root,
         ))
     }
@@ -1063,6 +1121,15 @@
         receiver.recv().map_err(|_| Error::Internal)?
     }
 
+    fn events_at(&self, height: u64, kind: EventKind) -> Result<Vec<Event>, Error> {
+        let (sender, receiver) = channel::bounded(1);
+        self.command_sender
+            .send(Command::EventsAt(height, kind, sender))
+            .map_err(|_| Error::Internal)?;
+
+        receiver.recv().map_err(|_| Error::Internal)?
+    }
+
     fn latest_height(&self) -> Result<u64, Error> {
         let (sender, receiver) = channel::bounded(1);
         self.command_sender
diff --git a/runtime/src/consensus/verifier.rs b/runtime/src/consensus/verifier.rs
index 958b65393dc..1d4c8b8cf43 100644
--- a/runtime/src/consensus/verifier.rs
+++ b/runtime/src/consensus/verifier.rs
@@ -1,4 +1,6 @@
 //! Trait for consensus layer verification.
+use std::sync::Arc;
+
 use anyhow::anyhow;
 use io_context::Context;
 use thiserror::Error;
@@ -7,12 +9,12 @@ use super::{
     beacon::EpochTime,
     roothash::{ComputeResultsHeader, Header},
     state::{registry::ImmutableState as RegistryState, ConsensusState},
-    LightBlock,
+    Event, LightBlock,
 };
 use crate::{
     common::{crypto::signature::PublicKey, namespace::Namespace, version::Version},
     rak::RAK,
-    types,
+    types::{self, EventKind},
 };
 
 #[derive(Debug, Error)]
@@ -104,6 +106,14 @@ pub trait Verifier: Send + Sync {
     /// verification manually if needed.
     fn state_at(&self, height: u64) -> Result<ConsensusState, Error>;
 
+    /// Return the consensus layer events at the given height.
+    ///
+    /// # Warning
+    ///
+    /// Event integrity is currently not verified and thus relies on replicated computation even
+    /// when using a TEE-enabled runtime.
+    fn events_at(&self, height: u64, kind: EventKind) -> Result<Vec<Event>, Error>;
+
     /// Return the latest known consensus layer height.
 fn latest_height(&self) -> Result<u64, Error>;
@@ -111,6 +121,54 @@
     fn trust(&self, header: &ComputeResultsHeader) -> Result<(), Error>;
 }
 
+impl<T: Verifier + ?Sized> Verifier for Arc<T> {
+    fn sync(&self, height: u64) -> Result<(), Error> {
+        Verifier::sync(&**self, height)
+    }
+
+    fn verify(
+        &self,
+        consensus_block: LightBlock,
+        runtime_header: Header,
+        epoch: EpochTime,
+    ) -> Result<ConsensusState, Error> {
+        Verifier::verify(&**self, consensus_block, runtime_header, epoch)
+    }
+
+    fn verify_for_query(
+        &self,
+        consensus_block: LightBlock,
+        runtime_header: Header,
+        epoch: EpochTime,
+    ) -> Result<ConsensusState, Error> {
+        Verifier::verify_for_query(&**self, consensus_block, runtime_header, epoch)
+    }
+
+    fn unverified_state(&self, consensus_block: LightBlock) -> Result<ConsensusState, Error> {
+        Verifier::unverified_state(&**self, consensus_block)
+    }
+
+    fn latest_state(&self) -> Result<ConsensusState, Error> {
+        Verifier::latest_state(&**self)
+    }
+
+    fn state_at(&self, height: u64) -> Result<ConsensusState, Error> {
+        Verifier::state_at(&**self, height)
+    }
+
+    fn events_at(&self, height: u64, kind: EventKind) -> Result<Vec<Event>, Error> {
+        Verifier::events_at(&**self, height, kind)
+    }
+
+    fn latest_height(&self) -> Result<u64, Error> {
+        Verifier::latest_height(&**self)
+    }
+
+    fn trust(&self, header: &ComputeResultsHeader) -> Result<(), Error> {
+        Verifier::trust(&**self, header)
+    }
+}
+
 /// Consensus layer trust root.
 #[derive(Debug, Clone, Default, PartialEq, Eq, cbor::Encode, cbor::Decode)]
 pub struct TrustRoot {
diff --git a/runtime/src/dispatcher.rs b/runtime/src/dispatcher.rs
index 5e6e0bf5e48..c33f6a06cd1 100644
--- a/runtime/src/dispatcher.rs
+++ b/runtime/src/dispatcher.rs
@@ -497,14 +497,11 @@ impl Dispatcher {
         let txn_dispatcher = txn_dispatcher.clone();
 
         tokio::task::spawn_blocking(move || {
-            // Verify consensus state and runtime state root integrity before executing the query.
-            let consensus_state = state.consensus_verifier.verify_for_query(
-                state.consensus_block,
-                state.header.clone(),
-                state.epoch,
-            )?;
-            // Ensure the runtime is still ready to process requests.
-            protocol.ensure_initialized()?;
+            // For queries we don't do any consensus layer integrity verification by default and it
+            // is up to the runtime to decide whether this is critical on a query-by-query basis.
+            let consensus_state = state
+                .consensus_verifier
+                .unverified_state(state.consensus_block.clone())?;
 
             let cache = cache_set.query(Root {
                 namespace: state.header.namespace,
@@ -518,6 +515,7 @@
             let txn_ctx = TxnContext::new(
                 ctx.freeze(),
                 protocol,
+                &state.consensus_block,
                 consensus_state,
                 &mut overlay,
                 &state.header,
@@ -546,7 +544,7 @@
             // For check-only we don't do any consensus layer integrity verification.
             let consensus_state = state
                 .consensus_verifier
-                .unverified_state(state.consensus_block)?;
+                .unverified_state(state.consensus_block.clone())?;
 
             let mut cache = cache_set.check(Root {
                 namespace: state.header.namespace,
@@ -559,6 +557,7 @@
             let txn_ctx = TxnContext::new(
                 ctx.clone(),
                 protocol.clone(),
+                &state.consensus_block,
                 consensus_state,
                 &mut overlay,
                 &state.header,
@@ -594,7 +593,7 @@
     ) -> Result<Body, Error> {
         // Verify consensus state and runtime state root integrity before execution.
 let consensus_state = state.consensus_verifier.verify(
-            state.consensus_block,
+            state.consensus_block.clone(),
             state.header.clone(),
             state.epoch,
         )?;
@@ -614,6 +613,7 @@ impl Dispatcher {
             let txn_ctx = TxnContext::new(
                 ctx.clone(),
                 protocol,
+                &state.consensus_block,
                 consensus_state,
                 &mut overlay,
                 header,
diff --git a/runtime/src/transaction/context.rs b/runtime/src/transaction/context.rs
index ec17c9e847f..af22926c658 100644
--- a/runtime/src/transaction/context.rs
+++ b/runtime/src/transaction/context.rs
@@ -8,6 +8,7 @@ use crate::{
         beacon::EpochTime,
         roothash::{Header, RoundResults},
         state::ConsensusState,
+        LightBlock,
     },
     protocol::Protocol,
     storage::MKVS,
@@ -19,6 +20,8 @@ pub struct Context<'a> {
     pub io_ctx: Arc<IoContext>,
     /// Low-level access to the underlying Runtime Host Protocol.
     pub protocol: Arc<Protocol>,
+    /// Consensus light block.
+    pub consensus_block: &'a LightBlock,
     /// Consensus state tree.
     pub consensus_state: ConsensusState,
     /// Runtime state.
@@ -42,6 +45,7 @@ impl<'a> Context<'a> {
     pub fn new(
         io_ctx: Arc<IoContext>,
         protocol: Arc<Protocol>,
+        consensus_block: &'a LightBlock,
         consensus_state: ConsensusState,
         runtime_state: &'a mut dyn MKVS,
         header: &'a Header,
@@ -53,6 +57,7 @@
         Self {
             io_ctx,
             protocol,
+            consensus_block,
             consensus_state,
             runtime_state,
             header,
diff --git a/runtime/src/transaction/dispatcher.rs b/runtime/src/transaction/dispatcher.rs
index 480776a031a..204661c83bb 100644
--- a/runtime/src/transaction/dispatcher.rs
+++ b/runtime/src/transaction/dispatcher.rs
@@ -14,6 +14,10 @@ use crate::{
 /// to process transactions.
 pub trait Dispatcher: Send + Sync {
     /// Execute the transactions in the given batch.
+    ///
+    /// # Consensus Layer State Integrity
+    ///
+    /// Before this method is invoked, consensus layer state integrity verification is performed.
     fn execute_batch(
         &self,
         ctx: Context,
@@ -25,6 +29,10 @@ pub trait Dispatcher: Send + Sync {
     ///
     /// The passed batch is an initial batch. In case the runtime needs additional items it should
     /// request them from the host.
+    ///
+    /// # Consensus Layer State Integrity
+    ///
+    /// Before this method is invoked, consensus layer state integrity verification is performed.
     fn schedule_and_execute_batch(
         &self,
         _ctx: Context,
@@ -39,6 +47,12 @@ pub trait Dispatcher: Send + Sync {
     }
 
     /// Check the transactions in the given batch for validity.
+    ///
+    /// # Consensus Layer State Integrity
+    ///
+    /// No consensus layer state integrity verification is performed for checks by default. The
+    /// runtime dispatcher implementation should perform integrity verification if needed on a
+    /// check-by-check basis.
     fn check_batch(
         &self,
         ctx: Context,
@@ -56,6 +70,12 @@ pub trait Dispatcher: Send + Sync {
     }
 
     /// Process a query.
+    ///
+    /// # Consensus Layer State Integrity
+    ///
+    /// No consensus layer state integrity verification is performed for queries by default. The
+    /// runtime dispatcher implementation should perform integrity verification if needed on a
+    /// query-by-query basis.
     fn query(&self, _ctx: Context, _method: &str, _args: Vec<u8>) -> Result<Vec<u8>, RuntimeError> {
         // Default implementation returns an error.
 Err(RuntimeError::new(
diff --git a/runtime/src/types.rs b/runtime/src/types.rs
index 3c15af55573..9bc34a75ca7 100644
--- a/runtime/src/types.rs
+++ b/runtime/src/types.rs
@@ -14,6 +14,7 @@ use crate::{
         version::Version,
     },
     consensus::{
+        self,
         beacon::EpochTime,
         roothash::{self, Block, ComputeResultsHeader, Header},
         LightBlock,
@@ -229,6 +230,8 @@ pub enum Body {
     HostFetchConsensusBlockResponse {
         block: LightBlock,
     },
+    HostFetchConsensusEventsRequest(HostFetchConsensusEventsRequest),
+    HostFetchConsensusEventsResponse(HostFetchConsensusEventsResponse),
     HostFetchTxBatchRequest {
         #[cbor(optional)]
         offset: Option<Hash>,
         limit: u32,
     },
@@ -369,6 +372,30 @@ pub struct CheckTxMetadata {
     pub sender_seq: u64,
 }
 
+/// Consensus event kind.
+#[derive(Clone, Copy, Debug, cbor::Encode, cbor::Decode)]
+#[repr(u8)]
+pub enum EventKind {
+    Staking = 1,
+    Registry = 2,
+    RootHash = 3,
+    Governance = 4,
+}
+
+/// Request to host to fetch the consensus events for the given height.
+#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
+#[cbor(no_default)]
+pub struct HostFetchConsensusEventsRequest {
+    pub height: u64,
+    pub kind: EventKind,
+}
+
+/// Response from host fetching the consensus events for the given height.
+#[derive(Clone, Debug, Default, cbor::Encode, cbor::Decode)]
+pub struct HostFetchConsensusEventsResponse {
+    pub events: Vec<consensus::Event>,
+}
+
 #[derive(Clone, Copy, Debug, cbor::Encode, cbor::Decode)]
 #[repr(u8)]
 pub enum MessageType {
diff --git a/tests/runtimes/simple-keyvalue/src/main.rs b/tests/runtimes/simple-keyvalue/src/main.rs
index 1562ed65c89..0f53d36fb11 100644
--- a/tests/runtimes/simple-keyvalue/src/main.rs
+++ b/tests/runtimes/simple-keyvalue/src/main.rs
@@ -15,7 +15,7 @@ use oasis_core_runtime::{
     config::Config,
     consensus::{
         roothash::{IncomingMessage, Message},
-        verifier::TrustRoot,
+        verifier::{TrustRoot, Verifier},
     },
     dispatcher::{PostInitState, PreInitState},
     protocol::HostInfo,
@@ -44,6 +44,7 @@ pub struct Context<'a, 'core> {
     pub core: &'a mut TxnContext<'core>,
     pub host_info: &'a HostInfo,
     pub key_manager: &'a dyn KeyManagerClient,
+    pub consensus_verifier: &'a dyn Verifier,
     pub messages: Vec<Message>,
 }
@@ -90,13 +91,19 @@ impl<'a, 'b, 'core> TxContext<'a, 'b, 'core> {
 
 struct Dispatcher {
     host_info: HostInfo,
     key_manager: Arc<dyn KeyManagerClient>,
+    consensus_verifier: Arc<dyn Verifier>,
 }
 
 impl Dispatcher {
-    fn new(host_info: HostInfo, key_manager: Arc<dyn KeyManagerClient>) -> Self {
+    fn new(
+        host_info: HostInfo,
+        key_manager: Arc<dyn KeyManagerClient>,
+        consensus_verifier: Arc<dyn Verifier>,
+    ) -> Self {
         Self {
             host_info,
             key_manager,
+            consensus_verifier,
         }
     }
@@ -213,6 +220,7 @@ impl TxnDispatcher for Dispatcher {
             core: &mut ctx,
             host_info: &self.host_info,
             key_manager: &self.key_manager,
+            consensus_verifier: &self.consensus_verifier,
             messages: vec![],
         };
         let mut ctx = TxContext::new(&mut ctx, false);
@@ -246,6 +254,7 @@ impl TxnDispatcher for Dispatcher {
             core: &mut ctx,
             host_info: &self.host_info,
             key_manager: &self.key_manager,
+            consensus_verifier: &self.consensus_verifier,
             messages: vec![],
         };
@@ -267,6 +276,7 @@ impl TxnDispatcher for Dispatcher {
             core: &mut ctx,
             host_info: &self.host_info,
             key_manager: &self.key_manager,
+            consensus_verifier: &self.consensus_verifier,
             messages: vec![],
         };
@@ -303,6 +313,7 @@ impl TxnDispatcher for Dispatcher {
             core: &mut ctx,
             host_info: &self.host_info,
             key_manager: &self.key_manager,
+            consensus_verifier: &self.consensus_verifier,
             messages: vec![],
         };
@@ -375,7 +386,7 @@ pub fn main_with_version(version: Version) {
             .expect("failed to update km client policy");
     })));
 
-    let dispatcher = Dispatcher::new(hi, km_client);
+    let dispatcher =
Dispatcher::new(hi, km_client, state.consensus_verifier.clone()); PostInitState { txn_dispatcher: Some(Box::new(dispatcher)), diff --git a/tests/runtimes/simple-keyvalue/src/methods.rs b/tests/runtimes/simple-keyvalue/src/methods.rs index 6237d12102e..6815a3a0df4 100644 --- a/tests/runtimes/simple-keyvalue/src/methods.rs +++ b/tests/runtimes/simple-keyvalue/src/methods.rs @@ -17,17 +17,18 @@ use oasis_core_runtime::{ versioned::Versioned, }, consensus::{ + self, address::Address, registry::Runtime, roothash::{Message, RegistryMessage, StakingMessage}, staking::{ - Account, AddEscrowResult, Delegation, ReclaimEscrowResult, TransferResult, + self, Account, AddEscrowResult, Delegation, ReclaimEscrowResult, TransferResult, WithdrawResult, }, state::staking::ImmutableState as StakingImmutableState, }, storage::MKVS, - types::Error as RuntimeError, + types::{Error as RuntimeError, EventKind}, }; /// Implementation of the transaction methods supported by the test runtime. @@ -466,9 +467,35 @@ impl BlockHandler { } Some(b"transfer") => { - let _: TransferResult = + let xfer: TransferResult = cbor::from_value(ev.result.clone().expect("transfer result should exist")) .expect("transfer result should deserialize correctly"); + + // Test that we can query the corresponding transfer event. + let mut height = ctx.core.consensus_state.height(); + let mut found = false; + while height > 0 { + let events = ctx + .consensus_verifier + .events_at(height, EventKind::Staking) + .expect("should be able to query events"); + + found = events.iter().any(|ev| { + matches!(ev, consensus::Event::Staking(staking::Event { + transfer: Some(staking::TransferEvent { from, to, amount }), + .. + }) if from == &xfer.from + && to == &xfer.to + && amount == &xfer.amount) + }); + if found { + break; + } + + height -= 1; + } + + assert!(found, "should find the corresponding transfer event"); } Some(b"add_escrow") => {
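> Annotation (not part of the patch): for comparison with the Rust scan loop above, the same backwards search can be expressed host-side against the Go consensus backend. This is a sketch: the helper name and the `Equal`-based matching are illustrative assumptions, while `Staking().GetEvents` and the event shapes are the ones used by the host handler earlier in this change.

```go
package example

import (
	"context"

	consensus "github.com/oasisprotocol/oasis-core/go/consensus/api"
	staking "github.com/oasisprotocol/oasis-core/go/staking/api"
)

// findTransferEvent scans backwards from the given height for a staking
// transfer event matching from/to, mirroring the runtime-side test loop.
func findTransferEvent(ctx context.Context, cs consensus.Backend, from, to staking.Address, height int64) (bool, error) {
	for h := height; h > 0; h-- {
		evs, err := cs.Staking().GetEvents(ctx, h)
		if err != nil {
			return false, err
		}
		for _, ev := range evs {
			if t := ev.Transfer; t != nil && t.From.Equal(from) && t.To.Equal(to) {
				return true, nil
			}
		}
	}
	return false, nil
}
```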