Skip to content

Commit

Permalink
go/runtime/registry: Refactor runtime host handler
Browse files Browse the repository at this point in the history
  • Loading branch information
peternose committed Sep 5, 2022
1 parent 44db718 commit bed69f6
Show file tree
Hide file tree
Showing 2 changed files with 140 additions and 105 deletions.
115 changes: 115 additions & 0 deletions go/runtime/registry/handlers.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
package registry

import (
"context"

"github.com/oasisprotocol/oasis-core/go/common/cbor"
"github.com/oasisprotocol/oasis-core/go/runtime/host/protocol"
runtimeKeymanager "github.com/oasisprotocol/oasis-core/go/runtime/keymanager/api"
storage "github.com/oasisprotocol/oasis-core/go/storage/api"
"github.com/oasisprotocol/oasis-core/go/storage/mkvs/syncer"
)

// handlerHostRPCCallRequest relays an RPC call from the runtime to the
// requested endpoint. Only the key manager EnclaveRPC endpoint is supported;
// any other endpoint yields errEndpointNotSupported.
func handlerHostRPCCallRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostRPCCallRequest) (*protocol.HostRPCCallResponse, error) {
	if rq.Endpoint != runtimeKeymanager.EnclaveRPCEndpoint {
		return nil, errEndpointNotSupported
	}

	// Call into the remote key manager.
	kmCli, err := h.env.GetKeyManagerClient(ctx)
	if err != nil {
		return nil, err
	}
	res, err := kmCli.CallEnclave(ctx, rq.Request, rq.PeerFeedback)
	if err != nil {
		return nil, err
	}

	return &protocol.HostRPCCallResponse{
		Response: cbor.FixSliceForSerde(res),
	}, nil
}

// handlerHostStorageSyncRequest services a storage read-sync request against
// either the runtime's own storage or the consensus state storage, depending
// on the requested endpoint. Exactly one of the Sync* sub-requests must be
// set; otherwise errMethodNotSupported is returned.
func handlerHostStorageSyncRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostStorageSyncRequest) (*protocol.HostStorageSyncResponse, error) {
	// Pick the read syncer backing the requested endpoint.
	var readSyncer syncer.ReadSyncer
	switch rq.Endpoint {
	case protocol.HostStorageEndpointRuntime:
		readSyncer = h.runtime.Storage() // Runtime storage.
	case protocol.HostStorageEndpointConsensus:
		readSyncer = h.consensus.State() // Consensus state storage.
	default:
		return nil, errEndpointNotSupported
	}

	// Dispatch to whichever sync method the request carries.
	var (
		proof *storage.ProofResponse
		err   error
	)
	switch {
	case rq.SyncGet != nil:
		proof, err = readSyncer.SyncGet(ctx, rq.SyncGet)
	case rq.SyncGetPrefixes != nil:
		proof, err = readSyncer.SyncGetPrefixes(ctx, rq.SyncGetPrefixes)
	case rq.SyncIterate != nil:
		proof, err = readSyncer.SyncIterate(ctx, rq.SyncIterate)
	default:
		return nil, errMethodNotSupported
	}
	if err != nil {
		return nil, err
	}

	return &protocol.HostStorageSyncResponse{ProofResponse: proof}, nil
}

// handlerHostLocalStorageGetRequest fetches the value stored under the given
// key in the runtime's local storage.
func handlerHostLocalStorageGetRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostLocalStorageGetRequest) (*protocol.HostLocalStorageGetResponse, error) {
	ls := h.runtime.LocalStorage()
	value, err := ls.Get(rq.Key)
	if err != nil {
		return nil, err
	}
	return &protocol.HostLocalStorageGetResponse{Value: value}, nil
}

// handlerHostLocalStorageSetRequest stores the given key/value pair into the
// runtime's local storage, replying with an empty body on success.
func handlerHostLocalStorageSetRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostLocalStorageSetRequest) (*protocol.Empty, error) {
	err := h.runtime.LocalStorage().Set(rq.Key, rq.Value)
	if err != nil {
		return nil, err
	}
	return &protocol.Empty{}, nil
}

// handlerHostFetchConsensusBlockRequest retrieves the consensus light block at
// the requested height via the consensus backend.
func handlerHostFetchConsensusBlockRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostFetchConsensusBlockRequest) (*protocol.HostFetchConsensusBlockResponse, error) {
	// Heights travel as uint64 over the protocol; the consensus API uses int64.
	blk, err := h.consensus.GetLightBlock(ctx, int64(rq.Height))
	if err != nil {
		return nil, err
	}
	return &protocol.HostFetchConsensusBlockResponse{Block: *blk}, nil
}

// handlerHostFetchGenesisHeightRequest reports the height recorded in the
// consensus genesis document.
func handlerHostFetchGenesisHeightRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostFetchGenesisHeightRequest) (*protocol.HostFetchGenesisHeightResponse, error) {
	genesis, err := h.consensus.GetGenesisDocument(ctx)
	if err != nil {
		return nil, err
	}
	return &protocol.HostFetchGenesisHeightResponse{
		Height: uint64(genesis.Height),
	}, nil
}

// handlerHostFetchTxBatchRequest pulls a batch of transactions from the
// transaction pool, bounded by the requested offset and limit, and returns
// them in raw (serialized) form.
func handlerHostFetchTxBatchRequest(ctx context.Context, h *runtimeHostHandler, rq *protocol.HostFetchTxBatchRequest) (*protocol.HostFetchTxBatchResponse, error) {
	txPool, err := h.env.GetTxPool(ctx)
	if err != nil {
		return nil, err
	}

	// Serialize each scheduled transaction into the response batch.
	txs := txPool.GetSchedulingExtra(rq.Offset, rq.Limit)
	rawTxs := make([][]byte, 0, len(txs))
	for i := range txs {
		rawTxs = append(rawTxs, txs[i].Raw())
	}

	return &protocol.HostFetchTxBatchResponse{Batch: rawTxs}, nil
}
130 changes: 25 additions & 105 deletions go/runtime/registry/host.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ import (
"github.com/oasisprotocol/oasis-core/go/runtime/host/protocol"
runtimeKeymanager "github.com/oasisprotocol/oasis-core/go/runtime/keymanager/api"
"github.com/oasisprotocol/oasis-core/go/runtime/txpool"
storage "github.com/oasisprotocol/oasis-core/go/storage/api"
"github.com/oasisprotocol/oasis-core/go/storage/mkvs/syncer"
)

// notifyTimeout is the maximum time to wait for a notification to be processed by the runtime.
Expand Down Expand Up @@ -163,113 +161,35 @@ type runtimeHostHandler struct {
}

// Implements protocol.Handler.
func (h *runtimeHostHandler) Handle(ctx context.Context, body *protocol.Body) (*protocol.Body, error) {
// RPC.
if body.HostRPCCallRequest != nil {
switch body.HostRPCCallRequest.Endpoint {
case runtimeKeymanager.EnclaveRPCEndpoint:
// Call into the remote key manager.
kmCli, err := h.env.GetKeyManagerClient(ctx)
if err != nil {
return nil, err
}
res, err := kmCli.CallEnclave(ctx, body.HostRPCCallRequest.Request, body.HostRPCCallRequest.PeerFeedback)
if err != nil {
return nil, err
}
return &protocol.Body{HostRPCCallResponse: &protocol.HostRPCCallResponse{
Response: cbor.FixSliceForSerde(res),
}}, nil
default:
return nil, errEndpointNotSupported
}
}
// Storage.
if body.HostStorageSyncRequest != nil {
rq := body.HostStorageSyncRequest

var rs syncer.ReadSyncer
switch rq.Endpoint {
case protocol.HostStorageEndpointRuntime:
// Runtime storage.
rs = h.runtime.Storage()
case protocol.HostStorageEndpointConsensus:
// Consensus state storage.
rs = h.consensus.State()
default:
return nil, errEndpointNotSupported
}

var rsp *storage.ProofResponse
var err error
switch {
case rq.SyncGet != nil:
rsp, err = rs.SyncGet(ctx, rq.SyncGet)
case rq.SyncGetPrefixes != nil:
rsp, err = rs.SyncGetPrefixes(ctx, rq.SyncGetPrefixes)
case rq.SyncIterate != nil:
rsp, err = rs.SyncIterate(ctx, rq.SyncIterate)
default:
return nil, errMethodNotSupported
}
if err != nil {
return nil, err
}
func (h *runtimeHostHandler) Handle(ctx context.Context, rq *protocol.Body) (*protocol.Body, error) {
var (
rsp protocol.Body
err error
)

return &protocol.Body{HostStorageSyncResponse: &protocol.HostStorageSyncResponse{ProofResponse: rsp}}, nil
}
// Local storage.
if body.HostLocalStorageGetRequest != nil {
value, err := h.runtime.LocalStorage().Get(body.HostLocalStorageGetRequest.Key)
if err != nil {
return nil, err
}
return &protocol.Body{HostLocalStorageGetResponse: &protocol.HostLocalStorageGetResponse{Value: value}}, nil
}
if body.HostLocalStorageSetRequest != nil {
if err := h.runtime.LocalStorage().Set(body.HostLocalStorageSetRequest.Key, body.HostLocalStorageSetRequest.Value); err != nil {
return nil, err
}
return &protocol.Body{HostLocalStorageSetResponse: &protocol.Empty{}}, nil
}
// Consensus light client.
if body.HostFetchConsensusBlockRequest != nil {
lb, err := h.consensus.GetLightBlock(ctx, int64(body.HostFetchConsensusBlockRequest.Height))
if err != nil {
return nil, err
}
return &protocol.Body{HostFetchConsensusBlockResponse: &protocol.HostFetchConsensusBlockResponse{
Block: *lb,
}}, nil
switch {
case rq.HostRPCCallRequest != nil:
rsp.HostRPCCallResponse, err = handlerHostRPCCallRequest(ctx, h, rq.HostRPCCallRequest)
case rq.HostStorageSyncRequest != nil:
rsp.HostStorageSyncResponse, err = handlerHostStorageSyncRequest(ctx, h, rq.HostStorageSyncRequest)
case rq.HostLocalStorageGetRequest != nil:
rsp.HostLocalStorageGetResponse, err = handlerHostLocalStorageGetRequest(ctx, h, rq.HostLocalStorageGetRequest)
case rq.HostLocalStorageSetRequest != nil:
rsp.HostLocalStorageSetResponse, err = handlerHostLocalStorageSetRequest(ctx, h, rq.HostLocalStorageSetRequest)
case rq.HostFetchConsensusBlockRequest != nil:
rsp.HostFetchConsensusBlockResponse, err = handlerHostFetchConsensusBlockRequest(ctx, h, rq.HostFetchConsensusBlockRequest)
case rq.HostFetchGenesisHeightRequest != nil:
rsp.HostFetchGenesisHeightResponse, err = handlerHostFetchGenesisHeightRequest(ctx, h, rq.HostFetchGenesisHeightRequest)
case rq.HostFetchTxBatchRequest != nil:
rsp.HostFetchTxBatchResponse, err = handlerHostFetchTxBatchRequest(ctx, h, rq.HostFetchTxBatchRequest)
default:
err = errMethodNotSupported
}
if body.HostFetchGenesisHeightRequest != nil {
doc, err := h.consensus.GetGenesisDocument(ctx)
if err != nil {
return nil, err
}
return &protocol.Body{HostFetchGenesisHeightResponse: &protocol.HostFetchGenesisHeightResponse{
Height: uint64(doc.Height),
}}, nil
}
// Transaction pool.
if rq := body.HostFetchTxBatchRequest; rq != nil {
txPool, err := h.env.GetTxPool(ctx)
if err != nil {
return nil, err
}

batch := txPool.GetSchedulingExtra(rq.Offset, rq.Limit)
raw := make([][]byte, 0, len(batch))
for _, tx := range batch {
raw = append(raw, tx.Raw())
}

return &protocol.Body{HostFetchTxBatchResponse: &protocol.HostFetchTxBatchResponse{
Batch: raw,
}}, nil
if err != nil {
return nil, err
}

return nil, errMethodNotSupported
return &rsp, nil
}

// runtimeHostNotifier is a runtime host notifier suitable for compute runtimes. It handles things
Expand Down

0 comments on commit bed69f6

Please sign in to comment.