diff --git a/.buildkite/scripts/common_e2e.sh b/.buildkite/scripts/common_e2e.sh index d0d2792f479..9a5ca2985ad 100644 --- a/.buildkite/scripts/common_e2e.sh +++ b/.buildkite/scripts/common_e2e.sh @@ -77,6 +77,16 @@ run_backend_tendermint_committee() { registry entity init \ --datadir ${entity_dir} + # Provision the key manager runtime. + ${EKIDEN_NODE} \ + registry runtime init_genesis \ + --runtime.id ${EKIDEN_KM_RUNTIME_ID} \ + ${EKIDEN_TEE_HARDWARE:+--runtime.tee_hardware ${EKIDEN_TEE_HARDWARE}} \ + --runtime.kind keymanager \ + --runtime.genesis.file keymanager_genesis.json \ + --entity ${entity_dir} \ + --datadir ${entity_dir} + # Provision the runtime. ${EKIDEN_NODE} \ registry runtime init_genesis \ @@ -86,6 +96,8 @@ run_backend_tendermint_committee() { --runtime.storage_group_size ${storage_group_size} \ ${runtime_genesis:+--runtime.genesis.state ${runtime_genesis}} \ ${EKIDEN_TEE_HARDWARE:+--runtime.tee_hardware ${EKIDEN_TEE_HARDWARE}} \ + --runtime.keymanager ${EKIDEN_KM_RUNTIME_ID} \ + --runtime.kind compute \ --entity ${entity_dir} \ --datadir ${entity_dir} @@ -97,6 +109,7 @@ run_backend_tendermint_committee() { genesis init \ --genesis_file ${genesis_file} \ --entity ${entity_dir}/entity_genesis.json \ + --runtime ${entity_dir}/keymanager_genesis.json \ --runtime ${entity_dir}/runtime_genesis.json \ ${roothash_genesis_blocks:+--roothash ${roothash_genesis_blocks}} \ ${runtime_genesis:+--storage ${runtime_genesis}} \ @@ -156,12 +169,14 @@ run_backend_tendermint_committee() { --scheduler.backend trivial \ --registry.backend tendermint \ --roothash.backend tendermint \ + --keymanager.backend tendermint \ --genesis.file ${genesis_file} \ --tendermint.core.listen_address tcp://0.0.0.0:${tm_port} \ --tendermint.consensus.timeout_commit 250ms \ --tendermint.debug.addr_book_lenient \ --tendermint.seeds "${EKIDEN_SEED_NODE_ID}@127.0.0.1:${EKIDEN_SEED_NODE_PORT}" \ --datadir ${datadir} \ + --debug.allow_test_keys \ & # HACK HACK HACK HACK HACK @@ -239,6 +254,7 @@ run_compute_node() { --scheduler.backend trivial \ --registry.backend tendermint \ --roothash.backend tendermint \ + --keymanager.backend tendermint \ --genesis.file ${EKIDEN_GENESIS_FILE} \ --tendermint.core.listen_address tcp://0.0.0.0:${tm_port} \ --tendermint.consensus.timeout_commit 250ms \ @@ -258,6 +274,7 @@ run_compute_node() { --worker.entity_private_key ${EKIDEN_ENTITY_PRIVATE_KEY} \ --tendermint.seeds "${EKIDEN_SEED_NODE_ID}@127.0.0.1:${EKIDEN_SEED_NODE_PORT}" \ --datadir ${data_dir} \ + --debug.allow_test_keys \ ${extra_args} 2>&1 | sed "s/^/[compute-node-${id}] /" & } @@ -312,6 +329,7 @@ run_storage_node() { --scheduler.backend trivial \ --registry.backend tendermint \ --roothash.backend tendermint \ + --keymanager.backend tendermint \ --genesis.file ${EKIDEN_GENESIS_FILE} \ --tendermint.core.listen_address tcp://0.0.0.0:${tm_port} \ --tendermint.consensus.timeout_commit 250ms \ @@ -322,6 +340,7 @@ run_storage_node() { --worker.p2p.port ${p2p_port} \ --worker.entity_private_key ${EKIDEN_ENTITY_PRIVATE_KEY} \ --datadir ${data_dir} \ + --debug.allow_test_keys \ 2>&1 | sed "s/^/[storage-node-${id}] /" & } @@ -372,6 +391,7 @@ run_client_node() { --registry.backend tendermint \ --roothash.backend tendermint \ --roothash.tendermint.index_blocks \ + --keymanager.backend tendermint \ --genesis.file ${EKIDEN_GENESIS_FILE} \ --tendermint.core.listen_address tcp://0.0.0.0:${tm_port} \ --tendermint.consensus.timeout_commit 250ms \ @@ -379,6 +399,7 @@ run_client_node() { --tendermint.seeds 
"${EKIDEN_SEED_NODE_ID}@127.0.0.1:${EKIDEN_SEED_NODE_PORT}" \ --client.indexer.runtimes ${EKIDEN_RUNTIME_ID} \ --datadir ${data_dir} \ + --debug.allow_test_keys \ 2>&1 | sed "s/^/[client-node-${id}] /" & } @@ -451,6 +472,7 @@ run_keymanager_node() { --scheduler.backend trivial \ --registry.backend tendermint \ --roothash.backend tendermint \ + --keymanager.backend tendermint \ --genesis.file ${EKIDEN_GENESIS_FILE} \ --tendermint.core.listen_address tcp://0.0.0.0:${tm_port} \ --tendermint.consensus.timeout_commit 250ms \ @@ -463,8 +485,10 @@ run_keymanager_node() { --worker.keymanager.runtime.loader ${EKIDEN_RUNTIME_LOADER} \ --worker.keymanager.runtime.binary ${EKIDEN_ROOT_PATH}/target/${runtime_target}/debug/ekiden-keymanager-runtime${runtime_ext} \ --worker.keymanager.runtime.id ${EKIDEN_KM_RUNTIME_ID} \ + --worker.keymanager.may_generate \ --tendermint.seeds "${EKIDEN_SEED_NODE_ID}@127.0.0.1:${EKIDEN_SEED_NODE_PORT}" \ --datadir ${data_dir} \ + --debug.allow_test_keys \ ${extra_args} 2>&1 | sed "s/^/[key-manager] /" & } @@ -506,10 +530,12 @@ run_seed_node() { --scheduler.backend trivial \ --registry.backend tendermint \ --roothash.backend tendermint \ + --keymanager.backend tendermint \ --tendermint.core.listen_address tcp://0.0.0.0:${EKIDEN_SEED_NODE_PORT} \ --tendermint.seed_mode \ --tendermint.debug.addr_book_lenient \ --datadir ${data_dir} \ + --debug.allow_test_keys \ ${extra_args} 2>&1 | sed "s/^/[seed-node-${id}] /" & # 'show-node-id' relies on key file to be present. diff --git a/Cargo.lock b/Cargo.lock index f99c99d3b91..cb73618dc44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -384,8 +384,8 @@ dependencies = [ name = "ekiden-keymanager-runtime" version = "0.3.0-alpha" dependencies = [ - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "ekiden-keymanager-api 0.3.0-alpha", + "ekiden-keymanager-client 0.3.0-alpha", "ekiden-runtime 0.3.0-alpha", "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "io-context 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -395,6 +395,7 @@ dependencies = [ "serde_cbor 0.9.0 (git+https://github.com/pyfisch/cbor?rev=114ecaeac53799d0bf81ca8d1b980c7c419d76fe)", "sgx-isa 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp800-185 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "zeroize 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Makefile b/Makefile index 143a27d3ab4..7ec0210727f 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ ECHO = echo endif -.PHONY: all tools runtimes rust go clean fmt test test-unit test-e2e +.PHONY: all tools runtimes rust go clean fmt test test-unit test-e2e regenerate-single-node all: tools runtimes rust go @$(ECHO) "$(CYAN)*** Everything built successfully!$(OFF)" @@ -85,3 +85,7 @@ test-e2e: clean: @$(ECHO) "$(CYAN)*** Cleaning up...$(OFF)" @cargo clean + +regenerate-single-node: go + @$(ECHO) "$(CYAN)*** Regenerating single node config artifacts...$(OFF)" + @./scripts/regenerate_single_node.sh diff --git a/configs/single_node.yml b/configs/single_node.yml index a41bb1f531c..02757219be6 100644 --- a/configs/single_node.yml +++ b/configs/single_node.yml @@ -8,6 +8,14 @@ # datadir: /tmp/ekiden-single-node +# Debug. +# +# Enable test keys, and the built in test entity to reduce config maintenance +# burden. 
+debug: + allow_test_keys: true + test_entity: true + # Logging. # Per-module log levels are defined below. If you prefer just one unified log level, you can use: # log: @@ -52,6 +60,7 @@ worker: port: 9100 keymanager: enabled: true + may_generate: true runtime: id: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" binary: target/debug/ekiden-keymanager-runtime @@ -93,6 +102,10 @@ tendermint: consensus: timeout_commit: 1s +# Key manager backend configuration. +keymanager: + backend: tendermint + # Client configuration. client: indexer: diff --git a/configs/single_node/entity.json b/configs/single_node/entity.json deleted file mode 100644 index ae3aca670e5..00000000000 --- a/configs/single_node/entity.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","registration_time":1549020540} \ No newline at end of file diff --git a/configs/single_node/entity.pem b/configs/single_node/entity.pem deleted file mode 100644 index 44ca7f3fd2b..00000000000 --- a/configs/single_node/entity.pem +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN ED25519 PRIVATE KEY----- -YQ7+1jcxLgQ8HJe2Nr4tXKOZo/lXdNd6W2+AK6omYuPhD5iI/+V+WRoYLdG+5kDG -ACZooJXV90CYFtk0evnPLA== ------END ED25519 PRIVATE KEY----- diff --git a/configs/single_node/entity_genesis.json b/configs/single_node/entity_genesis.json deleted file mode 100644 index d681064ad16..00000000000 --- a/configs/single_node/entity_genesis.json +++ /dev/null @@ -1 +0,0 @@ -{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"nKNlW2YTlZJprL3V/2f4OTnehvljMgn0xzbdZDX+JFHqTJGDSQAYkr0QABXBxuhCclE/vxD+PfKKu0oLSvT+Bg=="},"untrusted_raw_value":"omJpZFgg4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyxxcmVnaXN0cmF0aW9uX3RpbWUaXFQtfA=="} \ No newline at end of file diff --git a/configs/single_node/genesis.json b/configs/single_node/genesis.json index daefc4025b8..b635548aab3 100644 --- a/configs/single_node/genesis.json +++ b/configs/single_node/genesis.json @@ -1 +1 @@ -{"extra_data":null,"genesis_time":"2019-05-16T10:19:39.086184371Z","registry":{"entities":[{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"nKNlW2YTlZJprL3V/2f4OTnehvljMgn0xzbdZDX+JFHqTJGDSQAYkr0QABXBxuhCclE/vxD+PfKKu0oLSvT+Bg=="},"untrusted_raw_value":"omJpZFgg4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyxxcmVnaXN0cmF0aW9uX3RpbWUaXFQtfA=="}],"runtimes":[{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"WgGcADIkZkjUOZQ6QP7WG1xIApGGumy6cAU1etD6TfiPqUuXJWBOR7NVc+d2fThbpW5EUW680G21YlCM4XBnDw=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkY29kZfZka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZsdGVlX2hhcmR3YXJlAHFyZWdpc3RyYXRpb25fdGltZRpcVC18cnJlcGxpY2FfZ3JvdXBfc2l6ZQFyc3RvcmFnZV9ncm91cF9zaXplAXgZcmVwbGljYV9ncm91cF9iYWNrdXBfc2l6ZQB4GnJlcGxpY2FfYWxsb3dlZF9zdHJhZ2dsZXJzAHggdHJhbnNhY3Rpb25fc2NoZWR1bGVyX2dyb3VwX3NpemUB"}]},"roothash":{"blocks":{}},"staking":{"ledger":null},"storage":{"state":null},"validators":[{"core_address":"127.0.0.1:26656","name":"single-node","power":10,"pub_key":"RPHEs6FhqInmh2upLCDD9j3R7PIEratspDZWZJewFig="}]} \ No newline at end of file 
+{"extra_data":null,"genesis_time":"2019-06-13T09:40:50.990285733Z","registry":{"entities":[{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"Yx7IvyWipXTNh1nGUGtukolAV3lJ2cy0F+cM6lkyKbMcYavviVEOKGyuLndxyc/o5QTuv3XUeooNeH+2OS7SDw=="},"untrusted_raw_value":"omJpZFggTqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TVxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAA=="}],"runtimes":[{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"rQThUvx2oRHTXkbzm2itbzGkhJIaFJ70VeuY+UPY7PnmnaNnw6OKrJowC9QN0MsTTNYJ1qypcMNvJ9sUutsBCg=="},"untrusted_raw_value":"q2JpZFgg//////////////////////////////////////////9ka2luZAFnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZra2V5X21hbmFnZXJYIP//////////////////////////////////////////bHRlZV9oYXJkd2FyZQBxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAHJyZXBsaWNhX2dyb3VwX3NpemUBcnN0b3JhZ2VfZ3JvdXBfc2l6ZQF4GXJlcGxpY2FfZ3JvdXBfYmFja3VwX3NpemUAeBpyZXBsaWNhX2FsbG93ZWRfc3RyYWdnbGVycwB4IHRyYW5zYWN0aW9uX3NjaGVkdWxlcl9ncm91cF9zaXplAQ=="},{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"meg8c+dRcpnlFHbs0+tuNziWpdOVa7TczkUy4Bkpgng0ju/WZM89YW1eK6rQEvPnXakiuwFbvJKc7eyRHbeqBg=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZra2V5X21hbmFnZXJYIP//////////////////////////////////////////bHRlZV9oYXJkd2FyZQBxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAHJyZXBsaWNhX2dyb3VwX3NpemUBcnN0b3JhZ2VfZ3JvdXBfc2l6ZQF4GXJlcGxpY2FfZ3JvdXBfYmFja3VwX3NpemUAeBpyZXBsaWNhX2FsbG93ZWRfc3RyYWdnbGVycwB4IHRyYW5zYWN0aW9uX3NjaGVkdWxlcl9ncm91cF9zaXplAQ=="}]},"roothash":{"blocks":{}},"staking":{"ledger":null},"storage":{"state":null},"validators":[{"core_address":"127.0.0.1:26656","name":"single-node","power":10,"pub_key":"RPHEs6FhqInmh2upLCDD9j3R7PIEratspDZWZJewFig="}]} \ No newline at end of file diff --git a/configs/single_node/runtime_genesis.json b/configs/single_node/runtime_genesis.json deleted file mode 100644 index 0a5d056e96a..00000000000 --- a/configs/single_node/runtime_genesis.json +++ /dev/null @@ -1 +0,0 @@ -{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"WgGcADIkZkjUOZQ6QP7WG1xIApGGumy6cAU1etD6TfiPqUuXJWBOR7NVc+d2fThbpW5EUW680G21YlCM4XBnDw=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkY29kZfZka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZsdGVlX2hhcmR3YXJlAHFyZWdpc3RyYXRpb25fdGltZRpcVC18cnJlcGxpY2FfZ3JvdXBfc2l6ZQFyc3RvcmFnZV9ncm91cF9zaXplAXgZcmVwbGljYV9ncm91cF9iYWNrdXBfc2l6ZQB4GnJlcGxpY2FfYWxsb3dlZF9zdHJhZ2dsZXJzAHggdHJhbnNhY3Rpb25fc2NoZWR1bGVyX2dyb3VwX3NpemUB"} \ No newline at end of file diff --git a/configs/single_node_sgx.yml b/configs/single_node_sgx.yml index 98ae5236241..edc139f0e1e 100644 --- a/configs/single_node_sgx.yml +++ b/configs/single_node_sgx.yml @@ -8,6 +8,14 @@ # datadir: /tmp/ekiden-single-node-sgx +# Debug. +# +# Enable test keys, and the built in test entity to reduce config maintenance +# burden. +debug: + allow_test_keys: true + test_entity: true + # Logging. 
# Per-module log levels are defined below. If you prefer just one unified log level, you can use: # log: @@ -55,6 +63,7 @@ worker: keymanager: enabled: true tee_hardware: intel-sgx + may_generate: true runtime: id: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" binary: target/x86_64-fortanix-unknown-sgx/debug/ekiden-keymanager-runtime.sgxs @@ -100,6 +109,10 @@ tendermint: consensus: timeout_commit: 1s +# Key manager backend configuration. +keymanager: + backend: tendermint + # Client configuration. client: indexer: diff --git a/configs/single_node_sgx/entity.json b/configs/single_node_sgx/entity.json deleted file mode 100644 index ae3aca670e5..00000000000 --- a/configs/single_node_sgx/entity.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","registration_time":1549020540} \ No newline at end of file diff --git a/configs/single_node_sgx/entity.pem b/configs/single_node_sgx/entity.pem deleted file mode 100644 index 44ca7f3fd2b..00000000000 --- a/configs/single_node_sgx/entity.pem +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN ED25519 PRIVATE KEY----- -YQ7+1jcxLgQ8HJe2Nr4tXKOZo/lXdNd6W2+AK6omYuPhD5iI/+V+WRoYLdG+5kDG -ACZooJXV90CYFtk0evnPLA== ------END ED25519 PRIVATE KEY----- diff --git a/configs/single_node_sgx/entity_genesis.json b/configs/single_node_sgx/entity_genesis.json deleted file mode 100644 index d681064ad16..00000000000 --- a/configs/single_node_sgx/entity_genesis.json +++ /dev/null @@ -1 +0,0 @@ -{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"nKNlW2YTlZJprL3V/2f4OTnehvljMgn0xzbdZDX+JFHqTJGDSQAYkr0QABXBxuhCclE/vxD+PfKKu0oLSvT+Bg=="},"untrusted_raw_value":"omJpZFgg4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyxxcmVnaXN0cmF0aW9uX3RpbWUaXFQtfA=="} \ No newline at end of file diff --git a/configs/single_node_sgx/genesis.json b/configs/single_node_sgx/genesis.json index 36e30122c53..ebb48955388 100644 --- a/configs/single_node_sgx/genesis.json +++ b/configs/single_node_sgx/genesis.json @@ -1 +1 @@ -{"extra_data":null,"genesis_time":"2019-05-16T10:20:24.920062503Z","registry":{"entities":[{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"nKNlW2YTlZJprL3V/2f4OTnehvljMgn0xzbdZDX+JFHqTJGDSQAYkr0QABXBxuhCclE/vxD+PfKKu0oLSvT+Bg=="},"untrusted_raw_value":"omJpZFgg4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyxxcmVnaXN0cmF0aW9uX3RpbWUaXFQtfA=="}],"runtimes":[{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"1TNmA4pKxOKA58dO6m00iEyEHq6dUx0GZ9VqNiMfR1mjDfLNd/JLY0h0wsScd87DOoigIdkh/Fi7LV4GfsM6BQ=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkY29kZfZka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZsdGVlX2hhcmR3YXJlAXFyZWdpc3RyYXRpb25fdGltZRpcVC18cnJlcGxpY2FfZ3JvdXBfc2l6ZQFyc3RvcmFnZV9ncm91cF9zaXplAXgZcmVwbGljYV9ncm91cF9iYWNrdXBfc2l6ZQB4GnJlcGxpY2FfYWxsb3dlZF9zdHJhZ2dsZXJzAHggdHJhbnNhY3Rpb25fc2NoZWR1bGVyX2dyb3VwX3NpemUB"}]},"roothash":{"blocks":{}},"staking":{"ledger":null},"storage":{"state":null},"validators":[{"core_address":"127.0.0.1:26656","name":"single-node","power":10,"pub_key":"RPHEs6FhqInmh2upLCDD9j3R7PIEratspDZWZJewFig="}]} \ No newline at end of file 
+{"extra_data":null,"genesis_time":"2019-06-13T09:40:51.108454277Z","registry":{"entities":[{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"Yx7IvyWipXTNh1nGUGtukolAV3lJ2cy0F+cM6lkyKbMcYavviVEOKGyuLndxyc/o5QTuv3XUeooNeH+2OS7SDw=="},"untrusted_raw_value":"omJpZFggTqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TVxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAA=="}],"runtimes":[{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"h87+YyWUvVIdwF66M8WAF2o2L+uAjqQqLpt4uqKiuqX5BI3EBmRdK4hiyrYycWBa2K3lAWYJJtFe5eovKLekBA=="},"untrusted_raw_value":"q2JpZFgg//////////////////////////////////////////9ka2luZAFnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZra2V5X21hbmFnZXJYIP//////////////////////////////////////////bHRlZV9oYXJkd2FyZQFxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAHJyZXBsaWNhX2dyb3VwX3NpemUBcnN0b3JhZ2VfZ3JvdXBfc2l6ZQF4GXJlcGxpY2FfZ3JvdXBfYmFja3VwX3NpemUAeBpyZXBsaWNhX2FsbG93ZWRfc3RyYWdnbGVycwB4IHRyYW5zYWN0aW9uX3NjaGVkdWxlcl9ncm91cF9zaXplAQ=="},{"signature":{"public_key":"TqUyj5Q+9vZtqu10yw6Zw7HEX3Ywe0JQA9vHyzY47TU=","signature":"KECsvSKJJLpLLYFthrihr4uFna2U5HE0KmFVMk3kjtv3NAFBpkqW/Rrk1QYP7G1nFW/lLwi34VLI46BdnamxCw=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZra2V5X21hbmFnZXJYIP//////////////////////////////////////////bHRlZV9oYXJkd2FyZQFxcmVnaXN0cmF0aW9uX3RpbWUaXPHAAHJyZXBsaWNhX2dyb3VwX3NpemUBcnN0b3JhZ2VfZ3JvdXBfc2l6ZQF4GXJlcGxpY2FfZ3JvdXBfYmFja3VwX3NpemUAeBpyZXBsaWNhX2FsbG93ZWRfc3RyYWdnbGVycwB4IHRyYW5zYWN0aW9uX3NjaGVkdWxlcl9ncm91cF9zaXplAQ=="}]},"roothash":{"blocks":{}},"staking":{"ledger":null},"storage":{"state":null},"validators":[{"core_address":"127.0.0.1:26656","name":"single-node","power":10,"pub_key":"RPHEs6FhqInmh2upLCDD9j3R7PIEratspDZWZJewFig="}]} \ No newline at end of file diff --git a/configs/single_node_sgx/runtime_genesis.json b/configs/single_node_sgx/runtime_genesis.json deleted file mode 100644 index d1d7feae3df..00000000000 --- a/configs/single_node_sgx/runtime_genesis.json +++ /dev/null @@ -1 +0,0 @@ -{"signature":{"public_key":"4Q+YiP/lflkaGC3RvuZAxgAmaKCV1fdAmBbZNHr5zyw=","signature":"1TNmA4pKxOKA58dO6m00iEyEHq6dUx0GZ9VqNiMfR1mjDfLNd/JLY0h0wsScd87DOoigIdkh/Fi7LV4GfsM6BQ=="},"untrusted_raw_value":"q2JpZFggAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkY29kZfZka2luZABnZ2VuZXNpc6Jqc3RhdGVfcm9vdFggxnK40e9W7Sirh8NiLFEUBpvdOte4+XN0mNDAHs7wlnpvc3RvcmFnZV9yZWNlaXB0omlzaWduYXR1cmVYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqcHVibGljX2tlefZsdGVlX2hhcmR3YXJlAXFyZWdpc3RyYXRpb25fdGltZRpcVC18cnJlcGxpY2FfZ3JvdXBfc2l6ZQFyc3RvcmFnZV9ncm91cF9zaXplAXgZcmVwbGljYV9ncm91cF9iYWNrdXBfc2l6ZQB4GnJlcGxpY2FfYWxsb3dlZF9zdHJhZ2dsZXJzAHggdHJhbnNhY3Rpb25fc2NoZWR1bGVyX2dyb3VwX3NpemUB"} \ No newline at end of file diff --git a/go/common/crypto/signature/signature.go b/go/common/crypto/signature/signature.go index af4be0e15d3..b09be4179e3 100644 --- a/go/common/crypto/signature/signature.go +++ b/go/common/crypto/signature/signature.go @@ -12,6 +12,7 @@ import ( "io" "io/ioutil" "os" + "sync" "golang.org/x/crypto/ed25519" @@ -79,6 +80,9 @@ var ( _ encoding.BinaryMarshaler = RawSignature{} _ encoding.BinaryUnmarshaler 
= (*RawSignature)(nil) _ encoding.BinaryUnmarshaler = (*PrivateKey)(nil) + + testPublicKeys sync.Map + blacklistedPublicKeys sync.Map ) // MapKey is a PublicKey as a fixed sized byte array for use as a map key. @@ -105,6 +109,9 @@ func (k PublicKey) Verify(context, message, sig []byte) bool { if len(sig) != SignatureSize { return false } + if _, isBlacklisted := blacklistedPublicKeys.Load(k.ToMapKey()); isBlacklisted { + return false + } data, err := digest(context, message) if err != nil { @@ -299,6 +306,12 @@ func (k *PrivateKey) UnmarshalBinary(data []byte) error { return nil } +// UnmarshalSeed decodes a RFC 8032 seed into a private key. +func (k *PrivateKey) UnmarshalSeed(seed []byte) error { + nk := ed25519.NewKeyFromSeed(seed) + return k.UnmarshalBinary(nk[:]) +} + // UnmarshalPEM decodes a PEM marshaled PrivateKey. func (k *PrivateKey) UnmarshalPEM(data []byte) error { b, err := unmarshalPEM(privPEMType, data) @@ -514,6 +527,24 @@ func (s *SignedPublicKey) Open(context []byte, pub *PublicKey) error { // nolint return s.Signed.Open(context, pub) } +// RegisterTestPublicKey registers a hardcoded test public key with the +// internal public key blacklist. +func RegisterTestPublicKey(pk PublicKey) { + testPublicKeys.Store(pk.ToMapKey(), true) +} + +// BuildPublicKeyBlacklist builds the public key blacklist. +func BuildPublicKeyBlacklist(allowTestKeys bool) { + if !allowTestKeys { + testPublicKeys.Range(func(k, v interface{}) bool { + blacklistedPublicKeys.Store(k, v) + return true + }) + } + + // Explicitly forbid other keys here. +} + func digest(context, message []byte) ([]byte, error) { if len(context) != ContextSize { return nil, errMalformedContext diff --git a/go/common/entity/entity.go b/go/common/entity/entity.go index 9a8f53a4a37..b1be783027d 100644 --- a/go/common/entity/entity.go +++ b/go/common/entity/entity.go @@ -3,6 +3,7 @@ package entity import ( "crypto/rand" + "crypto/sha512" "errors" "io/ioutil" "os" @@ -29,6 +30,9 @@ var ( _ cbor.Marshaler = (*Entity)(nil) _ cbor.Unmarshaler = (*Entity)(nil) + + testEntity Entity + testEntityPrivateKey signature.PrivateKey ) // Entity represents an entity that controls one or more Nodes and or @@ -157,6 +161,11 @@ func Generate(baseDir string) (*Entity, *signature.PrivateKey, error) { return ent, &privKey, nil } +// TestEntity returns the built-in test entity and private key. +func TestEntity() (*Entity, *signature.PrivateKey, error) { + return &testEntity, &testEntityPrivateKey, nil +} + func getPaths(baseDir string) (string, string) { return filepath.Join(baseDir, entityFilename), filepath.Join(baseDir, privKeyFilename) } @@ -182,3 +191,15 @@ func SignEntity(privateKey signature.PrivateKey, context []byte, entity *Entity) Signed: *signed, }, nil } + +func init() { + seed := sha512.Sum512_256([]byte("ekiden test entity key seed")) + if err := testEntityPrivateKey.UnmarshalSeed(seed[:]); err != nil { + panic("entity: failed to generate test entity private key") + } + testEntityPublicKey := testEntityPrivateKey.Public() + signature.RegisterTestPublicKey(testEntityPublicKey) + + testEntity.ID = testEntityPublicKey + testEntity.RegistrationTime = uint64(time.Date(2019, 6, 1, 0, 0, 0, 0, time.UTC).Unix()) +} diff --git a/go/common/node/node.go b/go/common/node/node.go index 42c76fd1bfb..9aafe778edc 100644 --- a/go/common/node/node.go +++ b/go/common/node/node.go @@ -105,6 +105,10 @@ type Runtime struct { // Capabilities are the node's capabilities for a given runtime. 
Capabilities Capabilities `codec:"capabilities"` + + // ExtraInfo is the extra per node + per runtime opaque data associated + // with the current instance. + ExtraInfo []byte `codec:"extra_info"` } func (r *Runtime) fromProto(pb *pbCommon.NodeRuntime) error { @@ -118,6 +122,8 @@ func (r *Runtime) fromProto(pb *pbCommon.NodeRuntime) error { } } + r.ExtraInfo = pb.GetExtraInfo() + return nil } @@ -131,6 +137,8 @@ func (r *Runtime) toProto() *pbCommon.NodeRuntime { pb.Capabilities.Tee = r.Capabilities.TEE.toProto() } + pb.ExtraInfo = r.ExtraInfo + return pb } diff --git a/go/ekiden/cmd/common/common.go b/go/ekiden/cmd/common/common.go index 130216fa0b1..98e9026c45a 100644 --- a/go/ekiden/cmd/common/common.go +++ b/go/ekiden/cmd/common/common.go @@ -11,12 +11,14 @@ import ( "github.com/spf13/viper" "github.com/oasislabs/ekiden/go/common" + "github.com/oasislabs/ekiden/go/common/crypto/signature" "github.com/oasislabs/ekiden/go/common/logging" ) const ( - cfgConfigFile = "config" - cfgDataDir = "datadir" + cfgConfigFile = "config" + cfgDataDir = "datadir" + cfgAllowTestKeys = "debug.allow_test_keys" ) var ( @@ -56,6 +58,7 @@ func Init() error { initFns := []func() error{ initDataDir, initLogging, + initPublicKeyBlacklist, } for _, fn := range initFns { @@ -79,10 +82,12 @@ func Logger() *logging.Logger { func RegisterRootFlags(rootCmd *cobra.Command) { rootCmd.PersistentFlags().StringVar(&cfgFile, cfgConfigFile, "", "config file") rootCmd.PersistentFlags().String(cfgDataDir, "", "data directory") + rootCmd.PersistentFlags().Bool(cfgAllowTestKeys, false, "Allow test keys (UNSAFE)") for _, v := range []string{ cfgConfigFile, cfgDataDir, + cfgAllowTestKeys, } { _ = viper.BindPFlag(v, rootCmd.PersistentFlags().Lookup(v)) } @@ -143,6 +148,11 @@ func normalizePath(f string) string { return f } +func initPublicKeyBlacklist() error { + signature.BuildPublicKeyBlacklist(viper.GetBool(cfgAllowTestKeys)) + return nil +} + // GetOutputWriter will create a file if the config string is set, // and otherwise return os.Stdout. func GetOutputWriter(cmd *cobra.Command, cfg string) (io.WriteCloser, bool, error) { diff --git a/go/ekiden/cmd/common/flags/flags.go b/go/ekiden/cmd/common/flags/flags.go index 2c9c65b8fdf..c8f98673425 100644 --- a/go/ekiden/cmd/common/flags/flags.go +++ b/go/ekiden/cmd/common/flags/flags.go @@ -11,6 +11,8 @@ const ( cfgVerbose = "verbose" cfgForce = "force" cfgRetries = "retries" + + cfgDebugTestEntity = "debug.test_entity" ) // Verbose returns true iff the verbose flag is set. @@ -54,3 +56,17 @@ func RegisterRetries(cmd *cobra.Command) { _ = viper.BindPFlag(cfgRetries, cmd.Flags().Lookup(cfgRetries)) } + +// DebugTestEntity returns true iff the test entity enable flag is set. +func DebugTestEntity() bool { + return viper.GetBool(cfgDebugTestEntity) +} + +// RegisterDebugTestEntity registers the test entity enable flag. 
+func RegisterDebugTestEntity(cmd *cobra.Command) { + if !cmd.Flags().Parsed() { + cmd.Flags().Bool(cfgDebugTestEntity, false, "use the test entity (UNSAFE)") + } + + _ = viper.BindPFlag(cfgDebugTestEntity, cmd.Flags().Lookup(cfgDebugTestEntity)) +} diff --git a/go/ekiden/cmd/debug/bootstrap/bootstrap.go b/go/ekiden/cmd/debug/bootstrap/bootstrap.go index 6026b40c5b4..deea6e5e137 100644 --- a/go/ekiden/cmd/debug/bootstrap/bootstrap.go +++ b/go/ekiden/cmd/debug/bootstrap/bootstrap.go @@ -9,6 +9,7 @@ import ( "github.com/oasislabs/ekiden/go/common/logging" cmdCommon "github.com/oasislabs/ekiden/go/ekiden/cmd/common" "github.com/oasislabs/ekiden/go/ekiden/cmd/common/background" + "github.com/oasislabs/ekiden/go/ekiden/cmd/common/flags" cmdGenesis "github.com/oasislabs/ekiden/go/ekiden/cmd/genesis" "github.com/oasislabs/ekiden/go/genesis" "github.com/oasislabs/ekiden/go/genesis/bootstrap" @@ -130,6 +131,8 @@ func registerBootstrapFlags(cmd *cobra.Command) { } { _ = viper.BindPFlag(v, cmd.Flags().Lookup(v)) } + + flags.RegisterDebugTestEntity(cmd) } // Register registers the bootstrap sub-command and all of it's children. diff --git a/go/ekiden/cmd/genesis/genesis.go b/go/ekiden/cmd/genesis/genesis.go index 9f18d3f3fdd..3255b28c0c7 100644 --- a/go/ekiden/cmd/genesis/genesis.go +++ b/go/ekiden/cmd/genesis/genesis.go @@ -15,6 +15,7 @@ import ( "github.com/oasislabs/ekiden/go/common/json" "github.com/oasislabs/ekiden/go/common/logging" "github.com/oasislabs/ekiden/go/ekiden/cmd/common" + "github.com/oasislabs/ekiden/go/ekiden/cmd/common/flags" "github.com/oasislabs/ekiden/go/genesis" registry "github.com/oasislabs/ekiden/go/registry/api" roothash "github.com/oasislabs/ekiden/go/roothash/api" @@ -167,6 +168,32 @@ func AppendRegistryState(doc *genesis.Document, entities, runtimes []string, l * regSt.Entities = append(regSt.Entities, &entity) } + if flags.DebugTestEntity() { + ent, privKey, err := entity.TestEntity() + if err != nil { + l.Error("failed to retrieve test entity", + "err", err, + ) + return err + } + + signed, err := entity.SignEntity(*privKey, registry.RegisterGenesisEntitySignatureContext, ent) + if err != nil { + l.Error("failed to sign test entity", + "err", err, + ) + return err + } + + if err = signed.Open(registry.RegisterGenesisEntitySignatureContext, ent); err != nil { + l.Error("signed entity does not round trip", + "err", err, + ) + return err + } + + regSt.Entities = append(regSt.Entities, signed) + } for _, v := range runtimes { b, err := ioutil.ReadFile(v) @@ -292,6 +319,8 @@ func registerInitGenesisFlags(cmd *cobra.Command) { } { _ = viper.BindPFlag(v, cmd.Flags().Lookup(v)) } + + flags.RegisterDebugTestEntity(cmd) } // Register registers the genesis sub-command and all of it's children.
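The genesis and flag changes above are what let a test network run without any on-disk entity material: with debug.test_entity the deterministic test entity is placed into the genesis document, and with debug.allow_test_keys its key stays usable for verification. A minimal sketch of that interaction, using only packages from this tree (illustrative, not part of the change; error handling is elided, and in the real binary the blacklist is built by initPublicKeyBlacklist during cmdCommon.Init):

package main

import (
	"fmt"

	"github.com/oasislabs/ekiden/go/common/crypto/signature"
	"github.com/oasislabs/ekiden/go/common/entity"
	registry "github.com/oasislabs/ekiden/go/registry/api"
)

func main() {
	// Without --debug.allow_test_keys, every registered test key (including
	// the test entity's, registered by the entity package's init) lands on
	// the blacklist, so its signatures no longer verify.
	signature.BuildPublicKeyBlacklist(false)

	ent, privKey, _ := entity.TestEntity()
	signed, _ := entity.SignEntity(*privKey, registry.RegisterGenesisEntitySignatureContext, ent)

	// Open goes through PublicKey.Verify, which rejects blacklisted keys,
	// so this is expected to fail unless test keys were explicitly allowed.
	err := signed.Open(registry.RegisterGenesisEntitySignatureContext, ent)
	fmt.Println("test entity signature verifies:", err == nil)
}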
diff --git a/go/ekiden/cmd/node/node.go b/go/ekiden/cmd/node/node.go index b701fe0284a..76640f26236 100644 --- a/go/ekiden/cmd/node/node.go +++ b/go/ekiden/cmd/node/node.go @@ -20,6 +20,7 @@ import ( "github.com/oasislabs/ekiden/go/dummydebug" cmdCommon "github.com/oasislabs/ekiden/go/ekiden/cmd/common" "github.com/oasislabs/ekiden/go/ekiden/cmd/common/background" + "github.com/oasislabs/ekiden/go/ekiden/cmd/common/flags" cmdGrpc "github.com/oasislabs/ekiden/go/ekiden/cmd/common/grpc" "github.com/oasislabs/ekiden/go/ekiden/cmd/common/metrics" "github.com/oasislabs/ekiden/go/ekiden/cmd/common/pprof" @@ -28,6 +29,8 @@ import ( epochtimeAPI "github.com/oasislabs/ekiden/go/epochtime/api" "github.com/oasislabs/ekiden/go/genesis" "github.com/oasislabs/ekiden/go/ias" + "github.com/oasislabs/ekiden/go/keymanager" + keymanagerAPI "github.com/oasislabs/ekiden/go/keymanager/api" keymanagerClient "github.com/oasislabs/ekiden/go/keymanager/client" "github.com/oasislabs/ekiden/go/registry" registryAPI "github.com/oasislabs/ekiden/go/registry/api" @@ -44,7 +47,7 @@ import ( workerCommon "github.com/oasislabs/ekiden/go/worker/common" "github.com/oasislabs/ekiden/go/worker/common/p2p" "github.com/oasislabs/ekiden/go/worker/compute" - "github.com/oasislabs/ekiden/go/worker/keymanager" + keymanagerWorker "github.com/oasislabs/ekiden/go/worker/keymanager" "github.com/oasislabs/ekiden/go/worker/merge" "github.com/oasislabs/ekiden/go/worker/registration" workerStorage "github.com/oasislabs/ekiden/go/worker/storage" @@ -74,18 +77,20 @@ type Node struct { grpcInternal *grpc.Server svcTmnt tmService.TendermintService - Genesis genesis.Provider - Identity *identity.Identity - Beacon beaconAPI.Backend - Epochtime epochtimeAPI.Backend - Registry registryAPI.Backend - RootHash roothashAPI.Backend - Scheduler schedulerAPI.Backend - Staking stakingAPI.Backend - Storage storageAPI.Backend - IAS *ias.IAS - Client *client.Client - KeyManager *keymanagerClient.Client + Genesis genesis.Provider + Identity *identity.Identity + Beacon beaconAPI.Backend + Epochtime epochtimeAPI.Backend + Registry registryAPI.Backend + RootHash roothashAPI.Backend + Scheduler schedulerAPI.Backend + Staking stakingAPI.Backend + Storage storageAPI.Backend + IAS *ias.IAS + Client *client.Client + + KeyManager keymanagerAPI.Backend + KeyManagerClient *keymanagerClient.Client CommonWorker *workerCommon.Worker ComputeWorker *compute.Worker @@ -131,6 +136,9 @@ func (n *Node) initBackends() error { if n.Staking, err = staking.New(n.svcMgr.Ctx, n.svcTmnt); err != nil { return err } + if n.KeyManager, err = keymanager.New(n.svcMgr.Ctx, n.Epochtime, n.Registry, n.svcTmnt); err != nil { + return err + } n.svcMgr.RegisterCleanupOnly(n.Staking, "staking backend") if n.Scheduler, err = scheduler.New(n.svcMgr.Ctx, n.Epochtime, n.Registry, n.Beacon, n.svcTmnt); err != nil { return err @@ -203,13 +211,14 @@ func (n *Node) initAndStartWorkers(logger *logging.Logger) error { } n.svcMgr.Register(n.WorkerRegistration) - // Initialize the key manager service. - kmSvc, kmEnabled, err := keymanager.New( + // Initialize the key manager worker service. 
+ kmSvc, kmEnabled, err := keymanagerWorker.New( dataDir, n.IAS, n.CommonWorker.Grpc, n.WorkerRegistration, &workerCommonCfg, + n.KeyManager, ) if err != nil { return err @@ -246,7 +255,7 @@ func (n *Node) initAndStartWorkers(logger *logging.Logger) error { n.CommonWorker, n.MergeWorker, n.IAS, - n.KeyManager, + n.KeyManagerClient, n.WorkerRegistration, ) if err != nil { @@ -473,7 +482,7 @@ func NewNode() (*Node, error) { logger.Info("starting ekiden node") // Initialize the key manager client service. - node.KeyManager, err = keymanagerClient.New(node.Registry) + node.KeyManagerClient, err = keymanagerClient.New(node.KeyManager, node.Registry) if err != nil { logger.Error("failed to initialize key manager client", "err", err, @@ -490,7 +499,7 @@ func NewNode() (*Node, error) { node.Scheduler, node.Registry, node.svcTmnt, - node.KeyManager, + node.KeyManagerClient, ) if err != nil { return nil, err @@ -559,6 +568,7 @@ func RegisterFlags(cmd *cobra.Command) { ias.RegisterFlags, keymanager.RegisterFlags, keymanagerClient.RegisterFlags, + keymanagerWorker.RegisterFlags, client.RegisterFlags, compute.RegisterFlags, p2p.RegisterFlags, @@ -571,4 +581,6 @@ func RegisterFlags(cmd *cobra.Command) { } { v(cmd) } + + flags.RegisterDebugTestEntity(cmd) } diff --git a/go/ekiden/cmd/registry/entity/entity.go b/go/ekiden/cmd/registry/entity/entity.go index b529721b86c..ae7d2bc72f3 100644 --- a/go/ekiden/cmd/registry/entity/entity.go +++ b/go/ekiden/cmd/registry/entity/entity.go @@ -117,7 +117,7 @@ func doInit(cmd *cobra.Command, args []string) { } // Generate a new entity. - ent, privKey, err := entity.Generate(dataDir) + ent, privKey, err := loadOrGenerateEntity(dataDir, true) if err != nil { logger.Error("failed to generate entity", "err", err, @@ -161,7 +161,7 @@ func doRegisterOrDeregister(cmd *cobra.Command, args []string) { os.Exit(1) } - ent, privKey, err := entity.Load(dataDir) + ent, privKey, err := loadOrGenerateEntity(dataDir, false) if err != nil { logger.Error("failed to load entity", "err", err, @@ -290,6 +290,18 @@ func doList(cmd *cobra.Command, args []string) { } } +func loadOrGenerateEntity(dataDir string, generate bool) (*entity.Entity, *signature.PrivateKey, error) { + if cmdFlags.DebugTestEntity() { + return entity.TestEntity() + } + + if generate { + return entity.Generate(dataDir) + } + + return entity.Load(dataDir) +} + // Register registers the entity sub-command and all of it's children. 
func Register(parentCmd *cobra.Command) { for _, v := range []*cobra.Command{ @@ -306,6 +318,14 @@ func Register(parentCmd *cobra.Command) { cmdFlags.RegisterRetries(deregisterCmd) cmdFlags.RegisterVerbose(listCmd) + for _, v := range []*cobra.Command{ + initCmd, + registerCmd, + deregisterCmd, + } { + cmdFlags.RegisterDebugTestEntity(v) + } + for _, v := range []*cobra.Command{ registerCmd, deregisterCmd, diff --git a/go/ekiden/cmd/registry/runtime/runtime.go b/go/ekiden/cmd/registry/runtime/runtime.go index 1dcca1f4682..ea7629e366e 100644 --- a/go/ekiden/cmd/registry/runtime/runtime.go +++ b/go/ekiden/cmd/registry/runtime/runtime.go @@ -39,8 +39,14 @@ const ( cfgStorageGroupSize = "runtime.storage_group_size" cfgTransactionSchedulerGroupSize = "runtime.transaction_scheduler_group_size" cfgGenesisState = "runtime.genesis.state" + cfgKind = "runtime.kind" + cfgKeyManager = "runtime.keymanager" + cfgOutput = "runtime.genesis.file" cfgEntity = "entity" + optKindCompute = "compute" + optKindKeyManager = "keymanager" + runtimeGenesisFilename = "runtime_genesis.json" ) @@ -54,7 +60,9 @@ var ( Use: "init_genesis", Short: "initialize a runtime for genesis", PreRun: func(cmd *cobra.Command, args []string) { + cmdFlags.RegisterDebugTestEntity(cmd) registerRuntimeFlags(cmd) + registerOutputFlag(cmd) }, Run: doInitGenesis, } @@ -64,6 +72,7 @@ var ( Short: "register a runtime", PreRun: func(cmd *cobra.Command, args []string) { cmdGrpc.RegisterClientFlags(cmd, false) + cmdFlags.RegisterDebugTestEntity(cmd) cmdFlags.RegisterRetries(cmd) registerRuntimeFlags(cmd) }, @@ -122,7 +131,7 @@ func doInitGenesis(cmd *cobra.Command, args []string) { // Write out the signed runtime registration. b := json.Marshal(signed) - if err = ioutil.WriteFile(filepath.Join(dataDir, runtimeGenesisFilename), b, 0600); err != nil { + if err = ioutil.WriteFile(filepath.Join(dataDir, viper.GetString(cfgOutput)), b, 0600); err != nil { logger.Error("failed to write signed runtime genesis registration", "err", err, ) @@ -245,7 +254,7 @@ func runtimeFromFlags() (*registry.Runtime, *signature.PrivateKey, error) { return nil, nil, fmt.Errorf("invalid TEE hardware") } - ent, privKey, err := entity.Load(viper.GetString(cfgEntity)) + ent, privKey, err := loadEntity(viper.GetString(cfgEntity)) if err != nil { logger.Error("failed to load owning entity", "err", err, @@ -253,6 +262,31 @@ func runtimeFromFlags() (*registry.Runtime, *signature.PrivateKey, error) { return nil, nil, err } + var ( + kmID signature.PublicKey + kind registry.RuntimeKind + ) + s = viper.GetString(cfgKind) + switch strings.ToLower(s) { + case optKindCompute: + if err = kmID.UnmarshalHex(viper.GetString(cfgKeyManager)); err != nil { + logger.Error("failed to parse key manager ID", + "err", err, + ) + return nil, nil, err + } + case optKindKeyManager: + kind = registry.KindKeyManager + + // Key managers don't have their own key manager. + kmID = id + default: + logger.Error("invalid runtime kind", + cfgKind, s, + ) + return nil, nil, fmt.Errorf("invalid runtime Kind") + } + // TODO: Support root upload when registering. 
gen := registry.RuntimeGenesis{} switch state := viper.GetString(cfgGenesisState); state { @@ -314,13 +348,14 @@ func runtimeFromFlags() (*registry.Runtime, *signature.PrivateKey, error) { return ®istry.Runtime{ ID: id, Genesis: gen, - Code: nil, // TBD - TEEHardware: teeHardware, ReplicaGroupSize: uint64(viper.GetInt64(cfgReplicaGroupSize)), ReplicaGroupBackupSize: uint64(viper.GetInt64(cfgReplicaGroupBackupSize)), ReplicaAllowedStragglers: uint64(viper.GetInt64(cfgReplicaAllowedStragglers)), StorageGroupSize: uint64(viper.GetInt64(cfgStorageGroupSize)), TransactionSchedulerGroupSize: uint64(viper.GetInt64(cfgTransactionSchedulerGroupSize)), + TEEHardware: teeHardware, + KeyManager: kmID, + Kind: kind, RegistrationTime: ent.RegistrationTime, }, privKey, nil } @@ -346,6 +381,26 @@ func signForRegistration(rt *registry.Runtime, privKey *signature.PrivateKey, is return signed, err } +func loadEntity(dataDir string) (*entity.Entity, *signature.PrivateKey, error) { + if cmdFlags.DebugTestEntity() { + return entity.TestEntity() + } + + return entity.Load(dataDir) +} + +func registerOutputFlag(cmd *cobra.Command) { + if !cmd.Flags().Parsed() { + cmd.Flags().String(cfgOutput, runtimeGenesisFilename, "File name of the document to be written under datadir") + } + + for _, v := range []string{ + cfgOutput, + } { + _ = viper.BindPFlag(v, cmd.Flags().Lookup(v)) + } +} + func registerRuntimeFlags(cmd *cobra.Command) { if !cmd.Flags().Parsed() { cmd.Flags().String(cfgID, "", "Runtime ID") @@ -356,6 +411,8 @@ func registerRuntimeFlags(cmd *cobra.Command) { cmd.Flags().Uint64(cfgStorageGroupSize, 1, "Number of storage nodes for the runtime") cmd.Flags().Uint64(cfgTransactionSchedulerGroupSize, 1, "Number of transaction scheduler nodes for the runtime") cmd.Flags().String(cfgGenesisState, "", "Runtime state at genesis") + cmd.Flags().String(cfgKeyManager, "", "Key Manager Runtime ID") + cmd.Flags().String(cfgKind, optKindCompute, "Kind of runtime. 
Supported values are \"compute\" and \"keymanager\"") cmd.Flags().String(cfgEntity, "", "Path to directory containing entity private key and descriptor") } @@ -368,6 +425,8 @@ func registerRuntimeFlags(cmd *cobra.Command) { cfgStorageGroupSize, cfgTransactionSchedulerGroupSize, cfgGenesisState, + cfgKeyManager, + cfgKind, cfgEntity, } { _ = viper.BindPFlag(v, cmd.Flags().Lookup(v)) @@ -384,7 +443,15 @@ func Register(parentCmd *cobra.Command) { runtimeCmd.AddCommand(v) } + for _, v := range []*cobra.Command{ + initGenesisCmd, + registerCmd, + } { + cmdFlags.RegisterDebugTestEntity(v) + } + registerRuntimeFlags(initGenesisCmd) + registerOutputFlag(initGenesisCmd) cmdGrpc.RegisterClientFlags(listCmd, false) cmdFlags.RegisterVerbose(listCmd) diff --git a/go/ekiden/node_test.go b/go/ekiden/node_test.go index 8ab95a889e7..aa300934675 100644 --- a/go/ekiden/node_test.go +++ b/go/ekiden/node_test.go @@ -47,6 +47,7 @@ var ( {"log.level.default", "DEBUG"}, {"epochtime.backend", "tendermint_mock"}, {"beacon.backend", "tendermint"}, + {"keymanager.backend", "tendermint"}, {"registry.backend", "tendermint"}, {"roothash.backend", "tendermint"}, {"roothash.tendermint.index_blocks", true}, @@ -66,6 +67,7 @@ var ( {"worker.txnscheduler.enabled", true}, {"worker.merge.enabled", true}, {"client.indexer.runtimes", []string{testRuntimeID}}, + {"debug.allow_test_keys", true}, } testRuntime = &registry.Runtime{ diff --git a/go/grpc/common/common.proto b/go/grpc/common/common.proto index e1442412b95..2626b97cf99 100644 --- a/go/grpc/common/common.proto +++ b/go/grpc/common/common.proto @@ -38,6 +38,7 @@ message Node { message NodeRuntime { bytes id = 1; Capabilities capabilities = 2; + bytes extra_info = 3; } message Capabilities { diff --git a/go/grpc/registry/runtime.proto b/go/grpc/registry/runtime.proto index 0c2037a4c7a..90ee7fe3539 100644 --- a/go/grpc/registry/runtime.proto +++ b/go/grpc/registry/runtime.proto @@ -7,21 +7,22 @@ option go_package = "github.com/oasislabs/ekiden/go/grpc/registry"; message Runtime { bytes id = 1; - bytes code = 2; - common.CapabilitiesTEE.Hardware tee_hardware = 3; + common.CapabilitiesTEE.Hardware tee_hardware = 2; - uint64 replica_group_size = 4; + uint64 replica_group_size = 3; - uint64 storage_group_size = 5; + uint64 storage_group_size = 4; - uint64 replica_group_backup_size = 6; + uint64 replica_group_backup_size = 5; - uint64 replica_allowed_stragglers = 7; + uint64 replica_allowed_stragglers = 6; - uint64 registration_time = 8; + uint64 registration_time = 7; - uint32 kind = 9; + uint32 kind = 8; + + bytes key_manager = 9; } service RuntimeRegistry { diff --git a/go/keymanager/api/api.go b/go/keymanager/api/api.go new file mode 100644 index 00000000000..7c45498a48f --- /dev/null +++ b/go/keymanager/api/api.go @@ -0,0 +1,116 @@ +// Package api implements the key manager management API and common data types. +package api + +import ( + "context" + "errors" + + "github.com/oasislabs/ekiden/go/common/cbor" + "github.com/oasislabs/ekiden/go/common/crypto/signature" + "github.com/oasislabs/ekiden/go/common/node" + "github.com/oasislabs/ekiden/go/common/pubsub" + registry "github.com/oasislabs/ekiden/go/registry/api" +) + +var ( + // ErrNoSuchKeyManager is the error returned when a key manager does not + // exist. + ErrNoSuchKeyManager = errors.New("keymanager: no such key manager") + + // TestPublicKey is the insecure hardcoded key manager public key, used + // in insecure builds when a RAK is unavailable.
+ TestPublicKey signature.PublicKey + + initResponseContext = []byte("EkKmIniR") +) + +// Status is the current key manager status. +type Status struct { + // ID is the runtime ID of the key manager. + ID signature.PublicKey `codec:"id"` + + // IsInitialized is true iff the key manager is done initializing. + IsInitialized bool `codec:"is_initialized"` + + // IsSecure is true iff the key manager is secure. + IsSecure bool `codec:"is_secure"` + + // Checksum is the key manager master secret verification checksum. + Checksum []byte `codec:"checksum"` + + // Nodes is the list of currently active key manager node IDs. + Nodes []signature.PublicKey `codec:"nodes"` + + // TODO: Policy +} + +// Backend is a key manager management implementation. +type Backend interface { + // GetStatus returns a key manager status by key manager ID. + GetStatus(context.Context, signature.PublicKey) (*Status, error) + + // GetStatuses returns all currently tracked key manager statuses. + GetStatuses(context.Context) ([]*Status, error) + + // WatchStatuses returns a channel that produces a stream of messages + // containing the key manager statuses as they change over time. + // + // Upon subscription the current status is sent immediately. + WatchStatuses() (<-chan *Status, *pubsub.Subscription) +} + +// InitResponse is the initialization RPC response, returned as part of a +// SignedInitResponse from the key manager enclave. +type InitResponse struct { + IsSecure bool `codec:"is_secure"` + Checksum []byte `codec:"checksum"` +} + +// SignedInitResponse is the signed initialization RPC response, returned +// from the key manager enclave. +type SignedInitResponse struct { + InitResponse InitResponse `codec:"init_response"` + Signature []byte `codec:"signature"` +} + +func (r *SignedInitResponse) Verify(pk signature.PublicKey) error { + raw := cbor.Marshal(r.InitResponse) + if !pk.Verify(initResponseContext, raw, r.Signature) { + return errors.New("keymanager: invalid initialization response signature") + } + return nil +} + +// VerifyExtraInfo verifies and parses the per-node + per-runtime ExtraInfo +// blob for a key manager. +func VerifyExtraInfo(rt *registry.Runtime, nodeRt *node.Runtime) (*InitResponse, error) { + var ( + hw node.TEEHardware + rak signature.PublicKey + ) + if nodeRt.Capabilities.TEE == nil || nodeRt.Capabilities.TEE.Hardware == node.TEEHardwareInvalid { + hw = node.TEEHardwareInvalid + rak = TestPublicKey + } else { + // TODO: MRENCLAVE/MRSIGNER.
+ hw = nodeRt.Capabilities.TEE.Hardware + rak = nodeRt.Capabilities.TEE.RAK + } + if hw != rt.TEEHardware { + return nil, errors.New("keymanger: TEEHardware mismatch") + } + + var untrustedSignedInitResponse SignedInitResponse + if err := cbor.Unmarshal(nodeRt.ExtraInfo, &untrustedSignedInitResponse); err != nil { + return nil, err + } + if err := untrustedSignedInitResponse.Verify(rak); err != nil { + return nil, err + } + return &untrustedSignedInitResponse.InitResponse, nil +} + +func init() { + _ = TestPublicKey.UnmarshalHex("9d41a874b80e39a40c9644e964f0e4f967100c91654bfd7666435fe906af060f") + signature.RegisterTestPublicKey(TestPublicKey) +} diff --git a/go/keymanager/client/client.go b/go/keymanager/client/client.go index d060b4d83fa..a88072d9eb4 100644 --- a/go/keymanager/client/client.go +++ b/go/keymanager/client/client.go @@ -4,6 +4,7 @@ package client import ( "context" "crypto/x509" + "encoding/base64" "sync" "github.com/pkg/errors" @@ -18,6 +19,7 @@ import ( "github.com/oasislabs/ekiden/go/common/grpc/resolver/manual" "github.com/oasislabs/ekiden/go/common/logging" "github.com/oasislabs/ekiden/go/common/node" + "github.com/oasislabs/ekiden/go/keymanager/api" registry "github.com/oasislabs/ekiden/go/registry/api" "github.com/oasislabs/ekiden/go/worker/common/enclaverpc" ) @@ -39,90 +41,176 @@ type Client struct { logger *logging.Logger - registry registry.Backend + backend api.Backend + registry registry.Backend + + state map[signature.MapKey]*clientState + kmMap map[signature.MapKey]signature.PublicKey + + debugClient *enclaverpc.Client +} + +type clientState struct { + status *api.Status conn *grpc.ClientConn client *enclaverpc.Client resolverCleanupFn func() } +func (st *clientState) kill() { + if st.resolverCleanupFn != nil { + st.resolverCleanupFn() + st.resolverCleanupFn = nil + } + if st.conn != nil { + st.conn.Close() + st.conn = nil + } +} + // CallRemote calls a runtime-specific key manager via remote EnclaveRPC. func (c *Client) CallRemote(ctx context.Context, runtimeID signature.PublicKey, data []byte) ([]byte, error) { + if c.debugClient != nil { + return c.debugClient.CallEnclave(ctx, data) + } + + c.logger.Debug("remote query", + "id", runtimeID, + "data", base64.StdEncoding.EncodeToString(data), + ) + c.RLock() defer c.RUnlock() - if c.client == nil { - return nil, ErrKeyManagerNotAvailable + + id := runtimeID.ToMapKey() + kmID := c.kmMap[id] + if kmID == nil { + if c.state[id] == nil { + return nil, ErrKeyManagerNotAvailable + } + + // The target query is for a keymanager runtime ID, probably + // replication. + kmID = runtimeID } - // TODO: The runtimeID is currently entirely ignored. `data` also contains - // a runtimeID for the purpose of separating keys. + st := c.state[kmID.ToMapKey()] + if st == nil || st.client == nil { + return nil, ErrKeyManagerNotAvailable + } - return c.client.CallEnclave(ctx, data) + return st.client.CallEnclave(ctx, data) } func (c *Client) worker() { - // TODO: The "correct" way to implement this is to schedule the key manager, - // but for now just work under the assumption that this is running on staging - // and or prod, and there is only one KM node registered at once, that all - // the runtimes will use. 
- - ch, sub := c.registry.WatchNodeList() - defer sub.Close() - - findFirstKMNode := func(l []*node.Node) *node.Node { - for _, n := range l { - if n.HasRoles(node.RoleKeyManager) { - return n + stCh, stSub := c.backend.WatchStatuses() + defer stSub.Close() + + rtCh, rtSub := c.registry.WatchRuntimes() + defer rtSub.Close() + + nlCh, nlSub := c.registry.WatchNodeList() + defer nlSub.Close() + + for { + select { + case st := <-stCh: + nl, err := c.registry.GetNodes(context.TODO()) + if err != nil { + c.logger.Error("failed to poll node list", + "err", err, + ) + continue } + c.updateState(st, nl) + case rt := <-rtCh: + c.updateRuntime(rt) + case nl := <-nlCh: + c.updateNodes(nl.Nodes) } - return nil } +} - for nl := range ch { - c.logger.Debug("updating node list", - "epoch", nl.Epoch, - ) +func (c *Client) updateRuntime(rt *registry.Runtime) { + c.Lock() + defer c.Unlock() - c.updateConnection(findFirstKMNode(nl.Nodes)) + switch rt.Kind { + case registry.KindCompute: + c.logger.Debug("set new runtime key manager", + "id", rt.ID, + "km_id", rt.KeyManager, + ) + c.kmMap[rt.ID.ToMapKey()] = rt.KeyManager + case registry.KindKeyManager: + c.kmMap[rt.ID.ToMapKey()] = rt.ID + default: } } -func (c *Client) updateConnection(n *node.Node) { - if n == nil { - c.logger.Error("failed to update connection, no key manager nodes found") - return - } +func (c *Client) updateState(status *api.Status, nodeList []*node.Node) { + c.logger.Debug("updating connection state", + "id", status.ID, + ) - if n.Certificate == nil { - // TODO: The registry should reject such registrations, so this should never happen. - c.logger.Error("key manager node registered without certificate, refusing to communicate", - "node_id", n.ID, - ) - return + nodeMap := make(map[signature.MapKey]*node.Node) + for _, n := range nodeList { + nodeMap[n.ID.ToMapKey()] = n } - // TODO: Only update the connection if the key or address changed. c.Lock() defer c.Unlock() - cert, err := n.Certificate.Parse() - if err != nil { - c.logger.Error("failed to parse key manager certificate", - "err", err, - ) + idKey := status.ID.ToMapKey() + st := c.state[idKey] + + // It's not possible to service requests for this key manager. + if !status.IsInitialized || len(status.Nodes) == 0 { + // Kill the conn and return. + if st != nil { + st.kill() + c.state[idKey] = nil + } + return } + + // Build the new state. certPool := x509.NewCertPool() - certPool.AddCert(cert) + var addresses []resolver.Address + for _, v := range status.Nodes { + n := nodeMap[v.ToMapKey()] + if n == nil { + c.logger.Warn("key manager node missing descriptor", + "id", n.ID, + ) + continue + } + + cert, err := n.Certificate.Parse() + if err != nil { + c.logger.Error("failed to parse key manager certificate", + "id", n.ID, + "err", err, + ) + continue + } + certPool.AddCert(cert) + + for _, addr := range n.Addresses { + addresses = append(addresses, resolver.Address{Addr: addr.String()}) + } + } + creds := credentials.NewClientTLSFromCert(certPool, "ekiden-node") opts := grpc.WithTransportCredentials(creds) - if c.resolverCleanupFn != nil { - c.resolverCleanupFn() - c.resolverCleanupFn = nil - } - if c.conn != nil { - c.conn.Close() - c.conn = nil + // TODO: This probably could skip updating the connection sometimes. + + // Kill the old state if it exists. 
+ if st != nil { + st.kill() + c.state[idKey] = nil } // Note: While this may look screwed up, the resolver needs the client conn @@ -136,25 +224,42 @@ func (c *Client) updateConnection(n *node.Node) { ) return } - var addresses []resolver.Address - for _, addr := range n.Addresses { - addresses = append(addresses, resolver.Address{Addr: addr.String()}) - } manualResolver.NewAddress(addresses) c.logger.Debug("updated connection", - "node", n, + "id", status.ID, ) - c.client = enclaverpc.NewFromConn(conn, kmEndpoint) - c.conn = conn - c.resolverCleanupFn = cleanupFn + c.state[idKey] = &clientState{ + status: status, + conn: conn, + client: enclaverpc.NewFromConn(conn, kmEndpoint), + resolverCleanupFn: cleanupFn, + } +} + +func (c *Client) updateNodes(nodeList []*node.Node) { + var statuses []*api.Status + + // This is ok because the caller's leaf functions are the only thing + // that mutates the status list. + c.RLock() + for _, v := range c.state { + statuses = append(statuses, v.status) + } + c.RUnlock() + + for _, v := range statuses { + c.updateState(v, nodeList) + } } // New creates a new key manager client instance. -func New(registryBackend registry.Backend) (*Client, error) { +func New(backend api.Backend, registryBackend registry.Backend) (*Client, error) { c := &Client{ logger: logging.GetLogger("keymanager/client"), + state: make(map[signature.MapKey]*clientState), + kmMap: make(map[signature.MapKey]signature.PublicKey), } if debugAddress := viper.GetString(cfgDebugClientAddress); debugAddress != "" { @@ -165,13 +270,15 @@ func New(registryBackend registry.Backend) (*Client, error) { return nil, errors.Wrap(err, "keymanager/client: failed to create debug client") } - c.client = client + c.debugClient = client return c, nil } // Standard configuration watches the various backends. + c.backend = backend c.registry = registryBackend + go c.worker() return c, nil diff --git a/go/keymanager/init.go b/go/keymanager/init.go new file mode 100644 index 00000000000..5b51abe865f --- /dev/null +++ b/go/keymanager/init.go @@ -0,0 +1,56 @@ +// Package keymanager implements the key manager backend. +package keymanager + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + epochtime "github.com/oasislabs/ekiden/go/epochtime/api" + "github.com/oasislabs/ekiden/go/keymanager/api" + "github.com/oasislabs/ekiden/go/keymanager/tendermint" + registry "github.com/oasislabs/ekiden/go/registry/api" + "github.com/oasislabs/ekiden/go/tendermint/service" +) + +const cfgBackend = "keymanager.backend" + +// New constructs a new Backend based on the configuration flags. +func New( + ctx context.Context, + timeSource epochtime.Backend, + registry registry.Backend, + service service.TendermintService, +) (api.Backend, error) { + backend := viper.GetString(cfgBackend) + + var ( + impl api.Backend + err error + ) + + switch strings.ToLower(backend) { + case tendermint.BackendName: + impl, err = tendermint.New(ctx, timeSource, service) + default: + return nil, fmt.Errorf("keymanager: unsupported backend: '%v'", backend) + } + + return impl, err +} + +// RegisterFlags registers the configuration flags with the provided command. 
+func RegisterFlags(cmd *cobra.Command) { + if !cmd.Flags().Parsed() { + cmd.Flags().String(cfgBackend, tendermint.BackendName, "Key manager backend") + } + + for _, v := range []string{ + cfgBackend, + } { + _ = viper.BindPFlag(v, cmd.Flags().Lookup(v)) + } +} diff --git a/go/keymanager/tendermint/tendermint.go b/go/keymanager/tendermint/tendermint.go new file mode 100644 index 00000000000..0c3983b34b3 --- /dev/null +++ b/go/keymanager/tendermint/tendermint.go @@ -0,0 +1,163 @@ +// Package tendermint provides the tendermint backed key manager management +// implementation. +package tendermint + +import ( + "bytes" + "context" + + "github.com/eapache/channels" + "github.com/pkg/errors" + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/oasislabs/ekiden/go/common/cbor" + "github.com/oasislabs/ekiden/go/common/crypto/signature" + "github.com/oasislabs/ekiden/go/common/logging" + "github.com/oasislabs/ekiden/go/common/pubsub" + epochtime "github.com/oasislabs/ekiden/go/epochtime/api" + "github.com/oasislabs/ekiden/go/keymanager/api" + tmapi "github.com/oasislabs/ekiden/go/tendermint/api" + app "github.com/oasislabs/ekiden/go/tendermint/apps/keymanager" + "github.com/oasislabs/ekiden/go/tendermint/service" +) + +// BackendName is the name of the backend. +const BackendName = "tendermint" + +type tendermintBackend struct { + logger *logging.Logger + + service service.TendermintService + notifier *pubsub.Broker +} + +func (r *tendermintBackend) GetStatus(ctx context.Context, id signature.PublicKey) (*api.Status, error) { + query := tmapi.QueryGetByIDRequest{ + ID: id, + } + + response, err := r.service.Query(app.QueryGetStatus, query, 0) + if err != nil { + return nil, errors.Wrap(err, "keymanager/tendermint: get status query failed") + } + if response == nil { + return nil, api.ErrNoSuchKeyManager + } + + var status api.Status + if err = cbor.Unmarshal(response, &status); err != nil { + return nil, errors.Wrap(err, "keymanager/tendermint: get status malformed response") + } + + return &status, nil +} + +func (r *tendermintBackend) GetStatuses(ctx context.Context) ([]*api.Status, error) { + response, err := r.service.Query(app.QueryGetStatuses, nil, 0) + if err != nil { + return nil, errors.Wrap(err, "keymanager/tendermint: get statuses query failed") + } + + var statuses []*api.Status + if err = cbor.Unmarshal(response, &statuses); err != nil { + return nil, errors.Wrap(err, "keymanager/tendermint: get statuses malformed response") + } + + return statuses, nil +} + +func (r *tendermintBackend) WatchStatuses() (<-chan *api.Status, *pubsub.Subscription) { + sub := r.notifier.Subscribe() + ch := make(chan *api.Status) + sub.Unwrap(ch) + + return ch, sub +} + +func (r *tendermintBackend) worker(ctx context.Context) { + sub, err := r.service.Subscribe("keymanager-worker", app.QueryApp) + if err != nil { + r.logger.Error("failed to subscribe", + "err", err, + ) + return + } + defer r.service.Unsubscribe("keymanager-worker", app.QueryApp) // nolint: errcheck + + for { + var event interface{} + + select { + case msg := <-sub.Out(): + event = msg.Data() + case <-sub.Cancelled(): + return + case <-ctx.Done(): + return + } + + switch ev := event.(type) { + case tmtypes.EventDataNewBlock: + r.onEventDataNewBlock(ev) + default: + } + } +} + +func (r *tendermintBackend) onEventDataNewBlock(ev tmtypes.EventDataNewBlock) { + tags := ev.ResultBeginBlock.GetTags() + tags = append(tags, ev.ResultEndBlock.GetTags()...) 
+ + for _, pair := range tags { + if bytes.Equal(pair.GetKey(), app.TagStatusUpdate) { + var statuses []*api.Status + if err := cbor.Unmarshal(pair.GetValue(), &statuses); err != nil { + r.logger.Error("worker: failed to get statuses from tag", + "err", err, + ) + continue + } + + for _, status := range statuses { + r.notifier.Broadcast(status) + } + } + } +} + +// New constructs a new tendermint backed key manager management Backend +// instance. +func New(ctx context.Context, timeSource epochtime.Backend, service service.TendermintService) (api.Backend, error) { + // We can only work with a block-based epochtime. + blockTimeSource, ok := timeSource.(epochtime.BlockBackend) + if !ok { + return nil, errors.New("keymanager/tendermint: need a block-based epochtime backend") + } + + app := app.New(blockTimeSource) + if err := service.RegisterApplication(app); err != nil { + return nil, errors.Wrap(err, "keymanager/tendermint: failed to register app") + } + + r := &tendermintBackend{ + logger: logging.GetLogger("keymanager/tendermint"), + service: service, + } + r.notifier = pubsub.NewBrokerEx(func(ch *channels.InfiniteChannel) { + statuses, err := r.GetStatuses(ctx) + if err != nil { + r.logger.Error("status notifier: unable to get a list of statuses", + "err", err, + ) + return + } + + wr := ch.In() + for _, v := range statuses { + wr <- v + } + }) + go r.worker(ctx) + + return r, nil +} diff --git a/go/registry/api/api.go b/go/registry/api/api.go index 0caeb39663d..6e18bf6ce39 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -341,6 +341,18 @@ func VerifyRegisterRuntimeArgs(logger *logging.Logger, sigRt *SignedRuntime, isG } // TODO: Who should sign the runtime? Current compute node assumes an entity (deployer). + switch rt.Kind { + case KindCompute: + if rt.ID.Equal(rt.KeyManager) { + return nil, ErrInvalidArgument + } + case KindKeyManager: + if !rt.ID.Equal(rt.KeyManager) { + return nil, ErrInvalidArgument + } + default: + return nil, ErrInvalidArgument + } if !isGenesis && !rt.Genesis.StateRoot.IsEmpty() { // TODO: Verify storage receipt for the state root, reject such registrations for now. diff --git a/go/registry/api/runtime.go b/go/registry/api/runtime.go index 96c594fae78..40a4719f566 100644 --- a/go/registry/api/runtime.go +++ b/go/registry/api/runtime.go @@ -42,9 +42,6 @@ type Runtime struct { // Genesis is the runtime genesis information. Genesis RuntimeGenesis `codec:"genesis"` - // Code is the runtime code body. - Code []byte `codec:"code"` - // ReplicaGroupSize is the size of the computation group. ReplicaGroupSize uint64 `codec:"replica_group_size"` @@ -69,6 +66,9 @@ type Runtime struct { // TEEHardware specifies the runtime's TEE hardware requirements. TEEHardware node.TEEHardware `codec:"tee_hardware"` + + // KeyManager is the key manager runtime ID for this runtime. + KeyManager signature.PublicKey `codec:"key_manager"` } // String returns a string representation of itself. @@ -101,10 +101,14 @@ func (c *Runtime) FromProto(pb *pbRegistry.Runtime) error { return err } - c.Code = append([]byte{}, pb.GetCode()...) 
if err := c.TEEHardware.FromProto(pb.GetTeeHardware()); err != nil { return err } + + if err := c.KeyManager.UnmarshalBinary(pb.GetKeyManager()); err != nil { + return err + } + c.ReplicaGroupSize = pb.GetReplicaGroupSize() c.ReplicaGroupBackupSize = pb.GetReplicaGroupBackupSize() c.ReplicaAllowedStragglers = pb.GetReplicaAllowedStragglers() @@ -123,10 +127,12 @@ func (c *Runtime) ToProto() *pbRegistry.Runtime { if pb.Id, err = c.ID.MarshalBinary(); err != nil { panic(err) } - pb.Code = append([]byte{}, c.Code...) if pb.TeeHardware, err = c.TEEHardware.ToProto(); err != nil { panic(err) } + if pb.KeyManager, err = c.KeyManager.MarshalBinary(); err != nil { + panic(err) + } pb.ReplicaGroupSize = c.ReplicaGroupSize pb.ReplicaGroupBackupSize = c.ReplicaGroupBackupSize pb.ReplicaAllowedStragglers = c.ReplicaAllowedStragglers diff --git a/go/registry/api/runtime_test.go b/go/registry/api/runtime_test.go index 3a669925824..0610645e7bc 100644 --- a/go/registry/api/runtime_test.go +++ b/go/registry/api/runtime_test.go @@ -13,12 +13,12 @@ func TestSerialization(t *testing.T) { key, _, _ := ed25519.GenerateKey(nil) c := Runtime{ ID: signature.PublicKey(key), - Code: []byte{0x12, 0x13, 0x14, 0x15, 0x16}, TEEHardware: node.TEEHardwareIntelSGX, ReplicaGroupSize: 63, ReplicaGroupBackupSize: 72, ReplicaAllowedStragglers: 81, StorageGroupSize: 90, + KeyManager: signature.PublicKey(key), } cp := c.ToProto() diff --git a/go/registry/tests/tester.go b/go/registry/tests/tester.go index d155da40b6a..add6a26426b 100644 --- a/go/registry/tests/tester.go +++ b/go/registry/tests/tester.go @@ -650,7 +650,6 @@ func NewTestRuntime(seed []byte, entity *TestEntity) (*TestRuntime, error) { rt.Runtime = &api.Runtime{ ID: rt.PrivateKey.Public(), - Code: []byte("tu ne cede malis, sed contra audentior ito"), ReplicaGroupSize: 3, ReplicaGroupBackupSize: 5, ReplicaAllowedStragglers: 1, diff --git a/go/tendermint/apps/keymanager/api.go b/go/tendermint/apps/keymanager/api.go new file mode 100644 index 00000000000..97e9677bd6c --- /dev/null +++ b/go/tendermint/apps/keymanager/api.go @@ -0,0 +1,35 @@ +// Package keymanager implements the key manager management application. +package keymanager + +import ( + "github.com/oasislabs/ekiden/go/tendermint/api" +) + +const ( + // TransactionTag is a unique byte to identify transactions for + // the key manager application. + TransactionTag byte = 0x07 + + // AppName is the ABCI application name. + AppName string = "999_keymanager" +) + +var ( + // TagStatusUpdate is an ABCI transaction tag for a key manager status + // update (value is a CBOR serialized key manager status). + TagStatusUpdate = []byte("keymanager.status") + + // QueryApp is a query for filtering transactions processed by the + // key manager application. + QueryApp = api.QueryForEvent(api.TagApplication, []byte(AppName)) +) + +const ( + // QueryGetStatus is a path for a GetStatus query. + QueryGetStatus = AppName + "/status" + + // QueryGetStatuses is a path for a GetStatuses query. + QueryGetStatuses = AppName + "/statuses" +) + +// TODO: Policy updates etc here.
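The tags and query paths above are what the tendermint backend earlier in this change serves through the keymanager api.Backend interface (GetStatus, GetStatuses, WatchStatuses). A minimal sketch of a consumer of that interface follows; the function name is illustrative, error handling is elided, and the usual pubsub Subscription.Close cleanup is assumed:

import (
	"context"
	"fmt"

	"github.com/oasislabs/ekiden/go/keymanager/api"
)

func dumpAndWatchStatuses(ctx context.Context, km api.Backend) {
	// One-shot query of all currently known key manager statuses.
	statuses, _ := km.GetStatuses(ctx)
	for _, status := range statuses {
		fmt.Printf("key manager %v initialized=%v secure=%v nodes=%d\n",
			status.ID, status.IsInitialized, status.IsSecure, len(status.Nodes))
	}

	// Subscribe to the status updates emitted on each epoch transition.
	ch, sub := km.WatchStatuses()
	defer sub.Close()
	for status := range ch {
		_ = status // React to the updated status here.
	}
}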
diff --git a/go/tendermint/apps/keymanager/keymanager.go b/go/tendermint/apps/keymanager/keymanager.go new file mode 100644 index 00000000000..589378cddf6 --- /dev/null +++ b/go/tendermint/apps/keymanager/keymanager.go @@ -0,0 +1,251 @@ +package keymanager + +import ( + "bytes" + "encoding/hex" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/abci/types" + + "github.com/oasislabs/ekiden/go/common/cbor" + "github.com/oasislabs/ekiden/go/common/logging" + "github.com/oasislabs/ekiden/go/common/node" + epochtime "github.com/oasislabs/ekiden/go/epochtime/api" + "github.com/oasislabs/ekiden/go/genesis" + "github.com/oasislabs/ekiden/go/keymanager/api" + registry "github.com/oasislabs/ekiden/go/registry/api" + "github.com/oasislabs/ekiden/go/tendermint/abci" + tmapi "github.com/oasislabs/ekiden/go/tendermint/api" + registryapp "github.com/oasislabs/ekiden/go/tendermint/apps/registry" +) + +type keymanagerApplication struct { + logger *logging.Logger + state *abci.ApplicationState + + timeSource epochtime.BlockBackend +} + +func (app *keymanagerApplication) Name() string { + return AppName +} + +func (app *keymanagerApplication) TransactionTag() byte { + return TransactionTag +} + +func (app *keymanagerApplication) Blessed() bool { + return false +} + +func (app *keymanagerApplication) GetState(height int64) (interface{}, error) { + return newImmutableState(app.state, height) +} + +func (app *keymanagerApplication) OnRegister(state *abci.ApplicationState, queryRouter abci.QueryRouter) { + app.state = state + + // Register query handlers. + queryRouter.AddRoute(QueryGetStatus, tmapi.QueryGetByIDRequest{}, app.queryGetStatus) + queryRouter.AddRoute(QueryGetStatuses, nil, app.queryGetStatuses) +} + +func (app *keymanagerApplication) OnCleanup() {} + +func (app *keymanagerApplication) SetOption(request types.RequestSetOption) types.ResponseSetOption { + return types.ResponseSetOption{} +} + +func (app *keymanagerApplication) CheckTx(ctx *abci.Context, tx []byte) error { + // TODO: Add policy support. + return errors.New("tendermint/keymanager: transactions not supported yet") +} + +func (app *keymanagerApplication) ForeignCheckTx(ctx *abci.Context, other abci.Application, tx []byte) error { + return nil +} + +func (app *keymanagerApplication) InitChain(ctx *abci.Context, request types.RequestInitChain, doc *genesis.Document) error { + // TODO: Implement support for this, once it is sensible to do so. + // Note: Registry app needs to be moved above the keymanager one. + return nil +} + +func (app *keymanagerApplication) BeginBlock(ctx *abci.Context, request types.RequestBeginBlock) error { + if changed, epoch := app.state.EpochChanged(app.timeSource); changed { + return app.onEpochChange(ctx, epoch) + } + return nil +} + +func (app *keymanagerApplication) DeliverTx(ctx *abci.Context, tx []byte) error { + // TODO: Add policy support. 
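+ // Until policy support lands, every transaction is rejected outright.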
+ return errors.New("tendermint/keymanager: transactions not supported yet") +} + +func (app *keymanagerApplication) ForeignDeliverTx(ctx *abci.Context, other abci.Application, tx []byte) error { + return nil +} + +func (app *keymanagerApplication) EndBlock(request types.RequestEndBlock) (types.ResponseEndBlock, error) { + return types.ResponseEndBlock{}, nil +} + +func (app *keymanagerApplication) FireTimer(ctx *abci.Context, timer *abci.Timer) {} + +func (app *keymanagerApplication) queryGetStatus(s interface{}, r interface{}) ([]byte, error) { + state := s.(*immutableState) + request := r.(*tmapi.QueryGetByIDRequest) + + status, err := state.GetStatus(request.ID) + if err != nil { + return nil, err + } + return cbor.Marshal(status), nil +} + +func (app *keymanagerApplication) queryGetStatuses(s interface{}, r interface{}) ([]byte, error) { + state := s.(*immutableState) + + statuses, err := state.GetStatuses() + if err != nil { + return nil, err + } + return cbor.Marshal(statuses), nil +} + +func (app *keymanagerApplication) onEpochChange(ctx *abci.Context, epoch epochtime.EpochTime) error { + tree := app.state.DeliverTxTree() + + // Query the runtime and node lists. + regState := registryapp.NewMutableState(tree) + runtimes, _ := regState.GetRuntimes() + nodes, _ := regState.GetNodes() + registry.SortNodeList(nodes) + + // Recalculate all the key manager statuses. + // + // Note: This assumes that once a runtime is registered, it never expires. + var toEmit []*api.Status + state := NewMutableState(app.state.DeliverTxTree()) + for _, rt := range runtimes { + if rt.Kind != registry.KindKeyManager { + continue + } + + var forceEmit bool + oldStatus, err := state.GetStatus(rt.ID) + if err != nil { + // This is fatal, as it suggests state corruption. + app.logger.Error("failed to query key manager status", + "id", rt.ID, + "err", err, + ) + return errors.Wrap(err, "failed to query key manager status") + } + if oldStatus == nil { + // This must be a new key manager runtime. + forceEmit = true + oldStatus = &api.Status{ + ID: rt.ID, + } + } + + newStatus := app.generateStatus(rt, oldStatus, nodes) + if forceEmit || !bytes.Equal(cbor.Marshal(oldStatus), cbor.Marshal(newStatus)) { + app.logger.Debug("status updated", + "id", newStatus.ID, + "is_initialized", newStatus.IsInitialized, + "is_secure", newStatus.IsSecure, + "checksum", hex.EncodeToString(newStatus.Checksum), + "nodes", newStatus.Nodes, + ) + + // Set, enqueue for emit. + state.setStatus(newStatus) + toEmit = append(toEmit, newStatus) + } + } + + // Emit the update event if required. + if len(toEmit) > 0 { + ctx.EmitTag(tmapi.TagApplication, []byte(app.Name())) + ctx.EmitTag(TagStatusUpdate, cbor.Marshal(toEmit)) + } + + return nil +} + +func (app *keymanagerApplication) generateStatus(kmrt *registry.Runtime, oldStatus *api.Status, nodes []*node.Node) *api.Status { + status := &api.Status{ + ID: kmrt.ID, + IsInitialized: oldStatus.IsInitialized, + IsSecure: oldStatus.IsSecure, + Checksum: oldStatus.Checksum, + } + + for _, n := range nodes { + if !n.HasRoles(node.RoleKeyManager) { + continue + } + + var nodeRt *node.Runtime + for _, rt := range n.Runtimes { + if rt.ID.Equal(kmrt.ID) { + nodeRt = rt + break + } + } + if nodeRt == nil { + continue + } + + initResponse, err := api.VerifyExtraInfo(kmrt, nodeRt) + if err != nil { + app.logger.Error("failed to validate ExtraInfo", + "err", err, + "id", kmrt.ID, + "node_id", n.ID, + ) + continue + } + + if status.IsInitialized { + // Already initialized. 
Check to see if it should be added to + // the node list. + if initResponse.IsSecure != status.IsSecure { + app.logger.Error("Security status mismatch for runtime", + "id", kmrt.ID, + "node_id", n.ID, + ) + continue + } + if !bytes.Equal(initResponse.Checksum, status.Checksum) { + app.logger.Error("Checksum mismatch for runtime", + "id", kmrt.ID, + "node_id", n.ID, + ) + continue + } + } else { + // Not initialized. The first node gets to be the source + // of truth, every other node will sync off it. + + // TODO: Sanity check IsSecure/Checksum. + status.IsSecure = initResponse.IsSecure + status.IsInitialized = true + status.Checksum = initResponse.Checksum + } + + status.Nodes = append(status.Nodes, n.ID) + } + + return status +} + +func New(timeSource epochtime.BlockBackend) abci.Application { + return &keymanagerApplication{ + logger: logging.GetLogger("tendermint/keymanager"), + timeSource: timeSource, + } +} diff --git a/go/tendermint/apps/keymanager/state.go b/go/tendermint/apps/keymanager/state.go new file mode 100644 index 00000000000..c4352c6eb40 --- /dev/null +++ b/go/tendermint/apps/keymanager/state.go @@ -0,0 +1,95 @@ +package keymanager + +import ( + "fmt" + + "github.com/tendermint/iavl" + + "github.com/oasislabs/ekiden/go/common/cbor" + "github.com/oasislabs/ekiden/go/common/crypto/signature" + "github.com/oasislabs/ekiden/go/keymanager/api" + "github.com/oasislabs/ekiden/go/tendermint/abci" +) + +const stateStatusMap = "keymanager/status/%s" + +var () + +type immutableState struct { + *abci.ImmutableState +} + +func (st *immutableState) GetStatuses() ([]*api.Status, error) { + rawStatuses, err := st.getStatusesRaw() + if err != nil { + return nil, err + } + + var statuses []*api.Status + for _, raw := range rawStatuses { + var status api.Status + if err = cbor.Unmarshal(raw, &status); err != nil { + return nil, err + } + statuses = append(statuses, &status) + } + + return statuses, nil +} + +func (st *immutableState) getStatusesRaw() ([][]byte, error) { + var rawVec [][]byte + st.Snapshot.IterateRangeInclusive( + []byte(fmt.Sprintf(stateStatusMap, "")), + []byte(fmt.Sprintf(stateStatusMap, abci.LastID)), + true, + func(key, value []byte, version int64) bool { + rawVec = append(rawVec, value) + return false + }, + ) + + return rawVec, nil +} + +func (st *immutableState) GetStatus(id signature.PublicKey) (*api.Status, error) { + _, raw := st.Snapshot.Get([]byte(fmt.Sprintf(stateStatusMap, id.String()))) + if raw == nil { + return nil, nil + } + + var status api.Status + if err := cbor.Unmarshal(raw, &status); err != nil { + return nil, err + } + return &status, nil +} + +func newImmutableState(state *abci.ApplicationState, version int64) (*immutableState, error) { + inner, err := abci.NewImmutableState(state, version) + if err != nil { + return nil, err + } + return &immutableState{inner}, nil +} + +// MutableState is a mutable key manager state wrapper. +type MutableState struct { + *immutableState + + tree *iavl.MutableTree +} + +func (st *MutableState) setStatus(status *api.Status) { + st.tree.Set([]byte(fmt.Sprintf(stateStatusMap, status.ID.String())), cbor.Marshal(status)) +} + +// NewMutableState creates a new mutable key manager state wrapper. 
+func NewMutableState(tree *iavl.MutableTree) *MutableState { + inner := &abci.ImmutableState{Snapshot: tree.ImmutableTree} + + return &MutableState{ + immutableState: &immutableState{inner}, + tree: tree, + } +} diff --git a/go/tendermint/apps/registry/registry.go b/go/tendermint/apps/registry/registry.go index 87abd2f46e5..bd4e052c800 100644 --- a/go/tendermint/apps/registry/registry.go +++ b/go/tendermint/apps/registry/registry.go @@ -195,7 +195,7 @@ func (app *registryApplication) FireTimer(*abci.Context, *abci.Timer) { func (app *registryApplication) onEpochChange(ctx *abci.Context, epoch epochtime.EpochTime) error { state := NewMutableState(app.state.DeliverTxTree()) - nodes, err := state.getNodes() + nodes, err := state.GetNodes() if err != nil { app.logger.Error("onEpochChange: failed to get nodes", "err", err, diff --git a/go/tendermint/apps/registry/state.go b/go/tendermint/apps/registry/state.go index 3638619d00f..45845e23d22 100644 --- a/go/tendermint/apps/registry/state.go +++ b/go/tendermint/apps/registry/state.go @@ -82,7 +82,7 @@ func (s *immutableState) GetNode(id signature.PublicKey) (*node.Node, error) { return &node, nil } -func (s *immutableState) getNodes() ([]*node.Node, error) { +func (s *immutableState) GetNodes() ([]*node.Node, error) { items, err := s.getAll(stateNodeMap, &node.Node{}) if err != nil { return nil, err @@ -98,7 +98,7 @@ func (s *immutableState) getNodes() ([]*node.Node, error) { } func (s *immutableState) getNodesRaw() ([]byte, error) { - nodes, err := s.getNodes() + nodes, err := s.GetNodes() if err != nil { return nil, err } diff --git a/go/tendermint/apps/roothash/roothash.go b/go/tendermint/apps/roothash/roothash.go index 4d6242395a4..b9e1ff95af0 100644 --- a/go/tendermint/apps/roothash/roothash.go +++ b/go/tendermint/apps/roothash/roothash.go @@ -170,7 +170,9 @@ func (app *rootHashApplication) onEpochChange(ctx *abci.Context, epoch epochtime runtimes, _ := regState.GetRuntimes() newDescriptors := make(map[signature.MapKey]*registry.Runtime) for _, v := range runtimes { - newDescriptors[v.ID.ToMapKey()] = v + if v.Kind == registry.KindCompute { + newDescriptors[v.ID.ToMapKey()] = v + } } // Explicitly query the beacon for the epoch. diff --git a/go/worker/common/host/sandboxed.go b/go/worker/common/host/sandboxed.go index 752a6e203a8..02b640a438e 100644 --- a/go/worker/common/host/sandboxed.go +++ b/go/worker/common/host/sandboxed.go @@ -82,7 +82,7 @@ var ( // OnProcessStart is the function called after a worker process has been // started. -type OnProcessStart func(*protocol.Protocol) error +type OnProcessStart func(*protocol.Protocol, *node.CapabilityTEE) error // ProxySpecification contains all necessary details about a single proxy. type ProxySpecification struct { @@ -742,6 +742,7 @@ func (h *sandboxedHost) spawnWorker() (*process, error) { // nolint: gocyclo switch h.teeHardware { case node.TEEHardwareInvalid: // No initialization needed. 
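+ // Explicitly record the absence of a TEE so that the post-start hook receives a nil CapabilityTEE.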
+ p.capabilityTEE = nil case node.TEEHardwareIntelSGX: if err = h.initCapabilityTEESgx(p); err != nil { return nil, errors.Wrap(err, "worker: error initializing SGX CapabilityTEE") @@ -754,7 +755,7 @@ func (h *sandboxedHost) spawnWorker() (*process, error) { // nolint: gocyclo } if h.onProcessStart != nil { - if err = h.onProcessStart(proto); err != nil { + if err = h.onProcessStart(proto, p.capabilityTEE); err != nil { return nil, errors.Wrap(err, "worker: process post-start hook failed") } } diff --git a/go/worker/keymanager/keymanager.go b/go/worker/keymanager/keymanager.go index 9346973aef1..1edcd85c64e 100644 --- a/go/worker/keymanager/keymanager.go +++ b/go/worker/keymanager/keymanager.go @@ -3,9 +3,11 @@ package keymanager import ( "context" + "encoding/hex" "fmt" "io" "strings" + "sync" "time" "github.com/pkg/errors" @@ -20,6 +22,7 @@ import ( "github.com/oasislabs/ekiden/go/common/node" "github.com/oasislabs/ekiden/go/common/service" "github.com/oasislabs/ekiden/go/ias" + "github.com/oasislabs/ekiden/go/keymanager/api" workerCommon "github.com/oasislabs/ekiden/go/worker/common" "github.com/oasislabs/ekiden/go/worker/common/host" "github.com/oasislabs/ekiden/go/worker/common/host/protocol" @@ -33,6 +36,7 @@ const ( cfgRuntimeLoader = "worker.keymanager.runtime.loader" cfgRuntimeBinary = "worker.keymanager.runtime.binary" cfgRuntimeID = "worker.keymanager.runtime.id" + cfgMayGenerate = "worker.keymanager.may_generate" rpcCallTimeout = 5 * time.Second ) @@ -40,11 +44,15 @@ const ( var ( _ service.BackgroundService = (*worker)(nil) + errMalformedResponse = fmt.Errorf("worker/keymanager: malformed response from worker") + emptyRoot hash.Hash ) type worker struct { - enabled bool + sync.Mutex + + logger *logging.Logger ctx context.Context cancelCtx context.CancelFunc @@ -57,9 +65,12 @@ type worker struct { localStorage *host.LocalStorage grpc *grpc.Server - registration *registration.Registration + registration *registration.Registration + enclaveStatus *api.SignedInitResponse + backend api.Backend - logger *logging.Logger + enabled bool + mayGenerate bool } func (w *worker) Name() string { @@ -162,7 +173,7 @@ func (w *worker) callLocal(ctx context.Context, data []byte) ([]byte, error) { w.logger.Error("malformed response from worker", "response", response, ) - return nil, fmt.Errorf("worker/keymanager: malformed response from worker") + return nil, errMalformedResponse } return resp.Response, nil @@ -173,19 +184,40 @@ func (w *worker) callLocal(ctx context.Context, data []byte) ([]byte, error) { } } -func (w *worker) onProcessStart(proto *protocol.Protocol) error { +func (w *worker) onProcessStart(proto *protocol.Protocol, tee *node.CapabilityTEE) error { + // TODO: A more natural place to do this is probably on node + // registration, or better yet periodically based on the BFT + // component. + // Initialize the key manager. type InitRequest struct { - // TODO: At some point this needs the policy, checksum, peers, etc. + Checksum []byte `codec:"checksum"` + MayGenerate bool `codec:"may_generate"` } type InitCall struct { // nolint: maligned Method string `codec:"method"` Args InitRequest `codec:"args"` } + // Query the BFT component for the policy, checksum, peers (as available). 
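+ // A missing status is not fatal here: a brand new key manager runtime has no status yet, so fall back to an empty status and let the enclave bootstrap (subject to may_generate below).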
+ status, err := w.backend.GetStatus(w.ctx, w.runtimeID) + if err != nil { + if err != api.ErrNoSuchKeyManager { + w.logger.Error("failed to query key manager status", + "err", err, + "id", w.runtimeID, + ) + return err + } + status = &api.Status{} + } + call := InitCall{ Method: "init", - Args: InitRequest{}, + Args: InitRequest{ + Checksum: cbor.FixSliceForSerde(status.Checksum), + MayGenerate: w.mayGenerate, + }, } req := &protocol.Body{ WorkerLocalRPCCallRequest: &protocol.WorkerLocalRPCCallRequest{ @@ -194,26 +226,127 @@ func (w *worker) onProcessStart(proto *protocol.Protocol) error { }, } - resp, err := proto.Call(w.ctx, req) + response, err := proto.Call(w.ctx, req) if err != nil { - w.logger.Error("failed to initialize key manager enclave", + w.logger.Error("failed to initialize enclave", "err", err, ) return err } + if response.Error != nil { + w.logger.Error("error initializing enclave", + "err", response.Error.Message, + ) + return fmt.Errorf("worker/keymanager: error initializing enclave: %s", response.Error.Message) + } - // TODO: Do something clever with the response. - /* - type InitResponse struct { - IsSecure bool `codec:"is_secure"` - Checksum []byte `codec:"checksum"` + resp := response.WorkerLocalRPCCallResponse + if resp == nil { + w.logger.Error("malformed response initializing enclave", + "response", response, + ) + return errMalformedResponse + } + + innerResp, err := extractMessageResponsePayload(resp.Response) + if err != nil { + w.logger.Error("failed to extract rpc response payload", + "err", err, + ) + return errors.Wrap(err, "worker/keymanager: failed to extract rpc response payload") + } + + var signedInitResp api.SignedInitResponse + if err = cbor.Unmarshal(innerResp, &signedInitResp); err != nil { + w.logger.Error("failed to parse response initializing enclave", + "err", err, + "response", innerResp, + ) + return errors.Wrap(err, "worker/keymanager: failed to parse response initializing enclave") + } + + // Validate the signature. + if tee != nil { + var signingKey signature.PublicKey + + switch tee.Hardware { + case node.TEEHardwareInvalid: + signingKey = api.TestPublicKey + case node.TEEHardwareIntelSGX: + signingKey = tee.RAK + default: + return fmt.Errorf("worker/keymanager: unknown TEE hardware: %v", tee.Hardware) } - */ - _ = resp + + if err = signedInitResp.Verify(signingKey); err != nil { + return errors.Wrap(err, "worker/keymanager: failed to validate initialization response signature") + } + } + + if !signedInitResp.InitResponse.IsSecure { + w.logger.Warn("Key manager enclave build is INSECURE") + } + + w.logger.Info("Key manager initialized", + "checksum", hex.EncodeToString(signedInitResp.InitResponse.Checksum), + ) + + // Cache the key manager enclave status. + w.Lock() + defer w.Unlock() + + w.enclaveStatus = &signedInitResp return nil } +func extractMessageResponsePayload(raw []byte) ([]byte, error) { + // Because of how serde_cbor serializes unit enums, simply de-serializing + // the response into a struct is not possible. Do this the hard way. + // + // This could alternatively be done by changing the rust side, or maybe + // this should be a general protocol helper, but this is probably the + // only place that will need such a thing.
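+ // Only the inner response body payload survives the extraction; it is re-encoded as CBOR and handed back to the caller for normal struct decoding.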
+ // + // See: runtime/src/rpc/types.rs + type MessageResponseBody struct { + Status string `codec:""` + Value interface{} `codec:""` + } + type MessageResponse struct { + Type string `codec:""` + Inner struct { + Body MessageResponseBody `codec:"body"` + } `codec:""` + } + + var msg MessageResponse + if err := cbor.Unmarshal(raw, &msg); err != nil { + return nil, errors.Wrap(err, "malformed message envelope") + } + + if mType := msg.Type; mType != "Response" { + return nil, fmt.Errorf("message is not a response: '%s'", mType) + } + + switch msg.Inner.Body.Status { + case "Success": + case "Error": + if msg.Inner.Body.Value == nil { + return nil, fmt.Errorf("unknown rpc response failure (nil)") + } + mErr, ok := msg.Inner.Body.Value.(string) + if !ok { + return nil, fmt.Errorf("unknown rpc response failure (%T)", msg.Inner.Body.Value) + } + return nil, fmt.Errorf("rpc failure: '%s'", mErr) + default: + return nil, fmt.Errorf("unknown rpc response status: '%s'", msg.Inner.Body.Status) + } + + return cbor.Marshal(msg.Inner.Body.Value), nil +} + func (w *worker) onNodeRegistration(n *node.Node) error { tee, err := w.workerHost.WaitForCapabilityTEE(w.ctx) if err != nil { @@ -223,11 +356,21 @@ func (w *worker) onNodeRegistration(n *node.Node) error { return err } + // Pull out the enclave status to be appended to the node registration. + w.Lock() + enclaveStatus := w.enclaveStatus + w.Unlock() + if enclaveStatus == nil { + w.logger.Error("enclave not initialized") + return fmt.Errorf("worker/keymanager: enclave not initialized") + } + // Add the key manager runtime to the node descriptor. Done here instead // of in the registration's generic handler since the registration handler // only knows about normal runtimes. rtDesc := &node.Runtime{ - ID: w.runtimeID, + ID: w.runtimeID, + ExtraInfo: cbor.Marshal(enclaveStatus), } rtDesc.Capabilities.TEE = tee n.Runtimes = append(n.Runtimes, rtDesc) @@ -238,7 +381,7 @@ func (w *worker) onNodeRegistration(n *node.Node) error { } // New constructs a new key manager worker.
-func New(dataDir string, ias *ias.IAS, grpc *grpc.Server, r *registration.Registration, workerCommonCfg *workerCommon.Config) (service.BackgroundService, bool, error) { +func New(dataDir string, ias *ias.IAS, grpc *grpc.Server, r *registration.Registration, workerCommonCfg *workerCommon.Config, backend api.Backend) (service.BackgroundService, bool, error) { var teeHardware node.TEEHardware s := viper.GetString(cfgTEEHardware) switch strings.ToLower(s) { @@ -255,7 +398,7 @@ func New(dataDir string, ias *ias.IAS, grpc *grpc.Server, r *registration.Regist ctx, cancelFn := context.WithCancel(context.Background()) w := &worker{ - enabled: viper.GetBool(cfgEnabled), + logger: logging.GetLogger("worker/keymanager"), ctx: ctx, cancelCtx: cancelFn, stopCh: make(chan struct{}), @@ -263,7 +406,9 @@ func New(dataDir string, ias *ias.IAS, grpc *grpc.Server, r *registration.Regist initCh: make(chan struct{}), grpc: grpc, registration: r, - logger: logging.GetLogger("worker/keymanager"), + backend: backend, + enabled: viper.GetBool(cfgEnabled), + mayGenerate: viper.GetBool(cfgMayGenerate), } if w.enabled { @@ -315,6 +460,7 @@ func RegisterFlags(cmd *cobra.Command) { cmd.Flags().String(cfgRuntimeLoader, "", "Path to key manager worker process binary") cmd.Flags().String(cfgRuntimeBinary, "", "Path to key manager runtime binary") cmd.Flags().String(cfgRuntimeID, "", "Key manager Runtime ID") + cmd.Flags().Bool(cfgMayGenerate, false, "Key manager may generate new master secret") } for _, v := range []string{ @@ -324,6 +470,7 @@ func RegisterFlags(cmd *cobra.Command) { cfgRuntimeLoader, cfgRuntimeBinary, cfgRuntimeID, + cfgMayGenerate, } { viper.BindPFlag(v, cmd.Flags().Lookup(v)) // nolint: errcheck } diff --git a/go/worker/registration/registration.go b/go/worker/registration/registration.go index ba052629c86..9555406a918 100644 --- a/go/worker/registration/registration.go +++ b/go/worker/registration/registration.go @@ -15,6 +15,7 @@ import ( "github.com/oasislabs/ekiden/go/common/identity" "github.com/oasislabs/ekiden/go/common/logging" "github.com/oasislabs/ekiden/go/common/node" + "github.com/oasislabs/ekiden/go/ekiden/cmd/common/flags" epochtime "github.com/oasislabs/ekiden/go/epochtime/api" registry "github.com/oasislabs/ekiden/go/registry/api" workerCommon "github.com/oasislabs/ekiden/go/worker/common" @@ -206,7 +207,9 @@ func getEntityPrivKey(dataDir string) (*signature.PrivateKey, error) { err error ) - if f := viper.GetString(cfgEntityPrivateKey); f != "" { + if flags.DebugTestEntity() { + _, entityPrivKey, err = entity.TestEntity() + } else if f := viper.GetString(cfgEntityPrivateKey); f != "" { // Load PEM. entityPrivKey = new(signature.PrivateKey) if err = entityPrivKey.LoadPEM(f, nil); err != nil { diff --git a/keymanager-client/src/client.rs b/keymanager-client/src/client.rs index 78fc8a91ce6..44821d75769 100644 --- a/keymanager-client/src/client.rs +++ b/keymanager-client/src/client.rs @@ -34,8 +34,6 @@ struct Inner { get_or_create_secret_keys_cache: RwLock>, /// Local cache for the get_public_key KeyManager endpoint. get_public_key_cache: RwLock>, - /// Local cache for the get_long_term_public_key KeyManager endpoint. - get_long_term_public_key_cache: RwLock>, } /// A key manager client which talks to a remote key manager enclave. 
@@ -51,7 +49,6 @@ impl RemoteClient { rpc_client: Client::new(client), get_or_create_secret_keys_cache: RwLock::new(LruCache::new(keys_cache_sizes)), get_public_key_cache: RwLock::new(LruCache::new(keys_cache_sizes)), - get_long_term_public_key_cache: RwLock::new(LruCache::new(keys_cache_sizes)), }), } } @@ -108,10 +105,6 @@ impl KeyManagerClient for RemoteClient { let mut cache = self.inner.get_public_key_cache.write().unwrap(); cache.clear(); drop(cache); - - let mut cache = self.inner.get_long_term_public_key_cache.write().unwrap(); - cache.clear(); - drop(cache); } fn get_or_create_keys(&self, ctx: Context, contract_id: ContractId) -> BoxFuture { @@ -163,31 +156,12 @@ impl KeyManagerClient for RemoteClient { ) } - fn get_long_term_public_key( - &self, - ctx: Context, - contract_id: ContractId, - ) -> BoxFuture> { - let mut cache = self.inner.get_long_term_public_key_cache.write().unwrap(); - if let Some(key) = cache.get(&contract_id) { - return Box::new(future::ok(Some(key.clone()))); - } - - // No entry in cache, fetch from key manager. - let inner = self.inner.clone(); + fn replicate_master_secret(&self, ctx: Context) -> BoxFuture> { Box::new( self.inner .rpc_client - .get_long_term_public_key(ctx, RequestIds::new(inner.runtime_id, contract_id)) - .and_then(move |key| match key { - Some(key) => { - let mut cache = inner.get_long_term_public_key_cache.write().unwrap(); - cache.put(contract_id, key.clone()); - - Ok(Some(key)) - } - None => Ok(None), - }), + .replicate_master_secret(ctx, ReplicateRequest {}) + .and_then(move |rsp| Ok(Some(rsp.master_secret))), ) } } diff --git a/keymanager-client/src/lib.rs b/keymanager-client/src/lib.rs index cea12e28807..e069cf0b3ec 100644 --- a/keymanager-client/src/lib.rs +++ b/keymanager-client/src/lib.rs @@ -37,12 +37,8 @@ pub trait KeyManagerClient: Send + Sync { contract_id: ContractId, ) -> BoxFuture>; - /// Get long-term public key for a contract. - fn get_long_term_public_key( - &self, - ctx: Context, - contract_id: ContractId, - ) -> BoxFuture>; + /// Get a copy of the master secret for replication. 
+ fn replicate_master_secret(&self, ctx: Context) -> BoxFuture>; } impl KeyManagerClient for Arc { @@ -62,12 +58,8 @@ impl KeyManagerClient for Arc { KeyManagerClient::get_public_key(&**self, ctx, contract_id) } - fn get_long_term_public_key( - &self, - ctx: Context, - contract_id: ContractId, - ) -> BoxFuture> { - KeyManagerClient::get_long_term_public_key(&**self, ctx, contract_id) + fn replicate_master_secret(&self, ctx: Context) -> BoxFuture> { + KeyManagerClient::replicate_master_secret(&**self, ctx) } } diff --git a/keymanager-client/src/mock.rs b/keymanager-client/src/mock.rs index fd3b8179f4a..2bbfc2fc49c 100644 --- a/keymanager-client/src/mock.rs +++ b/keymanager-client/src/mock.rs @@ -48,25 +48,14 @@ impl KeyManagerClient for MockClient { let keys = self.keys.lock().unwrap(); let result = keys.get(&contract_id).map(|ck| SignedPublicKey { key: ck.input_keypair.get_pk(), - timestamp: Some(0), + checksum: vec![], signature: Signature::default(), }); Box::new(future::ok(result)) } - fn get_long_term_public_key( - &self, - _ctx: Context, - contract_id: ContractId, - ) -> BoxFuture> { - let keys = self.keys.lock().unwrap(); - let result = keys.get(&contract_id).map(|ck| SignedPublicKey { - key: ck.input_keypair.get_pk(), - timestamp: Some(0), - signature: Signature::default(), - }); - - Box::new(future::ok(result)) + fn replicate_master_secret(&self, _ctx: Context) -> BoxFuture> { + unimplemented!(); } } diff --git a/keymanager-runtime/Cargo.toml b/keymanager-runtime/Cargo.toml index d42844aea56..9b00c395d8c 100644 --- a/keymanager-runtime/Cargo.toml +++ b/keymanager-runtime/Cargo.toml @@ -12,15 +12,16 @@ threads = 2 [dependencies] ekiden-runtime = { path = "../runtime" } ekiden-keymanager-api = { path = "./api" } +ekiden-keymanager-client = { path = "../keymanager-client" } failure = "0.1.5" lazy_static = "1.3.0" lru = "0.1.15" # TODO: Change to released version when 0.10.0 is released. serde_cbor = { git = "https://github.com/pyfisch/cbor", rev = "114ecaeac53799d0bf81ca8d1b980c7c419d76fe" } -byteorder = "1.3.1" io-context = "0.2.0" rand = "0.6.5" sgx-isa = { version = "0.2.0", features = ["sgxstd"] } sp800-185 = "0.2.0" +tiny-keccak = "1.4.2" x25519-dalek = "0.5.1" zeroize = "0.6" diff --git a/keymanager-runtime/api/src/api.rs b/keymanager-runtime/api/src/api.rs index 2b6bc649599..75037be22a5 100644 --- a/keymanager-runtime/api/src/api.rs +++ b/keymanager-runtime/api/src/api.rs @@ -17,7 +17,12 @@ impl_bytes!(MasterSecret, 32, "A 256 bit master secret."); /// Key manager initialization request. #[derive(Clone, Serialize, Deserialize)] pub struct InitRequest { - // TODO: Policy, peers, checksum, etc. + /// True iff the enclave may generate a new master secret. + pub may_generate: bool, + /// Checksum for validating replication. + #[serde(with = "serde_bytes")] + pub checksum: Vec, + // TODO: Policy. } /// Key manager initialization response. #[derive(Clone, Serialize, Deserialize)] pub struct InitResponse { @@ -30,6 +35,30 @@ pub struct InitResponse { pub checksum: Vec, } +/// Context used for the init response signature. +pub const INIT_RESPONSE_CONTEXT: [u8; 8] = *b"EkKmIniR"; + +/// Signed InitResponse. +#[derive(Clone, Serialize, Deserialize)] +pub struct SignedInitResponse { + /// InitResponse. + pub init_response: InitResponse, + /// Sign(init_response). + pub signature: Signature, +} + +/// Key manager replication request. +#[derive(Clone, Serialize, Deserialize)] +pub struct ReplicateRequest { + // Empty. +} + +/// Key manager replication response.
+#[derive(Clone, Serialize, Deserialize)] +pub struct ReplicateResponse { + pub master_secret: MasterSecret, +} + /// Request runtime/contract id tuple. #[derive(Clone, Serialize, Deserialize)] pub struct RequestIds { @@ -61,6 +90,9 @@ pub struct ContractKey { pub input_keypair: InputKeyPair, /// State encryption key pub state_key: StateKey, + /// Checksum of the key manager state. + #[serde(with = "serde_bytes")] + pub checksum: Vec, } impl ContractKey { @@ -77,25 +109,28 @@ impl ContractKey { PublicKey(*pk.as_bytes()), PrivateKey(sk.to_bytes()), state_key, + vec![], ) } /// Create a set of `ContractKey`. - pub fn new(pk: PublicKey, sk: PrivateKey, k: StateKey) -> Self { + pub fn new(pk: PublicKey, sk: PrivateKey, k: StateKey, sum: Vec) -> Self { Self { input_keypair: InputKeyPair { pk, sk }, state_key: k, + checksum: sum, } } /// Create a set of `ContractKey` with only the public key. - pub fn from_public_key(k: PublicKey) -> Self { + pub fn from_public_key(k: PublicKey, sum: Vec) -> Self { Self { input_keypair: InputKeyPair { pk: k, sk: PrivateKey::default(), }, state_key: StateKey::default(), + checksum: sum, } } } @@ -130,9 +165,10 @@ pub const PUBLIC_KEY_CONTEXT: [u8; 8] = *b"EkKmPubK"; pub struct SignedPublicKey { /// Public key. pub key: PublicKey, - /// Timestamp representing the expiry of the returned key. - pub timestamp: Option, - /// Sign(sk, (key || timestamp)) from the key manager. + /// Checksum of the key manager state. + #[serde(with = "serde_bytes")] + pub checksum: Vec, + /// Sign(sk, (key || checksum)) from the key manager. pub signature: Signature, } @@ -141,10 +177,14 @@ pub struct SignedPublicKey { pub enum KeyManagerError { #[fail(display = "client session is not authenticated")] NotAuthenticated, + #[fail(display = "client session authentication is invalid")] + InvalidAuthentication, #[fail(display = "key manager is not initialized")] NotInitialized, - #[fail(display = "key manager is already initialized")] - AlreadyInitialized, + #[fail(display = "key manager state corrupted")] + StateCorrupted, + #[fail(display = "key manager replication required")] + ReplicationRequired, } runtime_api! { @@ -152,5 +192,5 @@ runtime_api! { pub fn get_public_key(RequestIds) -> Option; - pub fn get_long_term_public_key(RequestIds) -> Option; + pub fn replicate_master_secret(ReplicateRequest) -> ReplicateResponse; } diff --git a/keymanager-runtime/src/kdf.rs b/keymanager-runtime/src/kdf.rs index 18f2caed33c..462925718f6 100644 --- a/keymanager-runtime/src/kdf.rs +++ b/keymanager-runtime/src/kdf.rs @@ -1,20 +1,24 @@ ///! 
Key Derivation Function use std::sync::{Arc, RwLock}; -use byteorder::{BigEndian, WriteBytesExt}; use failure::Fallible; +use io_context::Context as IoContext; use lazy_static::lazy_static; use lru::LruCache; use rand::{rngs::OsRng, Rng}; +use serde_cbor; use sgx_isa::Keypolicy; use sp800_185::{CShake, KMac}; +use tiny_keccak::sha3_256; use x25519_dalek; use zeroize::Zeroize; use ekiden_keymanager_api::{ - ContractKey, KeyManagerError, MasterSecret, PrivateKey, PublicKey, RequestIds, SignedPublicKey, - StateKey, PUBLIC_KEY_CONTEXT, + ContractKey, InitRequest, InitResponse, KeyManagerError, MasterSecret, PrivateKey, PublicKey, + ReplicateResponse, RequestIds, SignedInitResponse, SignedPublicKey, StateKey, + INIT_RESPONSE_CONTEXT, PUBLIC_KEY_CONTEXT, }; +use ekiden_keymanager_client::KeyManagerClient; use ekiden_runtime::{ common::{ crypto::{ @@ -23,7 +27,9 @@ use ekiden_runtime::{ }, sgx::egetkey::egetkey, }, + executor::Executor, rpc::Context as RpcContext, + runtime_context, storage::StorageContext, BUILD_INFO, }; @@ -68,6 +74,10 @@ const MASTER_SECRET_STORAGE_KEY: &'static [u8] = b"keymanager_master_secret"; const MASTER_SECRET_STORAGE_SIZE: usize = 32 + TAG_SIZE + NONCE_SIZE; const MASTER_SECRET_SEAL_CONTEXT: &'static [u8] = b"Ekiden Keymanager Seal master secret v0"; +pub(crate) struct Context { + pub km_client: Arc, +} + /// Kdf, which derives key manager keys from a master secret. pub struct Kdf { inner: RwLock, @@ -82,7 +92,15 @@ struct Inner { } impl Inner { + fn reset(&mut self) { + self.master_secret = None; + self.checksum = None; + self.signer = None; + self.cache.clear(); + } + fn derive_contract_key(&self, req: &RequestIds) -> Fallible { + let checksum = self.get_checksum()?; let mut contract_secret = self.derive_contract_secret(req)?; // Note: The `name` parameter for cSHAKE is reserved for use by NIST. @@ -106,6 +124,7 @@ impl Inner { PublicKey(*pk.as_bytes()), PrivateKey(sk.to_bytes()), state_key, + checksum, )) } @@ -117,8 +136,7 @@ impl Inner { let mut k = [0u8; 32]; - // KMAC256(master_secret, MRENCLAVE_km || runtimeID || contractID, 32, "ekiden-derive-runtime-secret") - // XXX: We don't pass in the MRENCLAVE yet. + // KMAC256(master_secret, runtimeID || contractID, 32, "ekiden-derive-runtime-secret") let mut f = KMac::new_kmac256(master_secret.as_ref(), &RUNTIME_KDF_CUSTOM); f.update(req.runtime_id.as_ref()); f.update(req.contract_id.as_ref()); @@ -126,6 +144,13 @@ impl Inner { Ok(k.to_vec()) } + + fn get_checksum(&self) -> Fallible> { + match self.checksum.as_ref() { + Some(checksum) => Ok(checksum.clone()), + None => Err(KeyManagerError::NotInitialized.into()), + } + } } impl Kdf { @@ -147,47 +172,132 @@ impl Kdf { /// Initialize the KDF internal state. #[cfg_attr(not(target_env = "sgx"), allow(unused))] - pub fn init(&self, ctx: &RpcContext) -> Fallible> { + pub fn init( + &self, + req: &InitRequest, + ctx: &mut RpcContext, + //client: Arc, + ) -> Fallible { let mut inner = self.inner.write().unwrap(); - // Initialization should be idempotent. - if inner.master_secret.is_none() { + // How initialization proceeds depends on the state and the request. + // + // WARNING: Once a master secret has been persisted to disk, it is + // intended that manual intervention by the operator is required to + // remove/alter it. + if inner.master_secret.is_some() { + // A master secret is set. This enclave has initialized successfully + // at least once. 
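+ // All that is left to do is to check that the enclave's checksum still agrees with the caller's (consensus) view, when the request provides one.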
+ + let checksum = inner.get_checksum()?; + if req.checksum.len() > 0 && req.checksum != checksum { + // The init request provided a checksum and there was a mismatch. + // The global key manager state disagrees with the enclave state. + inner.reset(); + return Err(KeyManagerError::StateCorrupted.into()); + } + } else if req.checksum.len() > 0 { + // A master secret is not set, and there is a checksum in the + // request. An enclave somewhere, has initialized at least + // once. + + // Attempt to load the master secret. + let (master_secret, did_replicate) = match Self::load_master_secret() { + Some(master_secret) => (master_secret, false), + None => { + // Couldn't load, fetch the master secret from another + // enclave instance. + + let rctx = runtime_context!(ctx, Context); + + let result = rctx + .km_client + .replicate_master_secret(IoContext::create_child(&ctx.io_ctx)); + let master_secret = + Executor::with_current(|executor| executor.block_on(result))?; + (master_secret.unwrap(), true) + } + }; + + let checksum = Self::checksum_master_secret(&master_secret); + if req.checksum != checksum { + // We either loaded or replicated something that does + // not match the rest of the world. + inner.reset(); + return Err(KeyManagerError::StateCorrupted.into()); + } + + // The loaded/replicated master secret is consistent with the rest + // of the world. Ok to proceed. + if did_replicate { + Self::save_master_secret(&master_secret); + } + inner.master_secret = Some(master_secret); + inner.checksum = Some(checksum); + } else { + // A master secret is not set, and there is no checksum in the + // request. Either this key manager instance has never been + // initialized, or our view of the external state is not current. + + // Attempt to load the master secret, the caller may just be + // behind the rest of the world. let master_secret = match Self::load_master_secret() { Some(master_secret) => master_secret, - None => Self::generate_master_secret(), + None => { + // Unable to load, perhaps we can generate? + if !req.may_generate { + return Err(KeyManagerError::ReplicationRequired.into()); + } + + Self::generate_master_secret() + } }; - inner.master_secret = Some(MasterSecret::from(master_secret)); + + // Loaded or generated a master secret. There is no checksum to + // compare against, but that is expected when bootstrapping or + // lagging. + inner.checksum = Some(Self::checksum_master_secret(&master_secret)); + inner.master_secret = Some(master_secret); } - // (re)-generate the checksum, based on the possibly updated RAK. - let mut k = [0u8; 32]; - let mut f = KMac::new_kmac256( - inner.master_secret.as_ref().unwrap().as_ref(), - &CHECKSUM_CUSTOM, - ); + // If we make it this far, we have a master secret and checksum + // that either matches the global state, will become the global + // state, or should become the global state (rare). + // + // It is ok to generate a response. + // The RAK (signing key) may have changed since the last init call. #[cfg(target_env = "sgx")] { let signer: Arc = ctx.rak.clone(); inner.signer = Some(signer); - - f.update(ctx.rak.public_key().unwrap().as_ref()); } - #[cfg(not(target_env = "sgx"))] { let priv_key = Arc::new(signature::PrivateKey::from_pkcs8(INSECURE_SIGNING_KEY_PKCS8).unwrap()); - f.update(priv_key.public_key().as_ref()); - let signer: Arc = priv_key; inner.signer = Some(signer); } - f.finalize(&mut k); - inner.checksum = Some(k.to_vec()); - return Ok(inner.checksum.as_ref().unwrap().clone()); + // Build the response and sign it with the RAK. 
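+ // (Non-SGX builds sign with the well-known insecure test key set up above instead of the RAK.)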
+ let init_response = InitResponse { + is_secure: BUILD_INFO.is_secure, + checksum: inner.checksum.as_ref().unwrap().clone(), + }; + + let body = serde_cbor::to_vec(&init_response)?; + let signature = inner + .signer + .as_ref() + .unwrap() + .sign(&INIT_RESPONSE_CONTEXT, &body)?; + + Ok(SignedInitResponse { + init_response, + signature, + }) } // Get or create keys. @@ -214,17 +324,13 @@ impl Kdf { } /// Signs the public key using the key manager key. - pub fn sign_public_key( - &self, - key: PublicKey, - timestamp: Option, - ) -> Fallible { + pub fn sign_public_key(&self, key: PublicKey) -> Fallible { let mut body = key.as_ref().to_vec(); - if let Some(ts) = timestamp { - body.write_u64::(ts).unwrap(); - } let inner = self.inner.read().unwrap(); + let checksum = inner.get_checksum()?; + body.extend_from_slice(&checksum); + let signer = match inner.signer.as_ref() { Some(rak) => rak, None => return Err(KeyManagerError::NotInitialized.into()), @@ -233,12 +339,22 @@ impl Kdf { Ok(SignedPublicKey { key, - timestamp, + checksum, signature, }) } - fn load_master_secret() -> Option> { + // Replicate master secret. + pub fn replicate_master_secret(&self) -> Fallible { + let inner = self.inner.read().unwrap(); + + match inner.master_secret { + Some(master_secret) => Ok(ReplicateResponse { master_secret }), + None => Err(KeyManagerError::NotInitialized.into()), + } + } + + fn load_master_secret() -> Option { let ciphertext = StorageContext::with_current(|_mkvs, untrusted_local| { untrusted_local.get(MASTER_SECRET_STORAGE_KEY.to_vec()) }) @@ -263,21 +379,17 @@ impl Kdf { .open(&nonce, ciphertext.to_vec(), vec![]) .expect("persisted state is corrupted"); - Some(plaintext) + Some(MasterSecret::from(plaintext)) } - fn generate_master_secret() -> Vec { + fn save_master_secret(master_secret: &MasterSecret) { let mut rng = OsRng::new().unwrap(); - // TODO: Support static keying for debugging. - let mut master_secret = [0u8; 32]; - rng.fill(&mut master_secret); - // Encrypt the master secret. let mut nonce = [0u8; NONCE_SIZE]; rng.fill(&mut nonce); let d2 = Self::new_d2(); - let mut ciphertext = d2.seal(&nonce, master_secret.to_vec(), vec![]); + let mut ciphertext = d2.seal(&nonce, master_secret.as_ref().to_vec(), vec![]); ciphertext.extend_from_slice(&nonce); // Persist the encrypted master secret. @@ -285,8 +397,28 @@ impl Kdf { untrusted_local.insert(MASTER_SECRET_STORAGE_KEY.to_vec(), ciphertext) }) .expect("failed to persist master secret"); + } + + fn generate_master_secret() -> MasterSecret { + let mut rng = OsRng::new().unwrap(); + + // TODO: Support static keying for debugging. 
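+ // Draw the secret from the OS CSPRNG and seal it to untrusted local storage right away.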
+ let mut master_secret = [0u8; 32]; + rng.fill(&mut master_secret); + let master_secret = MasterSecret::from(master_secret.to_vec()); + + Self::save_master_secret(&master_secret); + + master_secret + } + + fn checksum_master_secret(master_secret: &MasterSecret) -> Vec { + let mut tmp = master_secret.as_ref().to_vec().clone(); + tmp.extend_from_slice(&CHECKSUM_CUSTOM); + let checksum = sha3_256(&tmp); + tmp.zeroize(); - master_secret.to_vec() + checksum.to_vec() } fn new_d2() -> DeoxysII { diff --git a/keymanager-runtime/src/main.rs b/keymanager-runtime/src/main.rs index ef0240eda9e..05668db1f46 100644 --- a/keymanager-runtime/src/main.rs +++ b/keymanager-runtime/src/main.rs @@ -1,5 +1,5 @@ -extern crate byteorder; extern crate ekiden_keymanager_api; +extern crate ekiden_keymanager_client; extern crate ekiden_runtime; extern crate failure; extern crate io_context; @@ -8,45 +8,41 @@ extern crate lru; extern crate rand; extern crate serde_cbor; extern crate sp800_185; +extern crate tiny_keccak; extern crate x25519_dalek; extern crate zeroize; +use std::{str::FromStr, sync::Arc}; + mod kdf; mod methods; use failure::Fallible; use ekiden_keymanager_api::*; +use ekiden_keymanager_client::RemoteClient; use ekiden_runtime::{ + common::{runtime::RuntimeId, sgx::avr}, + rak::RAK, register_runtime_rpc_methods, rpc::{ dispatcher::{Method as RpcMethod, MethodDescriptor as RpcMethodDescriptor}, Context as RpcContext, }, - RpcDemux, RpcDispatcher, TxnDispatcher, BUILD_INFO, + Protocol, RpcDemux, RpcDispatcher, TxnDispatcher, }; use self::kdf::Kdf; /// Initialize the Kdf. -fn init(_req: &InitRequest, ctx: &mut RpcContext) -> Fallible { - // TODO: Based on the InitRequest, and persisted state (if any): - // * Load the persisted state. - // * Generate a new master secret. - // * Replicate the master secret. - - let checksum = Kdf::global().init(&ctx)?; - - Ok(InitResponse { - is_secure: BUILD_INFO.is_secure, - checksum, - }) +fn init_kdf(req: &InitRequest, ctx: &mut RpcContext) -> Fallible { + Kdf::global().init(&req, ctx) } fn main() { // Initializer. - let init = |_: &_, - _: &_, + let init = |protocol: &Arc, + rak: &Arc, _rpc_demux: &mut RpcDemux, rpc: &mut RpcDispatcher, _txn: &mut TxnDispatcher| { @@ -64,10 +60,36 @@ fn main() { RpcMethodDescriptor { name: "init".to_string(), }, - init, + init_kdf, ), true, ); + + // HACK: There is no nice way of passing in the runtime ID at compile + // time yet. + let runtime_id = + RuntimeId::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap(); + + // We will only replicate from ourselves for now, once migration + // support is required, this needs to change somehow. + let mr_enclave = match avr::get_enclave_identity() { + Some(id) => Some(id.mr_enclave), + None => None, + }; + let km_client = Arc::new(RemoteClient::new_runtime( + runtime_id, + mr_enclave, + protocol.clone(), + rak.clone(), + 1, // Not used, doesn't matter. + )); + + rpc.set_context_initializer(move |ctx: &mut RpcContext| { + ctx.runtime = Box::new(kdf::Context { + km_client: km_client.clone(), + }) + }); }; // Start the runtime. 
diff --git a/keymanager-runtime/src/methods.rs b/keymanager-runtime/src/methods.rs index 0766b9b8734..d935906ce52 100644 --- a/keymanager-runtime/src/methods.rs +++ b/keymanager-runtime/src/methods.rs @@ -3,12 +3,10 @@ use ekiden_keymanager_api::*; use ekiden_runtime::rpc::Context as RpcContext; use failure::Fallible; -use crate::kdf::Kdf; +#[cfg(target_env = "sgx")] +use ekiden_runtime::common::sgx::avr::get_enclave_identity; -// We have not implemented key-expiry yet. So give all keys the maximum expiry of 2^53-1 -// because (as a convenience) that is the maximum safe number to use in JavaScript and its -// more than enough to account for enough time. -static MAX_KEY_TIMESTAMP: u64 = (1 << 53) - 1; +use crate::kdf::Kdf; /// See `Kdf::get_or_create_keys`. pub fn get_or_create_keys(req: &RequestIds, ctx: &mut RpcContext) -> Fallible { @@ -33,17 +31,41 @@ pub fn get_public_key( ) -> Fallible> { let kdf = Kdf::global(); let pk = kdf.get_public_key(req)?; - pk.map_or(Ok(None), |pk| { - Ok(Some(kdf.sign_public_key(pk, Some(MAX_KEY_TIMESTAMP))?)) - }) + pk.map_or(Ok(None), |pk| Ok(Some(kdf.sign_public_key(pk)?))) } -/// See `Kdf::get_public_key`. -pub fn get_long_term_public_key( - req: &RequestIds, - _ctx: &mut RpcContext, -) -> Fallible> { - let kdf = Kdf::global(); - let pk = kdf.get_public_key(req)?; - pk.map_or(Ok(None), |pk| Ok(Some(kdf.sign_public_key(pk, None)?))) +/// See `Kdf::replicate_master_secret`. +#[cfg_attr(not(target_env = "sgx"), allow(unused))] +pub fn replicate_master_secret( + _req: &ReplicateRequest, + ctx: &mut RpcContext, +) -> Fallible { + #[cfg(target_env = "sgx")] + { + can_replicate(ctx)?; + } + + Kdf::global().replicate_master_secret() +} + +#[cfg(target_env = "sgx")] +fn can_replicate(ctx: &mut RpcContext) -> Fallible<()> { + let si = ctx.session_info.as_ref(); + let si = si.ok_or(KeyManagerError::NotAuthenticated)?; + + let their_id = &si.authenticated_avr; + + let our_id = match get_enclave_identity() { + Some(id) => id, + None => return Err(KeyManagerError::NotInitialized.into()), + }; + + // Always support replication to other key manager enclave instances. + if our_id.mr_signer == their_id.mr_signer && our_id.mr_enclave == their_id.mr_enclave { + return Ok(()); + } + + // TODO: Check the dynamic policy (for migration support). + + Err(KeyManagerError::InvalidAuthentication.into()) } diff --git a/runtime/src/dispatcher.rs b/runtime/src/dispatcher.rs index 538be523bcd..fbe253aa328 100644 --- a/runtime/src/dispatcher.rs +++ b/runtime/src/dispatcher.rs @@ -394,7 +394,7 @@ impl Dispatcher { Context::create_child(&ctx), protocol.clone(), )); - let rpc_ctx = RpcContext::new(self.rak.clone(), session_info); + let rpc_ctx = RpcContext::new(ctx.clone(), self.rak.clone(), session_info); let response = StorageContext::enter(&mut mkvs, untrusted_local.clone(), || { rpc_dispatcher.dispatch(req, rpc_ctx) @@ -488,7 +488,7 @@ impl Dispatcher { Context::create_child(&ctx), protocol.clone(), )); - let rpc_ctx = RpcContext::new(self.rak.clone(), None); + let rpc_ctx = RpcContext::new(ctx.clone(), self.rak.clone(), None); let response = StorageContext::enter(&mut mkvs, untrusted_local.clone(), || { rpc_dispatcher.dispatch_local(req, rpc_ctx) }); diff --git a/runtime/src/rpc/context.rs b/runtime/src/rpc/context.rs index 0bd7bc1d8f8..73ebec642e9 100644 --- a/runtime/src/rpc/context.rs +++ b/runtime/src/rpc/context.rs @@ -1,6 +1,8 @@ //! RPC call context. 
use std::{any::Any, sync::Arc}; +use io_context::Context as IoContext; + use super::session::SessionInfo; use crate::rak::RAK; @@ -8,6 +10,8 @@ struct NoRuntimeContext; /// RPC call context. pub struct Context { + /// I/O context. + pub io_ctx: Arc, /// The current RAK if any. pub rak: Arc, /// Information about the session the RPC call was delivered over. @@ -18,8 +22,13 @@ pub struct Context { impl Context { /// Construct new transaction context. - pub fn new(rak: Arc, session_info: Option>) -> Self { + pub fn new( + io_ctx: Arc, + rak: Arc, + session_info: Option>, + ) -> Self { Self { + io_ctx, rak, session_info, runtime: Box::new(NoRuntimeContext), diff --git a/runtime/src/rpc/dispatcher.rs b/runtime/src/rpc/dispatcher.rs index 4805002269c..6ebe9163825 100644 --- a/runtime/src/rpc/dispatcher.rs +++ b/runtime/src/rpc/dispatcher.rs @@ -17,6 +17,21 @@ enum DispatchError { MethodNotFound { method: String }, } +/// Custom context initializer. +pub trait ContextInitializer { + /// Called to initialize the context. + fn init(&self, ctx: &mut Context); +} + +impl ContextInitializer for F +where + F: Fn(&mut Context), +{ + fn init(&self, ctx: &mut Context) { + (*self)(ctx) + } +} + /// Descriptor of a RPC API method. #[derive(Clone, Debug)] pub struct MethodDescriptor { @@ -115,6 +130,8 @@ pub struct Dispatcher { methods: HashMap, /// Registered local RPC methods. local_methods: HashMap, + /// Registered context initializer. + ctx_initializer: Option>, } impl Dispatcher { @@ -123,6 +140,7 @@ impl Dispatcher { Self { methods: HashMap::new(), local_methods: HashMap::new(), + ctx_initializer: None, } } @@ -134,8 +152,20 @@ impl Dispatcher { }; } + /// Configure context initializer. + pub fn set_context_initializer(&mut self, initializer: I) + where + I: ContextInitializer + 'static, + { + self.ctx_initializer = Some(Box::new(initializer)); + } + /// Dispatch request. pub fn dispatch(&self, request: Request, mut ctx: Context) -> Response { + if let Some(ref ctx_init) = self.ctx_initializer { + ctx_init.init(&mut ctx); + } + match self.dispatch_fallible(request, &mut ctx, false) { Ok(response) => response, Err(error) => Response { diff --git a/scripts/regenerate_single_node.sh b/scripts/regenerate_single_node.sh new file mode 100755 index 00000000000..4542f02f690 --- /dev/null +++ b/scripts/regenerate_single_node.sh @@ -0,0 +1,88 @@ +#!/bin/bash -e + +DATADIR=$(mktemp -d --tmpdir ekiden-regenerate-XXXXXXXXXX) + +EKIDEN_BINARY=${EKIDEN_BINARY:-"./go/ekiden/ekiden"} +EKIDEN_RUNTIME_ID=${EKIDEN_RUNTIME_ID:-"0000000000000000000000000000000000000000000000000000000000000000"} +EKIDEN_KM_RUNTIME_ID=${EKIDEN_KM_RUNTIME_ID:-"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"} + +SINGLE_NODE_DIR=${SINGLE_NODE_DIR:-"./configs/single_node/"} +SINGLE_NODE_SGX_DIR=${SINGLE_NODE_SGX_DIR:-"./configs/single_node_sgx/"} + +# +# Non-SGX config. 
+# + +${EKIDEN_BINARY}\ + registry runtime init_genesis \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --runtime.id ${EKIDEN_RUNTIME_ID} \ + --runtime.replica_group_size 1 \ + --runtime.replica_group_backup_size 0 \ + --runtime.storage_group_size 1 \ + --runtime.keymanager ${EKIDEN_KM_RUNTIME_ID} \ + --runtime.kind compute \ + --runtime.genesis.file runtime_genesis_nosgx.json + +${EKIDEN_BINARY} \ + registry runtime init_genesis \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --runtime.id ${EKIDEN_KM_RUNTIME_ID} \ + --runtime.kind keymanager \ + --runtime.genesis.file keymanager_genesis_nosgx.json + +${EKIDEN_BINARY} \ + genesis init \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --genesis_file ${DATADIR}/genesis_nosgx.json \ + --runtime ${DATADIR}/keymanager_genesis_nosgx.json \ + --runtime ${DATADIR}/runtime_genesis_nosgx.json \ + --validator ${SINGLE_NODE_DIR}/validator-44f1c4b3a161a889e6876ba92c20c3f63dd1ecf204adab6ca436566497b01628.json + +cp ${DATADIR}/genesis_nosgx.json ${SINGLE_NODE_DIR}/genesis.json + +# +# SGX config. +# + +${EKIDEN_BINARY}\ + registry runtime init_genesis \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --runtime.id ${EKIDEN_RUNTIME_ID} \ + --runtime.replica_group_size 1 \ + --runtime.replica_group_backup_size 0 \ + --runtime.storage_group_size 1 \ + --runtime.keymanager ${EKIDEN_KM_RUNTIME_ID} \ + --runtime.kind compute \ + --runtime.tee_hardware intel-sgx \ + --runtime.genesis.file runtime_genesis_sgx.json + +${EKIDEN_BINARY} \ + registry runtime init_genesis \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --runtime.id ${EKIDEN_KM_RUNTIME_ID} \ + --runtime.kind keymanager \ + --runtime.tee_hardware intel-sgx \ + --runtime.genesis.file keymanager_genesis_sgx.json + +${EKIDEN_BINARY} \ + genesis init \ + --datadir ${DATADIR} \ + --debug.allow_test_keys \ + --debug.test_entity \ + --genesis_file ${DATADIR}/genesis_sgx.json \ + --runtime ${DATADIR}/keymanager_genesis_sgx.json \ + --runtime ${DATADIR}/runtime_genesis_sgx.json \ + --validator ${SINGLE_NODE_SGX_DIR}/validator-44f1c4b3a161a889e6876ba92c20c3f63dd1ecf204adab6ca436566497b01628.json + +cp ${DATADIR}/genesis_sgx.json ${SINGLE_NODE_SGX_DIR}/genesis.json