diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7889b52afa141..872f916f4cf05 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -202,7 +202,6 @@ check-web-wasm: - time cargo web build -p substrate-keystore - time cargo web build -p substrate-executor - time cargo web build -p substrate-network - - time cargo web build -p substrate-offchain - time cargo web build -p substrate-panic-handler - time cargo web build -p substrate-peerset - time cargo web build -p substrate-primitives @@ -336,7 +335,7 @@ check_warnings: - docker push $CONTAINER_IMAGE:$VERSION - docker push $CONTAINER_IMAGE:latest -publish-docker-substrate: +publish-docker-substrate: stage: publish <<: *publish-docker-release # collect VERSION artifact here to pass it on to kubernetes diff --git a/Cargo.lock b/Cargo.lock index 1c6a4f19231f8..b1936f4177091 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,11 +211,6 @@ name = "bitmask" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "bitvec" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "bitvec" version = "0.14.0" @@ -865,7 +860,7 @@ dependencies = [ "hashmap_core 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -917,7 +912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "fork-tree" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ 
-1322,6 +1317,18 @@ dependencies = [ "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hyper-tls" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "idna" version = "0.1.5" @@ -1332,12 +1339,22 @@ dependencies = [ "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "impl-codec" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1409,24 +1426,25 @@ dependencies = [ [[package]] name = "jsonrpc-client-transports" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "websocket 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-core" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1438,15 +1456,15 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-derive" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-crate 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1457,12 +1475,12 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1471,10 +1489,10 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1482,12 +1500,12 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1498,15 +1516,15 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1652,7 +1670,7 @@ dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "multistream-select 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "multistream-select 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-multiaddr 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-multihash 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2220,7 +2238,7 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2286,13 +2304,13 @@ dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-executor 2.0.0", "node-primitives 2.0.0", "node-rpc 2.0.0", "node-runtime 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -2336,7 +2354,7 @@ dependencies = [ "node-primitives 2.0.0", "node-runtime 2.0.0", "node-testing 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", "srml-balances 2.0.0", @@ -2361,7 +2379,7 @@ dependencies = [ name = "node-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", @@ -2376,15 +2394,15 @@ name = "node-rpc" version = "2.0.0" dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "node-runtime 2.0.0", "node-testing 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", @@ -2400,7 +2418,7 @@ dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "substrate-rpc 2.0.0", @@ -2412,7 +2430,7 @@ version = "2.0.0" dependencies = [ "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2462,7 +2480,7 @@ dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-template-runtime 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + 
"parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "substrate-basic-authorship 2.0.0", @@ -2487,7 +2505,7 @@ dependencies = [ name = "node-template-runtime" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -2518,7 +2536,7 @@ dependencies = [ "node-executor 2.0.0", "node-primitives 2.0.0", "node-runtime 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", "srml-balances 2.0.0", @@ -2684,16 +2702,6 @@ name = "parity-bytes" version = "0.1.0" source = "git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d#b0317f649ab2c665b7987b8475878fc4d2e1f81d" -[[package]] -name = "parity-codec" -version = "4.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", - "bitvec 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "parity-multiaddr" version = "0.5.0" @@ -2727,7 +2735,7 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2735,7 +2743,6 @@ dependencies = [ "byte-slice-cast 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec-derive 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "vecarray 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2922,6 +2929,11 @@ name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pin-utils" version = "0.1.0-alpha.4" @@ -3288,7 +3300,7 @@ dependencies = [ "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3680,7 +3692,7 @@ version = "2.0.0" dependencies = [ "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "proc-macro-crate 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3702,7 +3714,7 @@ dependencies = [ "environmental 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "sr-std 2.0.0", "substrate-offchain 2.0.0", @@ -3719,7 +3731,7 @@ dependencies = [ "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "paste 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3736,7 +3748,7 @@ name = "sr-sandbox" version = "2.0.0" dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "sr-std 2.0.0", "substrate-primitives 2.0.0", @@ -3748,7 +3760,7 @@ dependencies = [ name = "sr-staking-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", ] @@ -3765,7 +3777,7 @@ name = "sr-version" version = "2.0.0" dependencies = [ "impl-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 
(registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -3775,7 +3787,7 @@ dependencies = [ name = "srml-assets" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -3790,7 +3802,7 @@ name = "srml-aura" version = "2.0.0" dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3810,10 +3822,11 @@ dependencies = [ name = "srml-authority-discovery" version = "0.1.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", + "sr-staking-primitives 2.0.0", "sr-std 2.0.0", "srml-im-online 0.1.0", "srml-session 2.0.0", @@ -3827,7 +3840,7 @@ dependencies = [ name = "srml-authorship" version = "0.1.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -3843,7 +3856,7 @@ version = "2.0.0" dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3863,7 +3876,7 @@ dependencies = [ name = "srml-balances" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3880,7 +3893,7 @@ name = "srml-collective" version = "2.0.0" dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3899,7 +3912,7 @@ dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)", "pwasm-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3920,7 +3933,7 
@@ dependencies = [ name = "srml-democracy" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3937,7 +3950,7 @@ name = "srml-elections" version = "2.0.0" dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -3953,10 +3966,11 @@ dependencies = [ name = "srml-example" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", + "sr-std 2.0.0", "srml-balances 2.0.0", "srml-support 2.0.0", "srml-system 2.0.0", @@ -3968,7 +3982,7 @@ name = "srml-executive" version = "2.0.0" dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -3984,7 +3998,7 @@ dependencies = [ name = "srml-finality-tracker" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + 
"parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -3999,7 +4013,7 @@ dependencies = [ name = "srml-generic-asset" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4013,7 +4027,7 @@ dependencies = [ name = "srml-grandpa" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4031,7 +4045,7 @@ dependencies = [ name = "srml-im-online" version = "0.1.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4048,7 +4062,7 @@ dependencies = [ name = "srml-indices" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "ref_thread_local 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4065,7 +4079,7 @@ dependencies = [ name = "srml-membership" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4079,7 +4093,7 @@ dependencies = [ name = "srml-metadata" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-std 2.0.0", "substrate-primitives 2.0.0", @@ -4089,7 +4103,7 @@ dependencies = [ name = "srml-offences" version = "1.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4105,7 +4119,7 @@ dependencies = [ name = "srml-scored-pool" version = "1.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4121,7 +4135,7 @@ name = "srml-session" version = "2.0.0" dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -4140,7 +4154,7 @@ dependencies = [ name = "srml-staking" version = "2.0.0" dependencies = [ - 
"parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4162,7 +4176,7 @@ dependencies = [ name = "srml-sudo" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4179,7 +4193,7 @@ version = "2.0.0" dependencies = [ "bitmask 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "once_cell 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "paste 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4228,7 +4242,7 @@ dependencies = [ name = "srml-support-test" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -4243,7 +4257,7 @@ name = "srml-system" version = "2.0.0" dependencies = [ "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -4258,7 +4272,7 @@ dependencies = [ name = "srml-timestamp" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4273,7 +4287,7 @@ dependencies = [ name = "srml-treasury" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4365,7 +4379,7 @@ dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "node-runtime 2.0.0", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", @@ -4390,7 +4404,7 @@ dependencies = [ name = "substrate-application-crypto" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4403,7 +4417,7 @@ dependencies = [ 
name = "substrate-authority-discovery-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", "substrate-client 2.0.0", @@ -4415,7 +4429,7 @@ version = "2.0.0" dependencies = [ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", "substrate-consensus-common 2.0.0", @@ -4485,7 +4499,7 @@ dependencies = [ "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-api-macros 2.0.0", "sr-primitives 2.0.0", @@ -4514,7 +4528,7 @@ dependencies = [ "kvdb-rocksdb 0.1.4 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 
2.0.0", "substrate-client 2.0.0", @@ -4537,7 +4551,7 @@ dependencies = [ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "futures-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -4566,7 +4580,7 @@ dependencies = [ name = "substrate-consensus-aura-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", "substrate-application-crypto 2.0.0", @@ -4587,7 +4601,7 @@ dependencies = [ "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-rational 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4619,7 +4633,7 @@ dependencies = [ name = "substrate-consensus-babe-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", 
"sr-primitives 2.0.0", "sr-std 2.0.0", @@ -4637,7 +4651,7 @@ dependencies = [ "futures-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -4655,7 +4669,7 @@ dependencies = [ "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rhododendron 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -4678,7 +4692,7 @@ dependencies = [ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "futures-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", @@ -4713,7 +4727,7 @@ dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 
(registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", @@ -4741,7 +4755,7 @@ dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4756,7 +4770,6 @@ dependencies = [ "substrate-keystore 2.0.0", "substrate-network 2.0.0", "substrate-primitives 2.0.0", - "substrate-service 2.0.0", "substrate-telemetry 2.0.0", "substrate-test-runtime-client 2.0.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4769,7 +4782,7 @@ dependencies = [ name = "substrate-finality-grandpa-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -4781,7 +4794,7 @@ dependencies = [ name = "substrate-inherents" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 
(registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -4833,7 +4846,7 @@ dependencies = [ "linked_hash_set 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4865,11 +4878,18 @@ dependencies = [ name = "substrate-offchain" version = "2.0.0" dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", 
"substrate-client-db 2.0.0", @@ -4879,6 +4899,7 @@ dependencies = [ "substrate-primitives 2.0.0", "substrate-test-runtime-client 2.0.0", "substrate-transaction-pool 2.0.0", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4925,8 +4946,9 @@ dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "impl-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4953,12 +4975,12 @@ dependencies = [ "derive_more 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4967,7 +4989,6 @@ dependencies = [ "sr-primitives 2.0.0", "sr-version 2.0.0", "substrate-client 2.0.0", - "substrate-executor 2.0.0", "substrate-keystore 2.0.0", "substrate-network 2.0.0", "substrate-primitives 2.0.0", @@ -4982,10 +5003,10 @@ dependencies = [ name = "substrate-rpc-servers" version = "2.0.0" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", @@ -5024,7 +5045,7 @@ dependencies = [ "node-primitives 2.0.0", "node-runtime 2.0.0", "parity-multiaddr 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 
(registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5089,7 +5110,7 @@ version = "2.0.0" dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "substrate-primitives 2.0.0", ] @@ -5102,7 +5123,7 @@ dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "substrate-panic-handler 2.0.0", "substrate-primitives 2.0.0", @@ -5138,7 +5159,7 @@ version = "2.0.0" dependencies = [ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", "substrate-client-db 2.0.0", @@ -5156,7 +5177,7 @@ dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", 
"memory-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -5188,7 +5209,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-primitives 2.0.0", "substrate-test-client 2.0.0", @@ -5204,7 +5225,7 @@ dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", @@ -5218,7 +5239,7 @@ version = "2.0.0" dependencies = [ "derive_more 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", @@ -5237,7 +5258,7 @@ dependencies = [ "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hasher 0.15.2 
(registry+https://github.com/rust-lang/crates.io-index)", "memory-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-std 2.0.0", "substrate-primitives 2.0.0", "trie-bench 0.16.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5638,7 +5659,7 @@ name = "transaction-factory" version = "0.0.1" dependencies = [ "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-cli 2.0.0", "substrate-client 2.0.0", @@ -5656,7 +5677,7 @@ dependencies = [ "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "memory-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "trie-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5724,7 +5745,7 @@ name = "twox-hash" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5823,6 +5844,16 @@ dependencies = [ "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "url" +version = "2.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "utf8-ranges" version = "1.0.3" @@ -5838,16 +5869,6 @@ name = "vec_map" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "vecarray" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "parity-codec 4.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "vergen" version = "3.0.4" @@ -6135,7 +6156,7 @@ dependencies = [ [[package]] name = "ws" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6147,7 +6168,7 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -6240,7 +6261,6 @@ dependencies = [ "checksum bindgen 0.47.3 (registry+https://github.com/rust-lang/crates.io-index)" = "df683a55b54b41d5ea8ebfaebb5aa7e6b84e3f3006a78f010dadc9ca88469260" "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" "checksum bitmask 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" -"checksum bitvec 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b67491e1cc6f37da6c4415cd743cb8d2e2c65388acc91ca3094a054cbf3cbd0c" "checksum bitvec 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9633b74910e1870f50f5af189b08487195cdb83c0e27a71d6f64d5e09dd0538b" "checksum blake2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91721a6330935673395a0607df4d49a9cb90ae12d259f1b3e0a3f6e1d486872e" "checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" @@ -6365,7 +6385,9 @@ dependencies = [ "checksum humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca7e5f2e110db35f93b837c81797f3714500b81d517bf20c431b16d3ca4f114" "checksum hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)" = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" "checksum hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)" = "7cb44cbce9d8ee4fb36e4c0ad7b794ac44ebaad924b9c8291a63215bb44c2c8f" +"checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" "checksum impl-codec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "78c441b3d2b5e24b407161e76d482b7bbd29b5da357707839ac40d95152f031f" "checksum impl-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5158079de9d4158e0ce1de3ae0bd7be03904efc40b3d7dd8b8c301cbf6b52b56" "checksum impl-serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" 
= "7d26be4b97d738552ea423f76c4f681012ff06c3fa36fa968656b3679f60b4a1" @@ -6377,14 +6399,14 @@ dependencies = [ "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" "checksum js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "da3ea71161651a4cd97d999b2da139109c537b15ab33abc8ae4ead38deac8a03" -"checksum jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0bb6fd4acf48d1f17eb7b0e27ab7043c16f063ad0aa7020ec92a431648286c2f" -"checksum jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34d379861584fe4e3678f6ae9ee60b41726df2989578c1dc0f90190dfc92dbe0" -"checksum jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6b0a3dc76953d88cdb47f5fe4ae21abcabc8d7edf4951ebce42db5c722d6698" -"checksum jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9e2d4475549bc0126690788ed5107573c8917f97db5298f0043fb73d46fc498" -"checksum jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad55e8dd67c2c5b16436738b0baf319a6b353feba7401dbc1508a0bd8bd451f" -"checksum jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "583f5930821dbc043236fe5d672d496ead7ff83d21351146598386c66fe8722a" -"checksum jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "04f18ca34046c249751fe90428e77e9570beaa03b33a108e74418a586063d07d" -"checksum jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aee1265de937bd53ad0fc95ff5817314922ce009fa99a04a09fdf449b140ddf6" +"checksum jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"39577db48b004cffb4c5b8e5c9b993c177c52599ecbee88711e815acf65144db" +"checksum jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd42951eb35079520ee29b7efbac654d85821b397ef88c8151600ef7e2d00217" +"checksum jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f047c10738edee7c3c6acf5241a0ce33df32ef9230c1a7fb03e4a77ee72c992f" +"checksum jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29f9149f785deaae92a4c834a9a1a83a4313b8cfedccf15362cd4cf039a64501" +"checksum jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4edd28922653d79e4f6c0f5d0a1034a4edbc5f9cf6cad8ec85e2a685713e3708" +"checksum jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c08b444cc0ed70263798834343d0ac875e664257df8079160f23ac1ea79446" +"checksum jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44561bfdd31401bad790527f1e951dde144f2341ddc3e1b859d32945e1a34eff" +"checksum jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d230ff76a8e4a3fb068aab6ba23d0c4e7d6e3b41bca524daa33988b04b065265" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" @@ -6443,7 +6465,7 @@ dependencies = [ "checksum mio-extras 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "46e73a04c2fa6250b8d802134d56d554a9ec2922bf977777c805ea5def61ce40" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" 
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum multistream-select 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51a032ec01abdbe99a1165cd3e518bdd4bd7ca509a59ae9adf186d240399b90c" +"checksum multistream-select 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e8f3cb4c93f2d79811fc11fa01faab99d8b7b8cbe024b602c27434ff2b08a59d" "checksum names 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" "checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" @@ -6466,10 +6488,9 @@ dependencies = [ "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" "checksum parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)" = "" -"checksum parity-codec 4.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2900f06356edf90de66a2922db622b36178dca71e85625eae58d0d9cc6cff2ac" "checksum parity-multiaddr 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "045b3c7af871285146300da35b1932bb6e4639b66c7c98e85d06a32cbc4e8fa7" "checksum parity-multihash 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "df3a17dc27848fd99e4f87eb0f8c9baba6ede0a6d555400c850ca45254ef4ce3" -"checksum parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"00fd14ff806ad82cea9a8f909bb116443d92efda7c9acd4502690af64741ad81" +"checksum parity-scale-codec 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "65582b5c02128a4b0fa60fb3e070216e9c84be3e4a8f1b74bc37e15a25e58daf" "checksum parity-scale-codec-derive 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a81f3cd93ed368a8e41c4e79538e99ca6e8f536096de23e3a0bc3e782093ce28" "checksum parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" "checksum parity-util-mem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2005637ccf93dbb60c85081ccaaf3f945f573da48dcc79f27f9646caa3ec1dc" @@ -6489,6 +6510,7 @@ dependencies = [ "checksum pbkdf2 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c1d2cfa5a714db3b5f24f0915e74fcdf91d09d496ba61329705dda7774d2af" "checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" @@ -6643,10 +6665,10 @@ dependencies = [ "checksum unsigned-varint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2c64cdf40b4a9645534a943668681bcb219faf51874d4b65d2e0abda1b10a2ab" "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" "checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" "checksum utf8-ranges 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9d50aa7650df78abf942826607c62468ce18d9019673d4a2ebe1865dbb96ffde" "checksum vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "33dd455d0f96e90a75803cfeb7f948768c08d70a6de9a8d2362461935698bf95" "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum vecarray 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d4d68a73b7d7d950c6558b6009e9fba229fb67562bda9fd02198f614f4ecf83f" "checksum vergen 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6aba5e34f93dc7051dfad05b98a18e9156f27e7b431fe1d2398cb6061c0a1dba" "checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" @@ -6677,7 +6699,7 @@ dependencies = [ "checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" -"checksum ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ec91ea61b83ce033c43c06c52ddc7532f465c0153281610d44c58b74083aee1a" +"checksum ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a6f5bb86663ff4d1639408410f50bf6050367a8525d644d49a6894cd618a631" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" "checksum xdg 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" diff --git a/README.adoc b/README.adoc index aaab6df3cc2f2..91c473c347e05 100644 --- a/README.adoc +++ b/README.adoc @@ -133,7 +133,7 @@ First let's get a template chainspec that you can edit. We'll use the "staging" substrate build-spec --chain=staging > ~/chainspec.json ---- -Now, edit `~/chainspec.json` in your editor. There are a lot of individual fields for each module, and one very large one which contains the Webassembly code blob for this chain. The easiest field to edit is the block `period`. Change it to 10 (seconds): +Now, edit `~/chainspec.json` in your editor. There are a lot of individual fields for each module, and one very large one which contains the WebAssembly code blob for this chain. The easiest field to edit is the block `period`. Change it to 10 (seconds): [source, json] ---- @@ -160,7 +160,7 @@ It won't do much until you start producing blocks though, so to do that you'll n [source, shell] ---- -substrate --chain ~/mychain.json --validator --key ... 
+substrate --chain ~/mychain.json --validator ---- You can distribute `mychain.json` so that everyone can synchronize and (depending on your authorities list) validate on your chain. @@ -281,9 +281,9 @@ cargo run \-- --dev Detailed logs may be shown by running the node with the following environment variables set: `RUST_LOG=debug RUST_BACKTRACE=1 cargo run \-- --dev`. -If you want to see the multi-node consensus algorithm in action locally, then you can create a local testnet with two validator nodes for Alice and Bob, who are the initial authorities of the genesis chain specification that have been endowed with a testnet DOTs. We'll give each node a name and expose them so they are listed on link:https://telemetry.polkadot.io/#/Local%20Testnet[Telemetry] . You'll need two terminals windows open. +If you want to see the multi-node consensus algorithm in action locally, then you can create a local testnet with two validator nodes for Alice and Bob, who are the initial authorities of the genesis chain specification that have been endowed with a testnet DOTs. We'll give each node a name and expose them so they are listed on link:https://telemetry.polkadot.io/#/Local%20Testnet[Telemetry]. You'll need two terminal windows open. -We'll start Alice's substrate node first on default TCP port 30333 with her chain database stored locally at `/tmp/alice`. The Bootnode ID of her node is `QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`, which is generated from the `--node-key` value that we specify below: +We'll start Alice's Substrate node first on default TCP port 30333 with her chain database stored locally at `/tmp/alice`. 
The Bootnode ID of her node is `QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`, which is generated from the `--node-key` value that we specify below: [source, shell] cargo run --release \-- \ @@ -294,7 +294,7 @@ cargo run --release \-- \ --telemetry-url ws://telemetry.polkadot.io:1024 \ --validator -In the second terminal, we'll run the following to start Bob's substrate node on a different TCP port of 30334, and with his chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option that will connect his node to Alice's Bootnode ID on TCP port 30333: +In the second terminal, we'll run the following to start Bob's Substrate node on a different TCP port of 30334, and with his chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option that will connect his node to Alice's Bootnode ID on TCP port 30333: [source, shell] cargo run --release \-- \ @@ -378,6 +378,38 @@ git checkout -b v1.0 origin/v1.0 You can then follow the same steps for building and running as described above in <>. +== Key management + +Keys in Substrate are stored in the keystore in the file system. To store keys into this keystore, +you need to use one of the two provided RPC calls. If your keys are encrypted or should be encrypted +by the keystore, you need to provide the key using one of the cli arguments `--password`, +`--password-interactive` or `--password-filename`. + +=== Recommended RPC call + +For most users who want to run a validator node, the `author_rotateKeys` RPC call is sufficient. +The RPC call will generate `N` Session keys for you and return their public keys. `N` is the number +of session keys configured in the runtime. The output of the RPC call can be used as input for the +`session::set_keys` transaction. 
+ +``` +curl -H 'Content-Type: application/json' --data '{ "jsonrpc":"2.0", "method":"author_rotateKeys", "id":1 }' localhost:9933 +``` + +=== Advanced RPC call + +If the Session keys need to match a fixed seed, they can be set individually key by key. The RPC call +expects the key seed and the key type. The key types supported by default in Substrate are listed +https://github.com/paritytech/substrate/blob/master/core/primitives/src/crypto.rs#L767[here], but the +user can declare any key type. + +``` +curl -H 'Content-Type: application/json' --data '{ "jsonrpc":"2.0", "method":"author_insertKey", "params":["KEY_TYPE", "SEED"],"id":1 }' localhost:9933 +``` + +`KEY_TYPE` - needs to be replaced with the 4-character key type identifier. +`SEED` - is the seed of the key. + == Documentation === Viewing documentation for Substrate packages diff --git a/core/application-crypto/src/traits.rs b/core/application-crypto/src/traits.rs index d7f1eafe35407..323c9c3e54c55 100644 --- a/core/application-crypto/src/traits.rs +++ b/core/application-crypto/src/traits.rs @@ -17,6 +17,7 @@ use primitives::crypto::{KeyTypeId, CryptoType, IsWrappedBy, Public}; #[cfg(feature = "std")] use primitives::crypto::Pair; +use codec::Codec; /// An application-specific key. pub trait AppKey: 'static + Send + Sync + Sized + CryptoType + Clone { @@ -72,7 +73,7 @@ pub trait AppSignature: AppKey + Eq + PartialEq + MaybeDebugHash { /// A runtime interface for a public key. pub trait RuntimePublic: Sized { /// The signature that will be generated when signing with the corresponding private key. - type Signature; + type Signature: Codec + MaybeDebugHash + Eq + PartialEq + Clone; /// Returns all public keys for the given key type in the keystore. fn all(key_type: KeyTypeId) -> crate::Vec; @@ -97,7 +98,7 @@ pub trait RuntimePublic: Sized { /// A runtime interface for an application's public key. 
pub trait RuntimeAppPublic: Sized { /// The signature that will be generated when signing with the corresponding private key. - type Signature; + type Signature: Codec + MaybeDebugHash + Eq + PartialEq + Clone; /// Returns all public keys for this application in the keystore. fn all() -> crate::Vec; diff --git a/core/cli/src/informant.rs b/core/cli/src/informant.rs index b5a2f03d79546..52a5f67c26d59 100644 --- a/core/cli/src/informant.rs +++ b/core/cli/src/informant.rs @@ -21,22 +21,12 @@ use futures::{Future, Stream}; use futures03::{StreamExt as _, TryStreamExt as _}; use log::{info, warn}; use sr_primitives::{generic::BlockId, traits::Header}; -use service::{Service, Components}; -use tokio::runtime::TaskExecutor; +use service::AbstractService; mod display; -/// Spawn informant on the event loop -#[deprecated(note = "Please use informant::build instead, and then create the task manually")] -pub fn start(service: &Service, exit: ::exit_future::Exit, handle: TaskExecutor) where - C: Components, -{ - handle.spawn(exit.until(build(service)).map(|_| ())); -} - /// Creates an informant in the form of a `Future` that must be polled regularly. 
-pub fn build(service: &Service) -> impl Future -where C: Components { +pub fn build(service: &impl AbstractService) -> impl Future { let client = service.client(); let mut display = display::InformantDisplay::new(); diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index ef5290413166d..6e9955ca1a48b 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -29,8 +29,8 @@ pub mod informant; use client::ExecutionStrategies; use service::{ config::Configuration, - ServiceFactory, FactoryFullConfiguration, RuntimeGenesis, - FactoryGenesis, PruningMode, ChainSpec, + ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert, + RuntimeGenesis, PruningMode, ChainSpec, }; use network::{ self, multiaddr::Protocol, @@ -317,13 +317,17 @@ pub struct ParseAndPrepareExport<'a> { impl<'a> ParseAndPrepareExport<'a> { /// Runs the command and exports from the chain. - pub fn run( + pub fn run_with_builder( self, + builder: F, spec_factory: S, exit: E, ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory, + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> Result, + B: ServiceBuilderExport, + C: Default, + G: RuntimeGenesis, E: IntoExit { let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; @@ -338,9 +342,8 @@ impl<'a> ParseAndPrepareExport<'a> { None => Box::new(stdout()), }; - service::chain_ops::export_blocks::( - config, exit.into_exit(), file, from.into(), to.map(Into::into), json - ).map_err(Into::into) + builder(config)?.export_blocks(exit.into_exit(), file, from.into(), to.map(Into::into), json)?; + Ok(()) } } @@ -352,13 +355,17 @@ pub struct ParseAndPrepareImport<'a> { impl<'a> ParseAndPrepareImport<'a> { /// Runs the command and imports to the chain. 
- pub fn run( + pub fn run_with_builder( self, + builder: F, spec_factory: S, exit: E, ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory, + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> Result, + B: ServiceBuilderImport, + C: Default, + G: RuntimeGenesis, E: IntoExit { let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; @@ -377,7 +384,7 @@ impl<'a> ParseAndPrepareImport<'a> { }, }; - let fut = service::chain_ops::import_blocks::(config, exit.into_exit(), file)?; + let fut = builder(config)?.import_blocks(exit.into_exit(), file)?; tokio::run(fut); Ok(()) } @@ -440,67 +447,23 @@ pub struct ParseAndPrepareRevert<'a> { impl<'a> ParseAndPrepareRevert<'a> { /// Runs the command and reverts the chain. - pub fn run( + pub fn run_with_builder( self, + builder: F, spec_factory: S ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory { + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> Result, + B: ServiceBuilderRevert, + C: Default, + G: RuntimeGenesis { let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; let blocks = self.params.num; - Ok(service::chain_ops::revert_chain::(config, blocks.into())?) + builder(config)?.revert_chain(blocks.into())?; + Ok(()) } } -/// Parse command line interface arguments and executes the desired command. -/// -/// # Return value -/// -/// A result that indicates if any error occurred. -/// If no error occurred and a custom subcommand was found, the subcommand is returned. -/// The user needs to handle this subcommand on its own. -/// -/// # Remarks -/// -/// `CC` is a custom subcommand. This needs to be an `enum`! If no custom subcommand is required, -/// `NoCustom` can be used as type here. -/// `RP` are custom parameters for the run command. This needs to be a `struct`! 
The custom -/// parameters are visible to the user as if they were normal run command parameters. If no custom -/// parameters are required, `NoCustom` can be used as type here. -#[deprecated( - note = "Use parse_and_prepare instead; see the source code of parse_and_execute for how to transition" -)] -pub fn parse_and_execute<'a, F, CC, RP, S, RS, E, I, T>( - spec_factory: S, - version: &VersionInfo, - impl_name: &'static str, - args: I, - exit: E, - run_service: RS, -) -> error::Result> -where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, - CC: StructOpt + Clone + GetLogFilter, - RP: StructOpt + Clone + AugmentClap, - E: IntoExit, - RS: FnOnce(E, RunCmd, RP, FactoryFullConfiguration) -> Result<(), String>, - I: IntoIterator, - T: Into + Clone, -{ - match parse_and_prepare::(version, impl_name, args) { - ParseAndPrepare::Run(cmd) => cmd.run(spec_factory, exit, run_service), - ParseAndPrepare::BuildSpec(cmd) => cmd.run(spec_factory), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(spec_factory, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(spec_factory, exit), - ParseAndPrepare::PurgeChain(cmd) => cmd.run(spec_factory), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(spec_factory), - ParseAndPrepare::CustomCommand(cmd) => return Ok(Some(cmd)) - }?; - - Ok(None) -} - /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. fn node_key_config

(params: NodeKeyParams, net_config_dir: &Option

) diff --git a/core/cli/src/params.rs b/core/cli/src/params.rs index 72adc552b9ecd..0f58527287eb7 100644 --- a/core/cli/src/params.rs +++ b/core/cli/src/params.rs @@ -441,7 +441,11 @@ lazy_static::lazy_static! { /// The Cli values for all test accounts. static ref TEST_ACCOUNTS_CLI_VALUES: Vec = { keyring::Sr25519Keyring::iter().map(|a| { - let help = format!("Shortcut for `--key //{} --name {}`.", a, a); + let help = format!( + "Shortcut for `--name {} --validator` with session keys for `{}` added to keystore.", + a, + a, + ); let conflicts_with = keyring::Sr25519Keyring::iter() .filter(|b| a != *b) .map(|b| b.to_string().to_lowercase()) diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 68245a54eeeac..f53eb54ca04f6 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -299,8 +299,8 @@ impl BlockchainCache for DbCacheSync { key: &CacheKeyId, at: &BlockId, ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { - let cache = self.0.read(); - let storage = cache.cache_at.get(key)?.storage(); + let mut cache = self.0.write(); + let storage = cache.get_cache(*key).storage(); let db = storage.db(); let columns = storage.columns(); let at = match *at { diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index 2ee2c2ec54531..86408a155d197 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -1030,26 +1030,37 @@ pub(crate) mod tests { #[test] fn cache_can_be_initialized_after_genesis_inserted() { - let db = LightStorage::::new_test(); + let (genesis_hash, storage) = { + let db = LightStorage::::new_test(); + + // before cache is initialized => None + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + + // insert genesis block (no value for cache is provided) + let mut genesis_hash = None; + insert_block(&db, HashMap::new(), || { + let header = default_header(&Default::default(), 0); + genesis_hash = Some(header.hash()); + 
header + }); - // before cache is initialized => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + // after genesis is inserted => None + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); - // insert genesis block (no value for cache is provided) - let mut genesis_hash = None; - insert_block(&db, HashMap::new(), || { - let header = default_header(&Default::default(), 0); - genesis_hash = Some(header.hash()); - header - }); + // initialize cache + db.cache().initialize(b"test", vec![42]).unwrap(); - // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + // after genesis is inserted + cache is initialized => Some + assert_eq!( + db.cache().get_at(b"test", &BlockId::Number(0)), + Some(((0, genesis_hash.unwrap()), None, vec![42])), + ); - // initialize cache - db.cache().initialize(b"test", vec![42]).unwrap(); + (genesis_hash, db.db) + }; - // after genesis is inserted + cache is initialized => Some + // restart && check that after restart value is read from the cache + let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); assert_eq!( db.cache().get_at(b"test", &BlockId::Number(0)), Some(((0, genesis_hash.unwrap()), None, vec![42])), diff --git a/core/client/src/client.rs b/core/client/src/client.rs index c6234dc8d1cca..a57584575cc23 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -27,8 +27,9 @@ use codec::{Encode, Decode}; use hash_db::{Hasher, Prefix}; use primitives::{ Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash, - NeverNativeValue, ExecutionContext, - storage::{StorageKey, StorageData, well_known_keys}, NativeOrEncoded + NeverNativeValue, ExecutionContext, NativeOrEncoded, + storage::{StorageKey, StorageData, well_known_keys}, + offchain, }; use substrate_telemetry::{telemetry, SUBSTRATE_INFO}; use sr_primitives::{ @@ -1471,8 +1472,6 @@ impl CallRuntimeAt for Client where context: 
ExecutionContext, recorder: &Option>>>, ) -> error::Result> { - let enable_keystore = context.enable_keystore(); - let manager = match context { ExecutionContext::BlockConstruction => self.execution_strategies.block_construction.get_manager(), @@ -1480,16 +1479,17 @@ impl CallRuntimeAt for Client where self.execution_strategies.syncing.get_manager(), ExecutionContext::Importing => self.execution_strategies.importing.get_manager(), - ExecutionContext::OffchainWorker(_) => + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => self.execution_strategies.offchain_worker.get_manager(), - ExecutionContext::Other => + ExecutionContext::OffchainCall(_) => self.execution_strategies.other.get_manager(), }; + let capabilities = context.capabilities(); let mut offchain_extensions = match context { - ExecutionContext::OffchainWorker(ext) => Some(ext), + ExecutionContext::OffchainCall(ext) => ext.map(|x| x.0), _ => None, - }; + }.map(|ext| offchain::LimitedExternalities::new(capabilities, ext)); self.executor.contextual_call::<_, _, fn(_,_) -> _,_,_>( || core_api.initialize_block(at, &self.prepare_environment_block(at)?), @@ -1502,7 +1502,7 @@ impl CallRuntimeAt for Client where native_call, offchain_extensions.as_mut(), recorder, - enable_keystore, + capabilities.has(offchain::Capability::Keystore), ) } diff --git a/core/client/src/genesis.rs b/core/client/src/genesis.rs index 3ac93f4f577b6..4791a34d17427 100644 --- a/core/client/src/genesis.rs +++ b/core/client/src/genesis.rs @@ -54,8 +54,7 @@ mod tests { native_executor_instance!( Executor, test_client::runtime::api::dispatch, - test_client::runtime::native_version, - test_client::runtime::WASM_BINARY + test_client::runtime::native_version ); fn executor() -> executor::NativeExecutor { @@ -153,6 +152,7 @@ mod tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, None, + vec![], ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); @@ -181,6 +181,7 @@ mod 
tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, None, + vec![], ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); @@ -209,6 +210,7 @@ mod tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 68, None, + vec![], ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index 2367aaf806510..746f36069d69e 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -471,7 +471,7 @@ pub fn check_execution_proof( Default::default(), Default::default(), request.header.hash(), - request.header.digest().clone(), + Default::default(), ); execution_proof_check_on_trie_backend::( &trie_backend, @@ -498,7 +498,7 @@ pub fn check_execution_proof( #[cfg(test)] mod tests { use consensus::BlockOrigin; - use test_client::{self, runtime::Header, ClientExt, TestClient}; + use test_client::{self, runtime::{Header, Digest}, ClientExt, TestClient}; use executor::NativeExecutor; use crate::backend::{Backend, NewBlockState}; use crate::in_mem::Backend as InMemBackend; @@ -509,8 +509,7 @@ mod tests { fn execution_proof_is_generated_and_checked() { fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); - let remote_root = remote_client.state_at(&remote_block_id) - .unwrap().storage_root(::std::iter::empty()).0; + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node let (remote_result, remote_execution_proof) = remote_client.execution_proof( @@ -523,13 +522,7 @@ mod tests { let local_executor = NativeExecutor::::new(None); let local_result = check_execution_proof(&local_executor, &RemoteCallRequest { block: test_client::runtime::Hash::default(), - header: test_client::runtime::Header { - state_root: remote_root.into(), - parent_hash: 
Default::default(), - number: at, - extrinsics_root: Default::default(), - digest: Default::default(), - }, + header: remote_header, method: method.into(), call_data: vec![], retry_count: None, @@ -540,10 +533,12 @@ mod tests { // prepare remote client let remote_client = test_client::new(); - for _ in 1..3 { + for i in 1u32..3u32 { + let mut digest = Digest::default(); + digest.push(sr_primitives::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); remote_client.import_justified( BlockOrigin::Own, - remote_client.new_block(Default::default()).unwrap().bake().unwrap(), + remote_client.new_block(digest).unwrap().bake().unwrap(), Default::default(), ).unwrap(); } diff --git a/core/consensus/aura/primitives/src/lib.rs b/core/consensus/aura/primitives/src/lib.rs index 84671e5ae6864..e4620fcdbfdd4 100644 --- a/core/consensus/aura/primitives/src/lib.rs +++ b/core/consensus/aura/primitives/src/lib.rs @@ -23,38 +23,38 @@ use substrate_client::decl_runtime_apis; use rstd::vec::Vec; use sr_primitives::ConsensusEngineId; -mod app_sr25519 { - use app_crypto::{app_crypto, key_types::AURA, sr25519}; - app_crypto!(sr25519, AURA); -} - pub mod sr25519 { + mod app_sr25519 { + use app_crypto::{app_crypto, key_types::AURA, sr25519}; + app_crypto!(sr25519, AURA); + } + /// An Aura authority keypair using S/R 25519 as its crypto. #[cfg(feature = "std")] - pub type AuthorityPair = super::app_sr25519::Pair; + pub type AuthorityPair = app_sr25519::Pair; /// An Aura authority signature using S/R 25519 as its crypto. - pub type AuthoritySignature = super::app_sr25519::Signature; + pub type AuthoritySignature = app_sr25519::Signature; /// An Aura authority identifier using S/R 25519 as its crypto. 
- pub type AuthorityId = super::app_sr25519::Public; -} - -mod app_ed25519 { - use app_crypto::{app_crypto, key_types::AURA, ed25519}; - app_crypto!(ed25519, AURA); + pub type AuthorityId = app_sr25519::Public; } pub mod ed25519 { + mod app_ed25519 { + use app_crypto::{app_crypto, key_types::AURA, ed25519}; + app_crypto!(ed25519, AURA); + } + /// An Aura authority keypair using Ed25519 as its crypto. #[cfg(feature = "std")] - pub type AuthorityPair = super::app_ed25519::Pair; + pub type AuthorityPair = app_ed25519::Pair; /// An Aura authority signature using Ed25519 as its crypto. - pub type AuthoritySignature = super::app_ed25519::Signature; + pub type AuthoritySignature = app_ed25519::Signature; /// An Aura authority identifier using Ed25519 as its crypto. - pub type AuthorityId = super::app_ed25519::Public; + pub type AuthorityId = app_ed25519::Public; } /// The `ConsensusEngineId` of AuRa. diff --git a/core/executor/src/native_executor.rs b/core/executor/src/native_executor.rs index 937e0033539f0..1ebf7f31464a7 100644 --- a/core/executor/src/native_executor.rs +++ b/core/executor/src/native_executor.rs @@ -49,9 +49,6 @@ pub fn with_native_environment(ext: &mut dyn Externalities, /// Delegate for dispatching a CodeExecutor call to native code. pub trait NativeExecutionDispatch: Send + Sync { - /// Get the wasm code that the native dispatch will be equivalent to. - fn native_equivalent() -> &'static [u8]; - /// Dispatch a method and input data to be executed natively. fn dispatch(ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result>; @@ -211,19 +208,13 @@ impl CodeExecutor for NativeExecutor { + ( $pub:vis $name:ident, $dispatcher:path, $version:path $(,)?) => { /// A unit struct which implements `NativeExecutionDispatch` feeding in the hard-coded runtime. 
$pub struct $name; - $crate::native_executor_instance!(IMPL $name, $dispatcher, $version, $code); + $crate::native_executor_instance!(IMPL $name, $dispatcher, $version); }; - (IMPL $name:ident, $dispatcher:path, $version:path, $code:expr) => { + (IMPL $name:ident, $dispatcher:path, $version:path) => { impl $crate::NativeExecutionDispatch for $name { - fn native_equivalent() -> &'static [u8] { - // WARNING!!! This assumes that the runtime was built *before* the main project. Until we - // get a proper build script, this must be strictly adhered to or things will go wrong. - $code - } - fn dispatch( ext: &mut $crate::Externalities<$crate::Blake2Hasher>, method: &str, diff --git a/core/finality-grandpa/Cargo.toml b/core/finality-grandpa/Cargo.toml index 22237c5a0b5a0..393ee45db5776 100644 --- a/core/finality-grandpa/Cargo.toml +++ b/core/finality-grandpa/Cargo.toml @@ -23,7 +23,6 @@ serde_json = "1.0" client = { package = "substrate-client", path = "../client" } inherents = { package = "substrate-inherents", path = "../../core/inherents" } network = { package = "substrate-network", path = "../network" } -service = { package = "substrate-service", path = "../service", optional = true } srml-finality-tracker = { path = "../../srml/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "primitives" } grandpa = { package = "finality-grandpa", version = "0.9.0", features = ["derive-codec"] } @@ -37,7 +36,3 @@ babe_primitives = { package = "substrate-consensus-babe-primitives", path = "../ env_logger = "0.6" tokio = "0.1.17" tempfile = "3.1" - -[features] -default = ["service-integration"] -service-integration = ["service"] diff --git a/core/finality-grandpa/primitives/src/lib.rs b/core/finality-grandpa/primitives/src/lib.rs index b92444e26295c..1f103a548d1a6 100644 --- a/core/finality-grandpa/primitives/src/lib.rs +++ b/core/finality-grandpa/primitives/src/lib.rs @@ -52,6 +52,12 @@ pub type AuthorityWeight = u64; /// The index of an 
authority. pub type AuthorityIndex = u64; +/// The identifier of a GRANDPA set. +pub type SetId = u64; + +/// The round indicator. +pub type RoundNumber = u64; + /// A scheduled change of authority set. #[cfg_attr(feature = "std", derive(Debug, Serialize))] #[derive(Clone, Eq, PartialEq, Encode, Decode)] diff --git a/core/finality-grandpa/src/aux_schema.rs b/core/finality-grandpa/src/aux_schema.rs index 78c1741d51903..599604c1d32bf 100644 --- a/core/finality-grandpa/src/aux_schema.rs +++ b/core/finality-grandpa/src/aux_schema.rs @@ -26,7 +26,7 @@ use grandpa::round::State as RoundState; use sr_primitives::traits::{Block as BlockT, NumberFor}; use log::{info, warn}; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; -use fg_primitives::AuthorityId; +use fg_primitives::{AuthorityId, AuthorityWeight, SetId, RoundNumber}; use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; @@ -47,16 +47,16 @@ const CURRENT_VERSION: u32 = 2; #[cfg_attr(test, derive(PartialEq))] pub enum V1VoterSetState { /// The voter set state, currently paused. - Paused(u64, RoundState), + Paused(RoundNumber, RoundState), /// The voter set state, currently live. 
- Live(u64, RoundState), + Live(RoundNumber, RoundState), } -type V0VoterSetState = (u64, RoundState); +type V0VoterSetState = (RoundNumber, RoundState); #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0PendingChange { - next_authorities: Vec<(AuthorityId, u64)>, + next_authorities: Vec<(AuthorityId, AuthorityWeight)>, delay: N, canon_height: N, canon_hash: H, @@ -64,8 +64,8 @@ struct V0PendingChange { #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0AuthoritySet { - current_authorities: Vec<(AuthorityId, u64)>, - set_id: u64, + current_authorities: Vec<(AuthorityId, AuthorityWeight)>, + set_id: SetId, pending_changes: Vec>, } @@ -267,7 +267,7 @@ pub(crate) fn load_persistent( -> ClientResult> where B: AuxStore, - G: FnOnce() -> ClientResult>, + G: FnOnce() -> ClientResult>, { let version: Option = load_decode(backend, VERSION_KEY)?; let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? @@ -448,7 +448,7 @@ mod test { let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; - let round_number: u64 = 42; + let round_number: RoundNumber = 42; let round_state = RoundState:: { prevote_ghost: Some((H256::random(), 32)), finalized: None, @@ -536,7 +536,7 @@ mod test { let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; - let round_number: u64 = 42; + let round_number: RoundNumber = 42; let round_state = RoundState:: { prevote_ghost: Some((H256::random(), 32)), finalized: None, diff --git a/core/finality-grandpa/src/communication/gossip.rs b/core/finality-grandpa/src/communication/gossip.rs index 5fd09620e925b..831be7ad14531 100644 --- a/core/finality-grandpa/src/communication/gossip.rs +++ b/core/finality-grandpa/src/communication/gossip.rs @@ -386,12 +386,14 @@ impl Misbehavior { struct PeerInfo { view: View, + roles: Roles, } impl PeerInfo { - fn new() -> Self { + fn new(roles: Roles) -> Self { PeerInfo { view: View::default(), + roles, } } } @@ -408,8 +410,8 @@ impl Default for Peers { } impl Peers { 
- fn new_peer(&mut self, who: PeerId) { - self.inner.insert(who, PeerInfo::new()); + fn new_peer(&mut self, who: PeerId, roles: Roles) { + self.inner.insert(who, PeerInfo::new(roles)); } fn peer_disconnected(&mut self, who: &PeerId) { @@ -503,12 +505,13 @@ struct Inner { config: crate::Config, next_rebroadcast: Instant, pending_catch_up: PendingCatchUp, + catch_up_enabled: bool, } type MaybeMessage = Option<(Vec, NeighborPacket>)>; impl Inner { - fn new(config: crate::Config) -> Self { + fn new(config: crate::Config, catch_up_enabled: bool) -> Self { Inner { local_view: None, peers: Peers::default(), @@ -516,6 +519,7 @@ impl Inner { next_rebroadcast: Instant::now() + REBROADCAST_AFTER, authorities: Vec::new(), pending_catch_up: PendingCatchUp::None, + catch_up_enabled, config, } } @@ -804,14 +808,21 @@ impl Inner { } fn try_catch_up(&mut self, who: &PeerId) -> (Option>, Option) { + if !self.catch_up_enabled { + return (None, None); + } + let mut catch_up = None; let mut report = None; // if the peer is on the same set and ahead of us by a margin bigger // than `CATCH_UP_THRESHOLD` then we should ask it for a catch up - // message. + // message. we only send catch-up requests to authorities, observers + // won't be able to reply since they don't follow the full GRANDPA + // protocol and therefore might not have the vote data available. if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { - if peer.view.set_id == local_view.set_id && + if peer.roles.is_authority() && + peer.view.set_id == local_view.set_id && peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 { // send catch up request if allowed @@ -917,13 +928,17 @@ pub(super) struct GossipValidator { } impl GossipValidator { - /// Create a new gossip-validator. This initialized the current set to 0. - pub(super) fn new(config: crate::Config, set_state: environment::SharedVoterSetState) - -> (GossipValidator, ReportStream) - { + /// Create a new gossip-validator. 
The current set is initialized to 0. If + /// `catch_up_enabled` is set to false then the validator will not issue any + /// catch up requests (useful e.g. when running just the GRANDPA observer). + pub(super) fn new( + config: crate::Config, + set_state: environment::SharedVoterSetState, + catch_up_enabled: bool, + ) -> (GossipValidator, ReportStream) { let (tx, rx) = mpsc::unbounded(); let val = GossipValidator { - inner: parking_lot::RwLock::new(Inner::new(config)), + inner: parking_lot::RwLock::new(Inner::new(config, catch_up_enabled)), set_state, report_sender: tx, }; @@ -1023,10 +1038,10 @@ impl GossipValidator { } impl network_gossip::Validator for GossipValidator { - fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, _roles: Roles) { + fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: Roles) { let packet = { let mut inner = self.inner.write(); - inner.peers.new_peer(who.clone()); + inner.peers.new_peer(who.clone(), roles); inner.local_view.as_ref().map(|v| { NeighborPacket { @@ -1314,7 +1329,7 @@ mod tests { assert!(res.unwrap().is_none()); // connect & disconnect. 
- peers.new_peer(id.clone()); + peers.new_peer(id.clone(), Roles::AUTHORITY); peers.peer_disconnected(&id); let res = peers.update_peer_state(&id, update.clone()); @@ -1350,7 +1365,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone()); + peers.new_peer(id.clone(), Roles::AUTHORITY); let mut check_update = move |update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); @@ -1370,7 +1385,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone()); + peers.new_peer(id.clone(), Roles::AUTHORITY); peers.update_peer_state(&id, NeighborPacket { round: Round(10), @@ -1408,6 +1423,7 @@ mod tests { let (val, _) = GossipValidator::::new( config(), voter_set_state(), + true, ); let set_id = 1; @@ -1443,6 +1459,7 @@ mod tests { let (val, _) = GossipValidator::::new( config(), voter_set_state(), + true, ); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); @@ -1487,6 +1504,7 @@ mod tests { let (val, _) = GossipValidator::::new( config(), voter_set_state(), + true, ); let set_id = 1; @@ -1555,6 +1573,7 @@ mod tests { let (val, _) = GossipValidator::::new( config(), set_state.clone(), + true, ); let set_id = 1; @@ -1567,7 +1586,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let mut inner = val.inner.write(); - inner.peers.new_peer(peer.clone()); + inner.peers.new_peer(peer.clone(), Roles::AUTHORITY); let res = inner.handle_catch_up_request( &peer, @@ -1609,6 +1628,7 @@ mod tests { let (val, _) = GossipValidator::::new( config(), set_state.clone(), + true, ); // the validator starts at set id 2 @@ -1617,7 +1637,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone()); + val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); let send_request = 
|set_id, round| { let mut inner = val.inner.write(); @@ -1682,4 +1702,151 @@ mod tests { false, ); } + + #[test] + fn issues_catch_up_request_on_neighbor_packet_import() { + let (val, _) = GossipValidator::::new( + config(), + voter_set_state(), + true, + ); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. + let peer = PeerId::random(); + val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); + + let import_neighbor_message = |set_id, round| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 42, + }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request for the previous round. + match import_neighbor_message(1, 42) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + }, + _ => panic!("expected catch up message"), + } + + // we note that we're at round 41. + val.note_round(Round(41), |_, _| {}); + + // if we import a neighbor message within CATCH_UP_THRESHOLD then we + // won't request a catch up. + match import_neighbor_message(1, 42) { + None => {}, + _ => panic!("expected no catch up message"), + } + + // or if the peer is on a lower round. + match import_neighbor_message(1, 40) { + None => {}, + _ => panic!("expected no catch up message"), + } + + // we also don't request a catch up if the peer is in a different set. + match import_neighbor_message(2, 42) { + None => {}, + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_when_disabled() { + // we create a gossip validator with catch up requests disabled. 
+ let (val, _) = GossipValidator::::new( + config(), + voter_set_state(), + false, + ); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. + let peer = PeerId::random(); + val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since they're disabled + // we should get `None`. + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(42), + set_id: SetId(1), + commit_finalized_height: 50, + }, + ); + + match catch_up_request { + None => {}, + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_to_non_authorities() { + let (val, _) = GossipValidator::::new( + config(), + voter_set_state(), + true, + ); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peers making the requests to the validator, + // otherwise it is discarded. + let peer_authority = PeerId::random(); + let peer_full = PeerId::random(); + + val.inner.write().peers.new_peer(peer_authority.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer_full.clone(), Roles::FULL); + + let import_neighbor_message = |peer| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(42), + set_id: SetId(1), + commit_finalized_height: 50, + }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since the node is not an + // authority we should get `None`. 
+ if import_neighbor_message(peer_full).is_some() { + panic!("expected no catch up message"); + } + + // importing the same neighbor message from a peer who is an authority + // should lead to a catch up request. + match import_neighbor_message(peer_authority) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + }, + _ => panic!("expected catch up message"), + } + } } diff --git a/core/finality-grandpa/src/communication/mod.rs b/core/finality-grandpa/src/communication/mod.rs index 2aa2618535948..732c14c1a9ce3 100644 --- a/core/finality-grandpa/src/communication/mod.rs +++ b/core/finality-grandpa/src/communication/mod.rs @@ -50,7 +50,9 @@ use crate::environment::HasVoted; use gossip::{ GossipMessage, FullCatchUpMessage, FullCommitMessage, VoteOrPrecommitMessage, GossipValidator }; -use fg_primitives::{AuthorityPair, AuthorityId, AuthoritySignature}; +use fg_primitives::{ + AuthorityPair, AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber, +}; pub mod gossip; mod periodic; @@ -129,12 +131,12 @@ pub trait Network: Clone + Send + 'static { } /// Create a unique topic for a round and set-id combo. -pub(crate) fn round_topic(round: u64, set_id: u64) -> B::Hash { +pub(crate) fn round_topic(round: RoundNumber, set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) } /// Create a unique topic for global messages on a set ID. -pub(crate) fn global_topic(set_id: u64) -> B::Hash { +pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } @@ -236,19 +238,25 @@ pub(crate) struct NetworkBridge> { impl> NetworkBridge { /// Create a new NetworkBridge to the given NetworkService. Returns the service /// handle and a future that must be polled to completion to finish startup. 
- /// If a voter set state is given it registers previous round votes with the - /// gossip service. + /// On creation it will register previous rounds' votes with the gossip + /// service taken from the VoterSetState. pub(crate) fn new( service: N, config: crate::Config, set_state: crate::environment::SharedVoterSetState, on_exit: impl Future + Clone + Send + 'static, + catch_up_enabled: bool, ) -> ( Self, impl futures::Future + Send + 'static, ) { - let (validator, report_stream) = GossipValidator::new(config, set_state.clone()); + let (validator, report_stream) = GossipValidator::new( + config, + set_state.clone(), + catch_up_enabled, + ); + let validator = Arc::new(validator); service.register_validator(validator.clone()); @@ -612,25 +620,25 @@ impl> Clone for NetworkBridge { } } -fn localized_payload(round: u64, set_id: u64, message: &E) -> Vec { +fn localized_payload(round: RoundNumber, set_id: SetIdNumber, message: &E) -> Vec { (message, round, set_id).encode() } -/// Type-safe wrapper around u64 when indicating that it's a round number. +/// Type-safe wrapper around a round number. #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Encode, Decode)] -pub struct Round(pub u64); +pub struct Round(pub RoundNumber); -/// Type-safe wrapper around u64 when indicating that it's a set ID. +/// Type-safe wrapper around a set ID. #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Encode, Decode)] -pub struct SetId(pub u64); +pub struct SetId(pub SetIdNumber); // check a message. pub(crate) fn check_message_sig( message: &Message, id: &AuthorityId, signature: &AuthoritySignature, - round: u64, - set_id: u64, + round: RoundNumber, + set_id: SetIdNumber, ) -> Result<(), ()> { let as_public = id.clone(); let encoded_raw = localized_payload(round, set_id, message); @@ -650,8 +658,8 @@ pub(crate) fn check_message_sig( /// `ed25519` and `BLS` signatures (which we might use in the future), care must /// be taken when switching to different key types. 
struct OutgoingMessages> { - round: u64, - set_id: u64, + round: RoundNumber, + set_id: SetIdNumber, locals: Option<(AuthorityPair, AuthorityId)>, sender: mpsc::UnboundedSender>, network: N, @@ -845,8 +853,8 @@ fn check_catch_up( fn check_signatures<'a, B, I>( messages: I, - round: u64, - set_id: u64, + round: RoundNumber, + set_id: SetIdNumber, mut signatures_checked: usize, ) -> Result where B: BlockT, @@ -913,7 +921,7 @@ impl> CommitsOut { /// Create a new commit output stream. pub(crate) fn new( network: N, - set_id: u64, + set_id: SetIdNumber, is_voter: bool, gossip_validator: Arc>, ) -> Self { @@ -927,10 +935,10 @@ impl> CommitsOut { } impl> Sink for CommitsOut { - type SinkItem = (u64, Commit); + type SinkItem = (RoundNumber, Commit); type SinkError = Error; - fn start_send(&mut self, input: (u64, Commit)) -> StartSend { + fn start_send(&mut self, input: (RoundNumber, Commit)) -> StartSend { if !self.is_voter { return Ok(AsyncSink::Ready); } diff --git a/core/finality-grandpa/src/communication/tests.rs b/core/finality-grandpa/src/communication/tests.rs index de5a084039268..b537c29dcab9c 100644 --- a/core/finality-grandpa/src/communication/tests.rs +++ b/core/finality-grandpa/src/communication/tests.rs @@ -182,6 +182,7 @@ fn make_test_network() -> ( config(), voter_set_state(), Exit, + true, ); ( @@ -447,8 +448,8 @@ fn peer_with_higher_view_leads_to_catch_up_request() { let (tester, mut net) = make_test_network(); let test = tester .and_then(move |tester| { - // register a peer. - tester.gossip_validator.new_peer(&mut NoopContext, &id, network::config::Roles::FULL); + // register a peer with authority role. 
+ tester.gossip_validator.new_peer(&mut NoopContext, &id, network::config::Roles::AUTHORITY); Ok((tester, id)) }) .and_then(move |(tester, id)| { diff --git a/core/finality-grandpa/src/environment.rs b/core/finality-grandpa/src/environment.rs index b20e8ab5dfcbb..c0474cb0368c8 100644 --- a/core/finality-grandpa/src/environment.rs +++ b/core/finality-grandpa/src/environment.rs @@ -51,7 +51,7 @@ use crate::authorities::{AuthoritySet, SharedAuthoritySet}; use crate::consensus_changes::SharedConsensusChanges; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; -use fg_primitives::{AuthorityId, AuthoritySignature}; +use fg_primitives::{AuthorityId, AuthoritySignature, SetId, RoundNumber}; type HistoricalVotes = grandpa::HistoricalVotes< ::Hash, @@ -65,7 +65,7 @@ type HistoricalVotes = grandpa::HistoricalVotes< #[derive(Debug, Clone, Decode, Encode, PartialEq)] pub struct CompletedRound { /// The round number. - pub number: u64, + pub number: RoundNumber, /// The round state (prevote ghost, estimate, finalized, etc.) pub state: RoundState>, /// The target block base used for voting in the round. @@ -80,7 +80,7 @@ pub struct CompletedRound { #[derive(Debug, Clone, PartialEq)] pub struct CompletedRounds { rounds: Vec>, - set_id: u64, + set_id: SetId, voters: Vec, } @@ -100,7 +100,7 @@ impl codec::EncodeLike for CompletedRounds {} impl Decode for CompletedRounds { fn decode(value: &mut I) -> Result { - <(Vec>, u64, Vec)>::decode(value) + <(Vec>, SetId, Vec)>::decode(value) .map(|(rounds, set_id, voters)| CompletedRounds { rounds: rounds.into(), set_id, @@ -113,7 +113,7 @@ impl CompletedRounds { /// Create a new completed rounds tracker with NUM_LAST_COMPLETED_ROUNDS capacity. pub(crate) fn new( genesis: CompletedRound, - set_id: u64, + set_id: SetId, voters: &AuthoritySet>, ) -> CompletedRounds @@ -126,7 +126,7 @@ impl CompletedRounds { } /// Get the set-id and voter set of the completed rounds. 
- pub fn set_info(&self) -> (u64, &[AuthorityId]) { + pub fn set_info(&self) -> (SetId, &[AuthorityId]) { (self.set_id, &self.voters[..]) } @@ -162,7 +162,7 @@ impl CompletedRounds { /// A map with voter status information for currently live rounds, /// which votes have we cast and what are they. -pub type CurrentRounds = BTreeMap>; +pub type CurrentRounds = BTreeMap>; /// The state of the current voter set, whether it is currently active or not /// and information related to the previously completed rounds. Current round @@ -190,7 +190,7 @@ impl VoterSetState { /// the given genesis state and the given authorities. Round 1 is added as a /// current round (with state `HasVoted::No`). pub(crate) fn live( - set_id: u64, + set_id: SetId, authority_set: &AuthoritySet>, genesis_state: (Block::Hash, NumberFor), ) -> VoterSetState { @@ -237,7 +237,7 @@ impl VoterSetState { /// Returns the voter set state validating that it includes the given round /// in current rounds and that the voter isn't paused. - pub fn with_current_round(&self, round: u64) + pub fn with_current_round(&self, round: RoundNumber) -> Result<(&CompletedRounds, &CurrentRounds), Error> { if let VoterSetState::Live { completed_rounds, current_rounds } = self { @@ -344,7 +344,7 @@ impl SharedVoterSetState { } /// Return vote status information for the current round. - pub(crate) fn has_voted(&self, round: u64) -> HasVoted { + pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { match &*self.inner.read() { VoterSetState::Live { current_rounds, .. 
} => { current_rounds.get(&round).and_then(|has_voted| match has_voted { @@ -375,7 +375,7 @@ pub(crate) struct Environment, RA, SC> { pub(crate) authority_set: SharedAuthoritySet>, pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) network: crate::communication::NetworkBridge, - pub(crate) set_id: u64, + pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, } @@ -554,7 +554,7 @@ where fn round_data( &self, - round: u64 + round: RoundNumber, ) -> voter::RoundData { let now = Instant::now(); let prevote_timer = Delay::new(now + self.config.gossip_duration * 2); @@ -601,7 +601,7 @@ where } } - fn proposed(&self, round: u64, propose: PrimaryPropose) -> Result<(), Self::Error> { + fn proposed(&self, round: RoundNumber, propose: PrimaryPropose) -> Result<(), Self::Error> { let local_id = crate::is_voter(&self.voters, &self.config.keystore); let local_id = match local_id { @@ -641,7 +641,7 @@ where Ok(()) } - fn prevoted(&self, round: u64, prevote: Prevote) -> Result<(), Self::Error> { + fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { let local_id = crate::is_voter(&self.voters, &self.config.keystore); let local_id = match local_id { @@ -683,7 +683,7 @@ where Ok(()) } - fn precommitted(&self, round: u64, precommit: Precommit) -> Result<(), Self::Error> { + fn precommitted(&self, round: RoundNumber, precommit: Precommit) -> Result<(), Self::Error> { let local_id = crate::is_voter(&self.voters, &self.config.keystore); let local_id = match local_id { @@ -737,7 +737,7 @@ where fn completed( &self, - round: u64, + round: RoundNumber, state: RoundState>, base: (Block::Hash, NumberFor), historical_votes: &HistoricalVotes, @@ -794,7 +794,13 @@ where Ok(()) } - fn finalize_block(&self, hash: Block::Hash, number: NumberFor, round: u64, commit: Commit) -> Result<(), Self::Error> { + fn finalize_block( + &self, + hash: Block::Hash, + number: NumberFor, + round: RoundNumber, + commit: Commit, + ) -> Result<(), 
Self::Error> { finalize_block( &*self.inner, &self.authority_set, @@ -818,7 +824,7 @@ where fn prevote_equivocation( &self, - _round: u64, + _round: RoundNumber, equivocation: ::grandpa::Equivocation, Self::Signature> ) { warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); @@ -827,7 +833,7 @@ where fn precommit_equivocation( &self, - _round: u64, + _round: RoundNumber, equivocation: Equivocation, Self::Signature> ) { warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); @@ -837,11 +843,11 @@ where pub(crate) enum JustificationOrCommit { Justification(GrandpaJustification), - Commit((u64, Commit)), + Commit((RoundNumber, Commit)), } -impl From<(u64, Commit)> for JustificationOrCommit { - fn from(commit: (u64, Commit)) -> JustificationOrCommit { +impl From<(RoundNumber, Commit)> for JustificationOrCommit { + fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { JustificationOrCommit::Commit(commit) } } diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index 428fa1aeb8232..d6f4d768472a3 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -93,10 +93,6 @@ mod light_import; mod observer; mod until_imported; -#[cfg(feature="service-integration")] -mod service_integration; -#[cfg(feature="service-integration")] -pub use service_integration::{LinkHalfForService, BlockImportForService, BlockImportForLightService}; pub use communication::Network; pub use finality_proof::FinalityProofProvider; pub use light_import::light_block_import; @@ -107,8 +103,7 @@ use environment::{Environment, VoterSetState}; use import::GrandpaBlockImport; use until_imported::UntilGlobalMessageBlocksImported; use communication::NetworkBridge; -use service::TelemetryOnConnect; -use fg_primitives::AuthoritySignature; +use fg_primitives::{AuthoritySignature, SetId, AuthorityWeight}; // Re-export these two because it's just so damn 
convenient. pub use fg_primitives::{AuthorityId, ScheduledChange}; @@ -267,8 +262,8 @@ impl, RA> BlockStatus for Arc { pub(crate) canon_number: N, pub(crate) canon_hash: H, - pub(crate) set_id: u64, - pub(crate) authorities: Vec<(AuthorityId, u64)>, + pub(crate) set_id: SetId, + pub(crate) authorities: Vec<(AuthorityId, AuthorityWeight)>, } /// Commands issued to the voter. @@ -399,7 +394,7 @@ where } fn global_communication, B, E, N, RA>( - set_id: u64, + set_id: SetId, voters: &Arc>, client: &Arc>, network: &NetworkBridge, @@ -484,7 +479,7 @@ pub struct GrandpaParams, N, RA, SC, X> { /// Handle to a future that will resolve on exit. pub on_exit: X, /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option, + pub telemetry_on_connect: Option>, } /// Run a GRANDPA voter as a task. Provide configuration and a link to a @@ -524,13 +519,14 @@ pub fn run_grandpa_voter, N, RA, SC, X>( config.clone(), persistent_data.set_state.clone(), on_exit.clone(), + true, ); register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { let authorities = persistent_data.authority_set.clone(); - let events = telemetry_on_connect.telemetry_connection_sinks + let events = telemetry_on_connect .for_each(move |_| { telemetry!(CONSENSUS_INFO; "afg.authority_set"; "authority_set_id" => ?authorities.set_id(), diff --git a/core/finality-grandpa/src/light_import.rs b/core/finality-grandpa/src/light_import.rs index dbdabe96294b1..4d5381f1cc4b2 100644 --- a/core/finality-grandpa/src/light_import.rs +++ b/core/finality-grandpa/src/light_import.rs @@ -36,7 +36,7 @@ use sr_primitives::Justification; use sr_primitives::traits::{ NumberFor, Block as BlockT, Header as HeaderT, ProvideRuntimeApi, DigestFor, }; -use fg_primitives::{GrandpaApi, AuthorityId}; +use fg_primitives::{self, GrandpaApi, AuthorityId}; use 
sr_primitives::generic::BlockId; use primitives::{H256, Blake2Hasher}; @@ -192,7 +192,7 @@ impl LightAuthoritySet { /// Get a genesis set with given authorities. pub fn genesis(initial: Vec<(AuthorityId, u64)>) -> Self { LightAuthoritySet { - set_id: 0, + set_id: fg_primitives::SetId::default(), authorities: initial, } } diff --git a/core/finality-grandpa/src/observer.rs b/core/finality-grandpa/src/observer.rs index bce292262e028..8a2d539f49694 100644 --- a/core/finality-grandpa/src/observer.rs +++ b/core/finality-grandpa/src/observer.rs @@ -175,8 +175,10 @@ pub fn run_grandpa_observer, N, RA, SC>( network, config.clone(), persistent_data.set_state.clone(), - on_exit.clone() + on_exit.clone(), + false, ); + let observer_work = ObserverWork::new( client, network, diff --git a/core/finality-grandpa/src/service_integration.rs b/core/finality-grandpa/src/service_integration.rs deleted file mode 100644 index 9f19b9204190b..0000000000000 --- a/core/finality-grandpa/src/service_integration.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -/// Integrate grandpa finality with substrate service - -use client; -use service::{FullBackend, FullExecutor, LightBackend, LightExecutor, ServiceFactory}; - -pub type BlockImportForService = crate::GrandpaBlockImport< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, - ::SelectChain, ->; - -pub type LinkHalfForService = crate::LinkHalf< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi, - ::SelectChain ->; - -pub type BlockImportForLightService = crate::light_import::GrandpaLightBlockImport< - LightBackend, - LightExecutor, - ::Block, - ::RuntimeApi, ->; diff --git a/core/finality-grandpa/src/tests.rs b/core/finality-grandpa/src/tests.rs index f634fd114ae3d..c82982e20d494 100644 --- a/core/finality-grandpa/src/tests.rs +++ b/core/finality-grandpa/src/tests.rs @@ -1215,6 +1215,7 @@ fn voter_persists_its_votes() { config.clone(), set_state, Exit, + true, ); runtime.block_on(routing_work).unwrap(); diff --git a/core/offchain/Cargo.toml b/core/offchain/Cargo.toml index 4c8891eb6b14c..691fdea91cc48 100644 --- a/core/offchain/Cargo.toml +++ b/core/offchain/Cargo.toml @@ -7,13 +7,20 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] +bytes = "0.4" client = { package = "substrate-client", path = "../../core/client" } +fnv = "1.0" +futures01 = { package = "futures", version = "0.1" } futures-preview = "=0.3.0-alpha.17" +futures-timer = "0.2.1" +hyper = "0.12.33" +hyper-tls = "0.3.2" log = "0.4" offchain-primitives = { package = "substrate-offchain-primitives", path = "./primitives" } codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } parking_lot = "0.9.0" primitives = { package = "substrate-primitives", path = "../../core/primitives" } +rand = "0.7" sr-primitives = { path = "../../core/sr-primitives" } transaction_pool = { package = "substrate-transaction-pool", path = "../../core/transaction-pool" } network = { package = 
"substrate-network", path = "../../core/network" } @@ -23,6 +30,7 @@ keystore = { package = "substrate-keystore", path = "../keystore" } env_logger = "0.6" client-db = { package = "substrate-client-db", path = "../../core/client/db/", default-features = true } test-client = { package = "substrate-test-runtime-client", path = "../../core/test-runtime/client" } +tokio = "0.1" [features] default = [] diff --git a/core/offchain/src/api.rs b/core/offchain/src/api.rs index 225e7c3f725a4..0057dfd273b8b 100644 --- a/core/offchain/src/api.rs +++ b/core/offchain/src/api.rs @@ -17,13 +17,12 @@ use std::{ str::FromStr, sync::Arc, - convert::{TryFrom, TryInto}, - time::{SystemTime, Duration}, + convert::TryFrom, thread::sleep, }; use client::backend::OffchainStorage; -use futures::{StreamExt as _, Future, future, channel::mpsc}; +use futures::{StreamExt as _, Future, FutureExt as _, future, channel::mpsc}; use log::{info, debug, warn, error}; use network::{PeerId, Multiaddr, NetworkStateInfo}; use codec::{Encode, Decode}; @@ -34,6 +33,9 @@ use primitives::offchain::{ use sr_primitives::{generic::BlockId, traits::{self, Extrinsic}}; use transaction_pool::txpool::{Pool, ChainApi}; +mod http; +mod timestamp; + /// A message between the offchain extension and the processing thread. enum ExtMessage { SubmitExtrinsic(Vec), @@ -49,6 +51,8 @@ pub(crate) struct Api { _at: BlockId, /// Is this node a potential validator? is_validator: bool, + /// Everything HTTP-related is handled by a different struct. + http: http::HttpApi, } fn unavailable_yet(name: &str) -> R { @@ -89,29 +93,11 @@ where } fn timestamp(&mut self) -> Timestamp { - let now = SystemTime::now(); - let epoch_duration = now.duration_since(SystemTime::UNIX_EPOCH); - match epoch_duration { - Err(_) => { - // Current time is earlier than UNIX_EPOCH. - Timestamp::from_unix_millis(0) - }, - Ok(d) => { - let duration = d.as_millis(); - // Assuming overflow won't happen for a few hundred years. 
- Timestamp::from_unix_millis(duration.try_into() - .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")) - } - } + timestamp::now() } fn sleep_until(&mut self, deadline: Timestamp) { - // Get current timestamp. - let now = self.timestamp(); - // Calculate the diff with the deadline. - let diff = deadline.diff(&now); - // Call thread::sleep for the diff duration. - sleep(Duration::from_millis(diff.millis())); + sleep(timestamp::timestamp_from_now(deadline)); } fn random_seed(&mut self) -> [u8; 32] { @@ -149,58 +135,53 @@ where fn http_request_start( &mut self, - _method: &str, - _uri: &str, + method: &str, + uri: &str, _meta: &[u8] ) -> Result { - unavailable_yet::<()>("http_request_start"); - Err(()) + self.http.request_start(method, uri) } fn http_request_add_header( &mut self, - _request_id: HttpRequestId, - _name: &str, - _value: &str + request_id: HttpRequestId, + name: &str, + value: &str ) -> Result<(), ()> { - unavailable_yet::<()>("http_request_add_header"); - Err(()) + self.http.request_add_header(request_id, name, value) } fn http_request_write_body( &mut self, - _request_id: HttpRequestId, - _chunk: &[u8], - _deadline: Option + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option ) -> Result<(), HttpError> { - unavailable_yet::<()>("http_request_write_body"); - Err(HttpError::IoError) + self.http.request_write_body(request_id, chunk, deadline) } fn http_response_wait( &mut self, ids: &[HttpRequestId], - _deadline: Option + deadline: Option ) -> Vec { - unavailable_yet::<()>("http_response_wait"); - ids.iter().map(|_| HttpRequestStatus::Unknown).collect() + self.http.response_wait(ids, deadline) } fn http_response_headers( &mut self, - _request_id: HttpRequestId + request_id: HttpRequestId ) -> Vec<(Vec, Vec)> { - unavailable_yet("http_response_headers") + self.http.response_headers(request_id) } fn http_response_read_body( &mut self, - _request_id: HttpRequestId, - _buffer: &mut [u8], - _deadline: Option + request_id: 
HttpRequestId, + buffer: &mut [u8], + deadline: Option ) -> Result { - unavailable_yet::<()>("http_response_read_body"); - Err(HttpError::IoError) + self.http.response_read_body(request_id, buffer, deadline) } } @@ -276,6 +257,8 @@ pub(crate) struct AsyncApi { receiver: Option>, transaction_pool: Arc>, at: BlockId, + /// Everything HTTP-related is handled by a different struct. + http: Option, } impl AsyncApi { @@ -289,18 +272,22 @@ impl AsyncApi { ) -> (Api, AsyncApi) { let (sender, rx) = mpsc::unbounded(); + let (http_api, http_worker) = http::http(); + let api = Api { sender, db, network_state, _at: at, is_validator, + http: http_api, }; let async_api = AsyncApi { receiver: Some(rx), transaction_pool, at, + http: Some(http_worker), }; (api, async_api) @@ -309,13 +296,17 @@ impl AsyncApi { /// Run a processing task for the API pub fn process(mut self) -> impl Future { let receiver = self.receiver.take().expect("Take invoked only once."); + let http = self.http.take().expect("Take invoked only once."); - receiver.for_each(move |msg| { + let extrinsics = receiver.for_each(move |msg| { match msg { ExtMessage::SubmitExtrinsic(ext) => self.submit_extrinsic(ext), } future::ready(()) - }) + }); + + future::join(extrinsics, http) + .map(|((), ())| ()) } fn submit_extrinsic(&mut self, ext: Vec) { @@ -340,7 +331,7 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; - use std::convert::TryFrom; + use std::{convert::{TryFrom, TryInto}, time::SystemTime}; use sr_primitives::traits::Zero; use client_db::offchain::LocalStorage; use network::PeerId; diff --git a/core/offchain/src/api/http.rs b/core/offchain/src/api/http.rs new file mode 100644 index 0000000000000..9ea666862c3d9 --- /dev/null +++ b/core/offchain/src/api/http.rs @@ -0,0 +1,1004 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! This module is composed of two structs: [`HttpApi`] and [`HttpWorker`]. Calling the [`http`] +//! function returns a pair of [`HttpApi`] and [`HttpWorker`] that share some state. +//! +//! The [`HttpApi`] is (indirectly) passed to the runtime when calling an offchain worker, while +//! the [`HttpWorker`] must be processed in the background. The [`HttpApi`] mimicks the API of the +//! HTTP-related methods available to offchain workers. +//! +//! The reason for this design is driven by the fact that HTTP requests should continue running +//! (i.e.: the socket should continue being processed) in the background even if the runtime isn't +//! actively calling any function. + +use crate::api::timestamp; +use bytes::Buf as _; +use fnv::FnvHashMap; +use futures::{prelude::*, channel::mpsc, compat::Compat01As03}; +use log::{warn, error}; +use primitives::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; +use std::{fmt, io::Read as _, mem, pin::Pin, task::Context, task::Poll}; + +/// Creates a pair of [`HttpApi`] and [`HttpWorker`]. 
+pub fn http() -> (HttpApi, HttpWorker) { + let (to_worker, from_api) = mpsc::unbounded(); + let (to_api, from_worker) = mpsc::unbounded(); + + let api = HttpApi { + to_worker, + from_worker: from_worker.fuse(), + // We start with a random ID for the first HTTP request, to prevent mischievous people from + // writing runtime code with hardcoded IDs. + next_id: HttpRequestId(rand::random::() % 2000), + requests: FnvHashMap::default(), + }; + + let engine = HttpWorker { + to_api, + from_api, + // TODO: don't unwrap; we should fall back to the HttpConnector if we fail to create the + // Https one; there doesn't seem to be any built-in way to do this + http_client: HyperClient::new(), + requests: Vec::new(), + }; + + (api, engine) +} + +/// Provides HTTP capabilities. +/// +/// Since this struct is a helper for offchain workers, its API is mimicking the API provided +/// to offchain workers. +pub struct HttpApi { + /// Used to sends messages to the worker. + to_worker: mpsc::UnboundedSender, + /// Used to receive messages from the worker. + /// We use a `Fuse` in order to have an extra protection against panicking. + from_worker: stream::Fuse>, + /// Id to assign to the next HTTP request that is started. + next_id: HttpRequestId, + /// List of HTTP requests in preparation or in progress. + requests: FnvHashMap, +} + +/// One active request within `HttpApi`. +enum HttpApiRequest { + /// The request object is being constructed locally and not started yet. + NotDispatched(hyper::Request, hyper::body::Sender), + /// The request has been dispatched and we're in the process of sending out the body (if the + /// field is `Some`) or waiting for a response (if the field is `None`). + Dispatched(Option), + /// Received a response. + Response(HttpApiRequestRp), + /// A request has been dispatched but the worker notified us of an error. We report this + /// failure to the user as an `IoError` and remove the request from the list as soon as + /// possible. 
+ Fail(hyper::Error), +} + +/// A request within `HttpApi` that has received a response. +struct HttpApiRequestRp { + /// We might still be writing the request's body when the response comes. + /// This field allows to continue writing that body. + sending_body: Option, + /// Status code of the response. + status_code: hyper::StatusCode, + /// Headers of the response. + headers: hyper::HeaderMap, + /// Body of the response, as a channel of `Chunk` objects. + /// While the code is designed to drop the `Receiver` once it ends, we wrap it within a + /// `Fuse` in order to be extra precautious about panics. + /// Elements extracted from the channel are first put into `current_read_chunk`. + /// If the channel produces an error, then that is translated into an `IoError` and the request + /// is removed from the list. + body: stream::Fuse>>, + /// Chunk that has been extracted from the channel and that is currently being read. + /// Reading data from the response should read from this field in priority. + current_read_chunk: Option>, +} + +impl HttpApi { + /// Mimicks the corresponding method in the offchain API. + pub fn request_start( + &mut self, + method: &str, + uri: &str + ) -> Result { + // Start by building the prototype of the request. + // We do this first so that we don't touch anything in `self` if building the prototype + // fails. 
+ let (body_sender, body) = hyper::Body::channel(); + let mut request = hyper::Request::new(body); + *request.method_mut() = hyper::Method::from_bytes(method.as_bytes()).map_err(|_| ())?; + *request.uri_mut() = hyper::Uri::from_shared(From::from(uri)).map_err(|_| ())?; + + let new_id = self.next_id; + debug_assert!(!self.requests.contains_key(&new_id)); + match self.next_id.0.checked_add(1) { + Some(new_id) => self.next_id.0 = new_id, + None => { + error!("Overflow in offchain worker HTTP request ID assignment"); + return Err(()); + } + }; + self.requests.insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); + + Ok(new_id) + } + + /// Mimicks the corresponding method in the offchain API. + pub fn request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str + ) -> Result<(), ()> { + let request = match self.requests.get_mut(&request_id) { + Some(&mut HttpApiRequest::NotDispatched(ref mut rq, _)) => rq, + _ => return Err(()) + }; + + let name = hyper::header::HeaderName::from_bytes(name.as_bytes()).map_err(|_| ())?; + let value = hyper::header::HeaderValue::from_str(value).map_err(|_| ())?; + // Note that we're always appending headers and never replacing old values. + // We assume here that the user knows what they're doing. + request.headers_mut().append(name, value); + Ok(()) + } + + /// Mimicks the corresponding method in the offchain API. + pub fn request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option + ) -> Result<(), HttpError> { + // Extract the request from the list. + // Don't forget to add it back if necessary when returning. + let mut request = match self.requests.remove(&request_id) { + None => return Err(HttpError::Invalid), + Some(r) => r, + }; + + let mut deadline = timestamp::deadline_to_future(deadline); + // Closure that writes data to a sender, taking the deadline into account. 
Can return `Ok` + // (if the body has been written), or `DeadlineReached`, or `IoError`. + // If `IoError` is returned, don't forget to remove the request from the list. + let mut poll_sender = move |sender: &mut hyper::body::Sender| -> Result<(), HttpError> { + let mut when_ready = future::maybe_done(Compat01As03::new( + futures01::future::poll_fn(|| sender.poll_ready()) + )); + futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); + match when_ready { + future::MaybeDone::Done(Ok(())) => {} + future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), + future::MaybeDone::Future(_) | + future::MaybeDone::Gone => { + debug_assert!(if let future::MaybeDone::Done(_) = deadline { true } else { false }); + return Err(HttpError::DeadlineReached) + } + }; + + match sender.send_data(hyper::Chunk::from(chunk.to_owned())) { + Ok(()) => Ok(()), + Err(_chunk) => { + error!("HTTP sender refused data despite being ready"); + Err(HttpError::IoError) + }, + } + }; + + loop { + request = match request { + HttpApiRequest::NotDispatched(request, sender) => { + // If the request is not dispatched yet, dispatch it and loop again. + let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { + id: request_id, + request + }); + HttpApiRequest::Dispatched(Some(sender)) + } + + HttpApiRequest::Dispatched(Some(mut sender)) => + if !chunk.is_empty() { + match poll_sender(&mut sender) { + Err(HttpError::IoError) => return Err(HttpError::IoError), + other => { + self.requests.insert( + request_id, + HttpApiRequest::Dispatched(Some(sender)) + ); + return other + } + } + } else { + // Writing an empty body is a hint that we should stop writing. Dropping + // the sender. + self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); + return Ok(()) + } + + HttpApiRequest::Response(mut response @ HttpApiRequestRp { sending_body: Some(_), .. 
}) => + if !chunk.is_empty() { + match poll_sender(response.sending_body.as_mut() + .expect("Can only enter this match branch if Some; qed")) { + Err(HttpError::IoError) => return Err(HttpError::IoError), + other => { + self.requests.insert(request_id, HttpApiRequest::Response(response)); + return other + } + } + + } else { + // Writing an empty body is a hint that we should stop writing. Dropping + // the sender. + self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { + sending_body: None, + ..response + })); + return Ok(()) + } + + HttpApiRequest::Fail(_) => + // If the request has already failed, return without putting back the request + // in the list. + return Err(HttpError::IoError), + + v @ HttpApiRequest::Dispatched(None) | + v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { + // We have already finished sending this body. + self.requests.insert(request_id, v); + return Err(HttpError::Invalid) + } + } + } + } + + /// Mimicks the corresponding method in the offchain API. + pub fn response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option + ) -> Vec { + // First of all, dispatch all the non-dispatched requests and drop all senders so that the + // user can't write anymore data. + for id in ids { + match self.requests.get_mut(id) { + Some(HttpApiRequest::NotDispatched(_, _)) => {} + Some(HttpApiRequest::Dispatched(sending_body)) | + Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { + let _ = sending_body.take(); + continue + } + _ => continue + }; + + let (request, _sender) = match self.requests.remove(id) { + Some(HttpApiRequest::NotDispatched(rq, s)) => (rq, s), + _ => unreachable!("we checked for NotDispatched above; qed") + }; + + let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { + id: *id, + request + }); + + // We also destroy the sender in order to forbid writing more data. 
+ self.requests.insert(*id, HttpApiRequest::Dispatched(None)); + } + + let mut deadline = timestamp::deadline_to_future(deadline); + + loop { + // Within that loop, first try to see if we have all the elements for a response. + // This includes the situation where the deadline is reached. + { + let mut output = Vec::with_capacity(ids.len()); + let mut must_wait_more = false; + for id in ids { + output.push(match self.requests.get_mut(id) { + None => HttpRequestStatus::Invalid, + Some(HttpApiRequest::NotDispatched(_, _)) => + unreachable!("we replaced all the NotDispatched with Dispatched earlier; qed"), + Some(HttpApiRequest::Dispatched(_)) => { + must_wait_more = true; + HttpRequestStatus::DeadlineReached + }, + Some(HttpApiRequest::Fail(_)) => HttpRequestStatus::IoError, + Some(HttpApiRequest::Response(HttpApiRequestRp { status_code, .. })) => + HttpRequestStatus::Finished(status_code.as_u16()), + }); + } + debug_assert_eq!(output.len(), ids.len()); + + // Are we ready to call `return`? + let is_done = if let future::MaybeDone::Done(_) = deadline { + true + } else if !must_wait_more { + true + } else { + false + }; + + if is_done { + // Requests in "fail" mode are purged before returning. + debug_assert_eq!(output.len(), ids.len()); + for n in (0..ids.len()).rev() { + if let HttpRequestStatus::IoError = output[n] { + self.requests.remove(&ids[n]); + } + } + return output + } + } + + // Grab next message from the worker. We call `continue` if deadline is reached so that + // we loop back and `return`. + let next_message = { + let mut next_msg = future::maybe_done(self.from_worker.next()); + futures::executor::block_on(future::select(&mut next_msg, &mut deadline)); + if let future::MaybeDone::Done(msg) = next_msg { + msg + } else { + debug_assert!(if let future::MaybeDone::Done(_) = deadline { true } else { false }); + continue + } + }; + + // Update internal state based on received message. 
+ match next_message { + Some(WorkerToApi::Response { id, status_code, headers, body }) => + match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(sending_body)) => { + self.requests.insert(id, HttpApiRequest::Response(HttpApiRequestRp { + sending_body, + status_code, + headers, + body: body.fuse(), + current_read_chunk: None, + })); + } + None => {} // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + } + + Some(WorkerToApi::Fail { id, error }) => + match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(_)) => { + self.requests.insert(id, HttpApiRequest::Fail(error)); + } + None => {} // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + } + + None => { + error!("Worker has crashed"); + return ids.iter().map(|_| HttpRequestStatus::IoError).collect() + } + } + + } + } + + /// Mimicks the corresponding method in the offchain API. + pub fn response_headers( + &mut self, + request_id: HttpRequestId + ) -> Vec<(Vec, Vec)> { + // Do an implicit non-blocking wait on the request. + let _ = self.response_wait(&[request_id], Some(timestamp::now())); + + let headers = match self.requests.get(&request_id) { + Some(HttpApiRequest::Response(HttpApiRequestRp { headers, .. })) => headers, + _ => return Vec::new() + }; + + headers + .iter() + .map(|(name, value)| (name.as_str().as_bytes().to_owned(), value.as_bytes().to_owned())) + .collect() + } + + /// Mimicks the corresponding method in the offchain API. + pub fn response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option + ) -> Result { + // Do an implicit wait on the request. + let _ = self.response_wait(&[request_id], deadline); + + // Remove the request from the list and handle situations where the request is invalid or + // in the wrong state. 
+ let mut response = match self.requests.remove(&request_id) { + Some(HttpApiRequest::Response(r)) => r, + // Because we called `response_wait` above, we know that the deadline has been reached + // and we still haven't received a response. + Some(rq @ HttpApiRequest::Dispatched(_)) => { + self.requests.insert(request_id, rq); + return Err(HttpError::DeadlineReached) + }, + // The request has failed. + Some(HttpApiRequest::Fail { .. }) => + return Err(HttpError::IoError), + // Request hasn't been dispatched yet; reading the body is invalid. + Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { + self.requests.insert(request_id, rq); + return Err(HttpError::Invalid) + } + None => return Err(HttpError::Invalid) + }; + + // Convert the deadline into a `Future` that resolves when the deadline is reached. + let mut deadline = timestamp::deadline_to_future(deadline); + + loop { + // First read from `current_read_chunk`. + if let Some(mut current_read_chunk) = response.current_read_chunk.take() { + match current_read_chunk.read(buffer) { + Ok(0) => {} + Ok(n) => { + self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { + current_read_chunk: Some(current_read_chunk), + .. response + })); + return Ok(n) + }, + Err(err) => { + // This code should never be reached unless there's a logic error somewhere. + error!("Failed to read from current read chunk: {:?}", err); + return Err(HttpError::IoError) + } + } + } + + // If we reach here, that means the `current_read_chunk` is empty and needs to be + // filled with a new chunk from `body`. We block on either the next body or the + // deadline. 
+ let mut next_body = future::maybe_done(response.body.next()); + futures::executor::block_on(future::select(&mut next_body, &mut deadline)); + + if let future::MaybeDone::Done(next_body) = next_body { + match next_body { + Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), + Some(Err(_)) => return Err(HttpError::IoError), + None => return Ok(0), // eof + } + } + + if let future::MaybeDone::Done(_) = deadline { + self.requests.insert(request_id, HttpApiRequest::Response(response)); + return Err(HttpError::DeadlineReached) + } + } + } +} + +impl fmt::Debug for HttpApi { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_list() + .entries(self.requests.iter()) + .finish() + } +} + +impl fmt::Debug for HttpApiRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + HttpApiRequest::NotDispatched(_, _) => + f.debug_tuple("HttpApiRequest::NotDispatched").finish(), + HttpApiRequest::Dispatched(_) => + f.debug_tuple("HttpApiRequest::Dispatched").finish(), + HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => + f.debug_tuple("HttpApiRequest::Response").field(status_code).field(headers).finish(), + HttpApiRequest::Fail(err) => + f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), + } + } +} + +/// Message send from the API to the worker. +enum ApiToWorker { + /// Dispatches a new HTTP request. + Dispatch { + /// ID to send back when the response comes back. + id: HttpRequestId, + /// Request to start executing. + request: hyper::Request, + } +} + +/// Message send from the API to the worker. +enum WorkerToApi { + /// A request has succeeded. + Response { + /// The ID that was passed to the worker. + id: HttpRequestId, + /// Status code of the response. + status_code: hyper::StatusCode, + /// Headers of the response. + headers: hyper::HeaderMap, + /// Body of the response, as a channel of `Chunk` objects. 
+ /// We send the body back through a channel instead of returning the hyper `Body` object + /// because we don't want the `HttpApi` to have to drive the reading. + /// Instead, reading an item from the channel will notify the worker task, which will push + /// the next item. + /// Can also be used to send an error, in case an error happend on the HTTP socket. After + /// an error is sent, the channel will close. + body: mpsc::Receiver>, + }, + /// A request has failed because of an error. The request is then no longer valid. + Fail { + /// The ID that was passed to the worker. + id: HttpRequestId, + /// Error that happened. + error: hyper::Error, + }, +} + +/// Wraps around a `hyper::Client` with either TLS enabled or disabled. +enum HyperClient { + /// Everything is ok and HTTPS is available. + Https(hyper::Client, hyper::Body>), + /// We failed to initialize HTTPS and therefore only allow HTTP. + Http(hyper::Client), +} + +impl HyperClient { + /// Creates new hyper client. + /// + /// By default we will try to initialize the `HttpsConnector`, + /// If that's not possible we'll fall back to `HttpConnector`. + pub fn new() -> Self { + match hyper_tls::HttpsConnector::new(1) { + Ok(tls) => HyperClient::Https(hyper::Client::builder().build(tls)), + Err(e) => { + warn!("Unable to initialize TLS client. Falling back to HTTP-only: {:?}", e); + HyperClient::Http(hyper::Client::new()) + }, + } + } +} + +/// Must be continuously polled for the [`HttpApi`] to properly work. +pub struct HttpWorker { + /// Used to sends messages to the `HttpApi`. + to_api: mpsc::UnboundedSender, + /// Used to receive messages from the `HttpApi`. + from_api: mpsc::UnboundedReceiver, + /// The engine that runs HTTP requests. + http_client: HyperClient, + /// HTTP requests that are being worked on by the engine. + requests: Vec<(HttpRequestId, HttpWorkerRequest)>, +} + +/// HTTP request being processed by the worker. 
+enum HttpWorkerRequest { + /// Request has been dispatched and is waiting for a response from the Internet. + Dispatched(Compat01As03), + /// Progressively reading the body of the response and sending it to the channel. + ReadBody { + /// Body to read `Chunk`s from. Only used if the channel is ready to accept data. + body: Compat01As03, + /// Channel to the [`HttpApi`] where we send the chunks to. + tx: mpsc::Sender>, + }, +} + +impl Future for HttpWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + // Reminder: this is continuously run in the background. + + // We use a `me` variable because the compiler isn't smart enough to allow borrowing + // multiple fields at once through a `Deref`. + let me = &mut *self; + + // We remove each element from `requests` one by one and add them back only if necessary. + for n in (0..me.requests.len()).rev() { + let (id, request) = me.requests.swap_remove(n); + match request { + HttpWorkerRequest::Dispatched(mut future) => { + // Check for an HTTP response from the Internet. + let mut response = match Future::poll(Pin::new(&mut future), cx) { + Poll::Pending => { + me.requests.push((id, HttpWorkerRequest::Dispatched(future))); + continue + }, + Poll::Ready(Ok(response)) => response, + Poll::Ready(Err(err)) => { + let _ = me.to_api.unbounded_send(WorkerToApi::Fail { + id, + error: err, + }); + continue; // don't insert the request back + } + }; + + // We received a response! Decompose it into its parts. 
+ let status_code = response.status(); + let headers = mem::replace(response.headers_mut(), hyper::HeaderMap::new()); + let body = Compat01As03::new(response.into_body()); + + let (body_tx, body_rx) = mpsc::channel(3); + let _ = me.to_api.unbounded_send(WorkerToApi::Response { + id, + status_code, + headers, + body: body_rx, + }); + + me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); + cx.waker().wake_by_ref(); // reschedule in order to poll the new future + continue + } + + HttpWorkerRequest::ReadBody { mut body, mut tx } => { + // Before reading from the HTTP response, check that `tx` is ready to accept + // a new chunk. + match tx.poll_ready(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(_)) => continue, // don't insert the request back + Poll::Pending => { + me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); + continue + } + } + + // `tx` is ready. Read a chunk from the socket and send it to the channel. + match Stream::poll_next(Pin::new(&mut body), cx) { + Poll::Ready(Some(Ok(chunk))) => { + let _ = tx.start_send(Ok(chunk)); + me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); + cx.waker().wake_by_ref(); // reschedule in order to continue reading + } + Poll::Ready(Some(Err(err))) => { + let _ = tx.start_send(Err(err)); + // don't insert the request back + }, + Poll::Ready(None) => {} // EOF; don't insert the request back + Poll::Pending => { + me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); + }, + } + } + } + } + + // Check for messages coming from the [`HttpApi`]. 
+ match Stream::poll_next(Pin::new(&mut me.from_api), cx) { + Poll::Pending => {}, + Poll::Ready(None) => return Poll::Ready(()), // stops the worker + Poll::Ready(Some(ApiToWorker::Dispatch { id, request })) => { + let future = Compat01As03::new(match me.http_client { + HyperClient::Http(ref mut c) => c.request(request), + HyperClient::Https(ref mut c) => c.request(request), + }); + debug_assert!(me.requests.iter().all(|(i, _)| *i != id)); + me.requests.push((id, HttpWorkerRequest::Dispatched(future))); + cx.waker().wake_by_ref(); // reschedule the task to poll the request + } + } + + Poll::Pending + } +} + +impl fmt::Debug for HttpWorker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_list() + .entries(self.requests.iter()) + .finish() + } +} + +impl fmt::Debug for HttpWorkerRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + HttpWorkerRequest::Dispatched(_) => + f.debug_tuple("HttpWorkerRequest::Dispatched").finish(), + HttpWorkerRequest::ReadBody { .. } => + f.debug_tuple("HttpWorkerRequest::Response").finish(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::api::timestamp; + use super::http; + use futures::prelude::*; + use futures01::Future as _; + use primitives::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Duration}; + + // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP + // server that runs in the background as well. + macro_rules! build_api_server { + () => {{ + let (api, worker) = http(); + // Note: we have to use tokio because hyper still uses old futures. 
+ std::thread::spawn(move || { + tokio::run(futures::compat::Compat::new(worker.map(|()| Ok::<(), ()>(())))) + }); + let (addr_tx, addr_rx) = std::sync::mpsc::channel(); + std::thread::spawn(move || { + let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()) + .serve(|| { + hyper::service::service_fn_ok(move |_: hyper::Request| { + hyper::Response::new(hyper::Body::from("Hello World!")) + }) + }); + let _ = addr_tx.send(server.local_addr()); + hyper::rt::run(server.map_err(|e| panic!("{:?}", e))); + }); + (api, addr_rx.recv().unwrap()) + }}; + } + + #[test] + fn basic_localhost() { + let deadline = timestamp::now().add(Duration::from_millis(10_000)); + + // Performs an HTTP query to a background HTTP server. + + let (mut api, addr) = build_api_server!(); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[], Some(deadline)).unwrap(); + + match api.response_wait(&[id], Some(deadline))[0] { + HttpRequestStatus::Finished(200) => {}, + v => panic!("Connecting to localhost failed: {:?}", v) + } + + let headers = api.response_headers(id); + assert!(headers.iter().any(|(h, _)| h.eq_ignore_ascii_case(b"Date"))); + + let mut buf = vec![0; 2048]; + let n = api.response_read_body(id, &mut buf, Some(deadline)).unwrap(); + assert_eq!(&buf[..n], b"Hello World!"); + } + + #[test] + fn request_start_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_start("\0", &format!("http://{}", addr)) { + Err(()) => {} + Ok(_) => panic!() + }; + + match api.request_start("GET", "http://\0localhost") { + Err(()) => {} + Ok(_) => panic!() + }; + } + + #[test] + fn request_add_header_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_add_header(HttpRequestId(0xdead), "Foo", "bar") { + Err(()) => {} + Ok(_) => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + match api.request_add_header(id, "\0", "bar") { + Err(()) => {} + 
Ok(_) => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + match api.request_add_header(id, "Foo", "\0") { + Err(()) => {} + Ok(_) => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_headers(id); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!() + }; + } + + #[test] + fn request_write_body_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_write_body(HttpRequestId(0xdead), &[1, 2, 3], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + match api.request_write_body(HttpRequestId(0xdead), &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[], None).unwrap(); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[], None).unwrap(); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", 
&format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.response_wait(&[id], None); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.response_wait(&[id], None); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_headers(id); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_headers(id); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!() + }; + } + + #[test] + fn response_headers_invalid_call() { + let (mut api, addr) = build_api_server!(); + assert!(api.response_headers(HttpRequestId(0xdead)).is_empty()); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[], None).unwrap(); + while api.response_headers(id).is_empty() { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + + let id = api.request_start("GET", 
&format!("http://{}", addr)).unwrap(); + api.response_wait(&[id], None); + assert!(!api.response_headers(id).is_empty()); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + let mut buf = [0; 128]; + while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} + assert!(api.response_headers(id).is_empty()); + } + + #[test] + fn response_header_invalid_call() { + let (mut api, addr) = build_api_server!(); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + api.request_write_body(id, &[], None).unwrap(); + // Note: this test actually sends out the request, and is supposed to test a situation + // where we haven't received any response yet. This test can theoretically fail if the + // HTTP response comes back faster than the kernel schedules our thread, but that is highly + // unlikely. + assert!(api.response_headers(id).is_empty()); + } + + #[test] + fn response_read_body_invalid_call() { + let (mut api, addr) = build_api_server!(); + let mut buf = [0; 512]; + + match api.response_read_body(HttpRequestId(0xdead), &mut buf, None) { + Err(HttpError::Invalid) => {} + _ => panic!() + } + + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} + match api.response_read_body(id, &mut buf, None) { + Err(HttpError::Invalid) => {} + _ => panic!() + } + } + + #[test] + fn fuzzing() { + // Uses the API in random ways to try to trigger panicks. + // Doesn't test some paths, such as waiting for multiple requests. Also doesn't test what + // happens if the server force-closes our socket. 
+ + let (mut api, addr) = build_api_server!(); + + for _ in 0..50 { + let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); + + for _ in 0..250 { + match rand::random::() % 6 { + 0 => { let _ = api.request_add_header(id, "Foo", "Bar"); } + 1 => { let _ = api.request_write_body(id, &[1, 2, 3, 4], None); } + 2 => { let _ = api.request_write_body(id, &[], None); } + 3 => { let _ = api.response_wait(&[id], None); } + 4 => { let _ = api.response_headers(id); } + 5 => { + let mut buf = [0; 512]; + let _ = api.response_read_body(id, &mut buf, None); + } + 6 ..= 255 => unreachable!() + } + } + } + } +} diff --git a/core/offchain/src/api/timestamp.rs b/core/offchain/src/api/timestamp.rs new file mode 100644 index 0000000000000..445c7f3878474 --- /dev/null +++ b/core/offchain/src/api/timestamp.rs @@ -0,0 +1,62 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper methods dedicated to timestamps. + +use primitives::offchain::Timestamp; +use std::convert::TryInto; +use std::time::{SystemTime, Duration}; + +/// Returns the current time as a `Timestamp`. +pub fn now() -> Timestamp { + let now = SystemTime::now(); + let epoch_duration = now.duration_since(SystemTime::UNIX_EPOCH); + match epoch_duration { + Err(_) => { + // Current time is earlier than UNIX_EPOCH. 
+ Timestamp::from_unix_millis(0) + }, + Ok(d) => { + let duration = d.as_millis(); + // Assuming overflow won't happen for a few hundred years. + Timestamp::from_unix_millis(duration.try_into() + .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")) + } + } +} + +/// Returns how a `Timestamp` compares to "now". +/// +/// In other words, returns `timestamp - now()`. +pub fn timestamp_from_now(timestamp: Timestamp) -> Duration { + Duration::from_millis(timestamp.diff(&now()).millis()) +} + +/// Converts the deadline into a `Future` that resolves when the deadline is reached. +/// +/// If `None`, returns a never-ending `Future`. +pub fn deadline_to_future( + deadline: Option, +) -> futures::future::MaybeDone { + use futures::future; + + future::maybe_done(match deadline { + Some(deadline) => future::Either::Left( + futures_timer::Delay::new(timestamp_from_now(deadline)) + ), + None => future::Either::Right(future::pending()) + }) +} diff --git a/core/offchain/src/lib.rs b/core/offchain/src/lib.rs index b38b202c62e08..9b785ec8bada1 100644 --- a/core/offchain/src/lib.rs +++ b/core/offchain/src/lib.rs @@ -43,7 +43,7 @@ use client::runtime_api::ApiExt; use futures::future::Future; use log::{debug, warn}; use network::NetworkStateInfo; -use primitives::ExecutionContext; +use primitives::{offchain, ExecutionContext}; use sr_primitives::{generic::BlockId, traits::{self, ProvideRuntimeApi}}; use transaction_pool::txpool::{Pool, ChainApi}; @@ -122,7 +122,7 @@ impl OffchainWorkers< debug!("Running offchain workers at {:?}", at); let run = runtime.offchain_worker_with_context( &at, - ExecutionContext::OffchainWorker(api), + ExecutionContext::OffchainCall(Some((api, offchain::Capabilities::all()))), number, ); if let Err(e) = run { diff --git a/core/offchain/src/testing.rs b/core/offchain/src/testing.rs index cdf2878c13e3f..8724ca7546604 100644 --- a/core/offchain/src/testing.rs +++ b/core/offchain/src/testing.rs @@ -248,7 +248,7 @@ impl 
offchain::Externalities for TestOffchainExt { ids.iter().map(|id| match state.requests.get(id) { Some(req) if req.response.is_empty() => RequestStatus::DeadlineReached, - None => RequestStatus::Unknown, + None => RequestStatus::Invalid, _ => RequestStatus::Finished(200), }).collect() } diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index b9bb141ad7616..f05907e3f205e 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -13,6 +13,7 @@ twox-hash = { version = "1.2.0", optional = true } byteorder = { version = "1.3.1", default-features = false } primitive-types = { version = "0.5.0", default-features = false, features = ["codec"] } impl-serde = { version = "0.1", optional = true } +log = { version = "0.4", optional = true } wasmi = { version = "0.5.0", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -48,6 +49,7 @@ bench = false [features] default = ["std"] std = [ + "log", "wasmi", "lazy_static", "parking_lot", diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs index 21e7c878082ad..5c918e4964e37 100644 --- a/core/primitives/src/lib.rs +++ b/core/primitives/src/lib.rs @@ -88,19 +88,23 @@ pub enum ExecutionContext { Syncing, /// Context used for block construction. BlockConstruction, - /// Offchain worker context. - OffchainWorker(Box), - /// Context used for other calls. - Other, + /// Context used for offchain calls. + /// + /// This allows passing offchain extension and customizing available capabilities. + OffchainCall(Option<(Box, offchain::Capabilities)>), } impl ExecutionContext { - /// Returns if the keystore should be enabled for the current context. - pub fn enable_keystore(&self) -> bool { + /// Returns the capabilities of particular context. 
+ pub fn capabilities(&self) -> offchain::Capabilities { use ExecutionContext::*; + match self { - Importing | Syncing | BlockConstruction => false, - OffchainWorker(_) | Other => true, + Importing | Syncing | BlockConstruction => + offchain::Capabilities::none(), + // Enable keystore by default for offchain calls. CC @bkchr + OffchainCall(None) => [offchain::Capability::Keystore][..].into(), + OffchainCall(Some((_, capabilities))) => *capabilities, } } } diff --git a/core/primitives/src/offchain.rs b/core/primitives/src/offchain.rs index 52dbf5fbee356..6f024c5c02414 100644 --- a/core/primitives/src/offchain.rs +++ b/core/primitives/src/offchain.rs @@ -61,7 +61,7 @@ impl From for u32 { /// Opaque type for offchain http requests. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[cfg_attr(feature = "std", derive(Debug))] +#[cfg_attr(feature = "std", derive(Debug, Hash))] pub struct HttpRequestId(pub u16); impl From for u32 { @@ -79,6 +79,8 @@ pub enum HttpError { DeadlineReached = 1, /// There was an IO Error while processing the request. IoError = 2, + /// The ID of the request is invalid in this context. + Invalid = 3, } impl TryFrom for HttpError { @@ -88,6 +90,7 @@ impl TryFrom for HttpError { match error { e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached), e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError), + e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid), _ => Err(()) } } @@ -105,18 +108,17 @@ impl From for u32 { pub enum HttpRequestStatus { /// Deadline was reached while we waited for this request to finish. /// - /// Note the deadline is controlled by the calling part, it not necessarily means - /// that the request has timed out. + /// Note the deadline is controlled by the calling part, it not necessarily + /// means that the request has timed out. DeadlineReached, - /// Request timed out. 
- /// - /// This means that the request couldn't be completed by the host environment - /// within a reasonable time (according to the host), has now been terminated - /// and is considered finished. - /// To retry the request you need to construct it again. - Timeout, - /// Request status of this ID is not known. - Unknown, + /// An error has occured during the request, for example a timeout or the + /// remote has closed our socket. + /// + /// The request is now considered destroyed. To retry the request you need + /// to construct it again. + IoError, + /// The passed ID is invalid in this context. + Invalid, /// The request has finished with given status code. Finished(u16), } @@ -124,9 +126,9 @@ pub enum HttpRequestStatus { impl From for u32 { fn from(status: HttpRequestStatus) -> Self { match status { - HttpRequestStatus::Unknown => 0, + HttpRequestStatus::Invalid => 0, HttpRequestStatus::DeadlineReached => 10, - HttpRequestStatus::Timeout => 20, + HttpRequestStatus::IoError => 20, HttpRequestStatus::Finished(code) => u32::from(code), } } @@ -137,9 +139,9 @@ impl TryFrom for HttpRequestStatus { fn try_from(status: u32) -> Result { match status { - 0 => Ok(HttpRequestStatus::Unknown), + 0 => Ok(HttpRequestStatus::Invalid), 10 => Ok(HttpRequestStatus::DeadlineReached), - 20 => Ok(HttpRequestStatus::Timeout), + 20 => Ok(HttpRequestStatus::IoError), 100..=999 => u16::try_from(status).map(HttpRequestStatus::Finished).map_err(|_| ()), _ => Err(()), } @@ -230,6 +232,70 @@ impl Timestamp { } } +/// Execution context extra capabilities. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[repr(u8)] +pub enum Capability { + /// Access to transaction pool. + TransactionPool = 1, + /// External http calls. + Http = 2, + /// Keystore access. + Keystore = 4, + /// Randomness source. + Randomness = 8, + /// Access to opaque network state. + NetworkState = 16, + /// Access to offchain worker DB (read only). 
+ OffchainWorkerDbRead = 32, + /// Access to offchain worker DB (writes). + OffchainWorkerDbWrite = 64, +} + +/// A set of capabilities +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct Capabilities(u8); + +impl Capabilities { + /// Return an object representing an empty set of capabilities. + pub fn none() -> Self { + Self(0) + } + + /// Return an object representing all capabilities enabled. + pub fn all() -> Self { + Self(u8::max_value()) + } + + /// Return capabilities for rich offchain calls. + /// + /// Those calls should be allowed to sign and submit transactions + /// and access offchain workers database (but read only!). + pub fn rich_offchain_call() -> Self { + [ + Capability::TransactionPool, + Capability::Keystore, + Capability::OffchainWorkerDbRead, + ][..].into() + } + + /// Check if particular capability is enabled. + pub fn has(&self, capability: Capability) -> bool { + self.0 & capability as u8 != 0 + } + + /// Check if this capability object represents all capabilities. + pub fn has_all(&self) -> bool { + self == &Capabilities::all() + } +} + +impl<'a> From<&'a [Capability]> for Capabilities { + fn from(list: &'a [Capability]) -> Self { + Capabilities(list.iter().fold(0_u8, |a, b| a | *b as u8)) + } +} + /// An extended externalities for offchain workers. pub trait Externalities { /// Returns if the local node is a potential validator. @@ -291,6 +357,11 @@ pub trait Externalities { /// /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. /// Returns the id of newly started request. + /// + /// Returns an error if: + /// - No new request identifier could be allocated. + /// - The method or URI contain invalid characters. + /// fn http_request_start( &mut self, method: &str, @@ -299,6 +370,18 @@ pub trait Externalities { ) -> Result; /// Append header to the request. + /// + /// Calling this function multiple times with the same header name continues appending new + /// headers. 
In other words, headers are never replaced. + /// + /// Returns an error if: + /// - The request identifier is invalid. + /// - You have called `http_request_write_body` on that request. + /// - The name or value contain invalid characters. + /// + /// An error doesn't poison the request, and you can continue as if the call had never been + /// made. + /// fn http_request_add_header( &mut self, request_id: HttpRequestId, @@ -308,10 +391,19 @@ pub trait Externalities { /// Write a chunk of request body. /// - /// Writing an empty chunks finalises the request. + /// Calling this function with a non-empty slice may or may not start the + /// HTTP request. Calling this function with an empty chunks finalizes the + /// request and always starts it. It is no longer valid to write more data + /// afterwards. /// Passing `None` as deadline blocks forever. /// - /// Returns an error in case deadline is reached or the chunk couldn't be written. + /// Returns an error if: + /// - The request identifier is invalid. + /// - `http_response_wait` has already been called on this request. + /// - The deadline is reached. + /// - An I/O error has happened, for example the remote has closed our + /// request. The request is then considered invalid. + /// fn http_request_write_body( &mut self, request_id: HttpRequestId, @@ -325,6 +417,9 @@ pub trait Externalities { /// Note that if deadline is not provided the method will block indefinitely, /// otherwise unready responses will produce `DeadlineReached` status. /// + /// If a response returns an `IoError`, it is then considered destroyed. + /// Its id is then invalid. + /// /// Passing `None` as deadline blocks forever. fn http_response_wait( &mut self, @@ -335,6 +430,12 @@ pub trait Externalities { /// Read all response headers. /// /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. + /// + /// Dispatches the request if it hasn't been done yet. It is no longer + /// valid to modify the headers or write data to the request. 
+ /// + /// Returns an empty list if the identifier is unknown/invalid, hasn't + /// received a response, or has finished. fn http_response_headers( &mut self, request_id: HttpRequestId @@ -342,9 +443,23 @@ pub trait Externalities { /// Read a chunk of body response to given buffer. /// + /// Dispatches the request if it hasn't been done yet. It is no longer + /// valid to modify the headers or write data to the request. + /// /// Returns the number of bytes written or an error in case a deadline /// is reached or server closed the connection. /// Passing `None` as a deadline blocks forever. + /// + /// If `Ok(0)` or `Err(IoError)` is returned, the request is considered + /// destroyed. Doing another read or getting the response's headers, for + /// example, is then invalid. + /// + /// Returns an error if: + /// - The request identifier is invalid. + /// - The deadline is reached. + /// - An I/O error has happened, for example the remote has closed our + /// request. The request is then considered invalid. + /// fn http_response_read_body( &mut self, request_id: HttpRequestId, @@ -430,6 +545,123 @@ impl Externalities for Box { (&mut **self).http_response_read_body(request_id, buffer, deadline) } } +/// An `OffchainExternalities` implementation with limited capabilities. +pub struct LimitedExternalities { + capabilities: Capabilities, + externalities: T, +} + +impl LimitedExternalities { + /// Create new externalities limited to given `capabilities`. + pub fn new(capabilities: Capabilities, externalities: T) -> Self { + Self { + capabilities, + externalities, + } + } + + /// Check if given capability is allowed. + /// + /// Panics in case it is not. + fn check(&self, capability: Capability, name: &'static str) { + if !self.capabilities.has(capability) { + panic!("Accessing a forbidden API: {}. 
No: {:?} capability.", name, capability); + } + } +} + +impl Externalities for LimitedExternalities { + fn is_validator(&self) -> bool { + self.check(Capability::Keystore, "is_validator"); + self.externalities.is_validator() + } + + fn submit_transaction(&mut self, ex: Vec) -> Result<(), ()> { + self.check(Capability::TransactionPool, "submit_transaction"); + self.externalities.submit_transaction(ex) + } + + fn network_state(&self) -> Result { + self.check(Capability::NetworkState, "network_state"); + self.externalities.network_state() + } + + fn timestamp(&mut self) -> Timestamp { + self.check(Capability::Http, "timestamp"); + self.externalities.timestamp() + } + + fn sleep_until(&mut self, deadline: Timestamp) { + self.check(Capability::Http, "sleep_until"); + self.externalities.sleep_until(deadline) + } + + fn random_seed(&mut self) -> [u8; 32] { + self.check(Capability::Randomness, "random_seed"); + self.externalities.random_seed() + } + + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + self.check(Capability::OffchainWorkerDbWrite, "local_storage_set"); + self.externalities.local_storage_set(kind, key, value) + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + self.check(Capability::OffchainWorkerDbWrite, "local_storage_compare_and_set"); + self.externalities.local_storage_compare_and_set(kind, key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + self.check(Capability::OffchainWorkerDbRead, "local_storage_get"); + self.externalities.local_storage_get(kind, key) + } + + fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + self.check(Capability::Http, "http_request_start"); + self.externalities.http_request_start(method, uri, meta) + } + + fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> 
Result<(), ()> { + self.check(Capability::Http, "http_request_add_header"); + self.externalities.http_request_add_header(request_id, name, value) + } + + fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option + ) -> Result<(), HttpError> { + self.check(Capability::Http, "http_request_write_body"); + self.externalities.http_request_write_body(request_id, chunk, deadline) + } + + fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { + self.check(Capability::Http, "http_response_wait"); + self.externalities.http_response_wait(ids, deadline) + } + + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + self.check(Capability::Http, "http_response_headers"); + self.externalities.http_response_headers(request_id) + } + + fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option + ) -> Result { + self.check(Capability::Http, "http_response_read_body"); + self.externalities.http_response_read_body(request_id, buffer, deadline) + } +} #[cfg(test)] @@ -443,4 +675,18 @@ mod tests { assert_eq!(t.sub(Duration::from_millis(10)), Timestamp(0)); assert_eq!(t.diff(&Timestamp(3)), Duration(2)); } + + #[test] + fn capabilities() { + let none = Capabilities::none(); + let all = Capabilities::all(); + let some = Capabilities::from(&[Capability::Keystore, Capability::Randomness][..]); + + assert!(!none.has(Capability::Keystore)); + assert!(all.has(Capability::Keystore)); + assert!(some.has(Capability::Keystore)); + assert!(!none.has(Capability::TransactionPool)); + assert!(all.has(Capability::TransactionPool)); + assert!(!some.has(Capability::TransactionPool)); + } } diff --git a/core/rpc-servers/Cargo.toml b/core/rpc-servers/Cargo.toml index 54a4b68eab9bf..d4befd52e9f7a 100644 --- a/core/rpc-servers/Cargo.toml +++ b/core/rpc-servers/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] 
-jsonrpc-core = "13.0.0" -pubsub = { package = "jsonrpc-pubsub", version = "13.0.0" } +jsonrpc-core = "13.1.0" +pubsub = { package = "jsonrpc-pubsub", version = "13.1.0" } log = "0.4" serde = "1.0" sr-primitives = { path = "../sr-primitives" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "13.0.0" } -ws = { package = "jsonrpc-ws-server", version = "13.0.0" } +http = { package = "jsonrpc-http-server", version = "13.1.0" } +ws = { package = "jsonrpc-ws-server", version = "13.1.0" } diff --git a/core/rpc/Cargo.toml b/core/rpc/Cargo.toml index 0a9cf108c9ede..234761708e7fe 100644 --- a/core/rpc/Cargo.toml +++ b/core/rpc/Cargo.toml @@ -8,17 +8,16 @@ edition = "2018" derive_more = "0.14.0" futures = "0.1" futures03 = { package = "futures-preview", version = "0.3.0-alpha.17", features = ["compat"] } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-pubsub = "13.0.0" -jsonrpc-derive = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-pubsub = "13.1.0" +jsonrpc-derive = "13.1.0" log = "0.4" parking_lot = "0.9.0" codec = { package = "parity-scale-codec", version = "1.0.0" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" client = { package = "substrate-client", path = "../client" } -substrate-executor = { path = "../executor" } network = { package = "substrate-network", path = "../network" } primitives = { package = "substrate-primitives", path = "../primitives" } session = { package = "substrate-session", path = "../session" } diff --git a/core/rpc/src/author/error.rs b/core/rpc/src/author/error.rs index 2fcc8c780dfdb..1ce707f0bb38f 100644 --- a/core/rpc/src/author/error.rs +++ b/core/rpc/src/author/error.rs @@ -83,6 +83,8 @@ const POOL_TOO_LOW_PRIORITY: i64 = POOL_INVALID_TX + 4; const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5; /// The transaction was not included to the pool because of the limits. 
const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6; +/// The key type crypto is not known. +const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7; impl From for rpc::Error { fn from(e: Error) -> Self { @@ -134,6 +136,14 @@ impl From for rpc::Error { message: "Immediately Dropped" .into(), data: Some("The transaction couldn't enter the pool because of the limit".into()), }, + Error::UnsupportedKeyType => rpc::Error { + code: rpc::ErrorCode::ServerError(UNSUPPORTED_KEY_TYPE), + message: "Unknown key type crypto" .into(), + data: Some( + "The crypto for the given key type is unknown, please add the public key to the \ + request to insert the key successfully.".into() + ), + }, e => errors::internal(e), } } diff --git a/core/rpc/src/author/mod.rs b/core/rpc/src/author/mod.rs index 6e2d7aa92abc9..0290a534ea549 100644 --- a/core/rpc/src/author/mod.rs +++ b/core/rpc/src/author/mod.rs @@ -162,7 +162,7 @@ impl AuthorApi, BlockHash

> for Author whe Some(public) => public.0, None => { let maybe_public = match key_type { - key_types::BABE | key_types::IM_ONLINE | key_types::SR25519 => + key_types::BABE | key_types::SR25519 => sr25519::Pair::from_string(&suri, maybe_password) .map(|pair| pair.public().to_raw_vec()), key_types::GRANDPA | key_types::ED25519 => diff --git a/core/rpc/src/chain/mod.rs b/core/rpc/src/chain/mod.rs index 9b8192e660e9b..cb0235e13bb79 100644 --- a/core/rpc/src/chain/mod.rs +++ b/core/rpc/src/chain/mod.rs @@ -67,19 +67,19 @@ pub trait ChainApi { #[pubsub( subscription = "chain_newHead", subscribe, - name = "chain_subscribeNewHead", - alias("subscribe_newHead") + name = "chain_subscribeNewHeads", + alias("subscribe_newHead", "chain_subscribeNewHead") )] - fn subscribe_new_head(&self, metadata: Self::Metadata, subscriber: Subscriber

); + fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); /// Unsubscribe from new head subscription. #[pubsub( subscription = "chain_newHead", unsubscribe, - name = "chain_unsubscribeNewHead", - alias("unsubscribe_newHead") + name = "chain_unsubscribeNewHeads", + alias("unsubscribe_newHead", "chain_unsubscribeNewHead") )] - fn unsubscribe_new_head(&self, metadata: Option, id: SubscriptionId) -> RpcResult; + fn unsubscribe_new_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult; /// New head subscription #[pubsub( @@ -199,7 +199,7 @@ impl ChainApi, Block::Hash, Block::Header, Sig Ok(self.client.info().chain.finalized_hash) } - fn subscribe_new_head(&self, _metadata: Self::Metadata, subscriber: Subscriber) { + fn subscribe_new_heads(&self, _metadata: Self::Metadata, subscriber: Subscriber) { self.subscribe_headers( subscriber, || self.block_hash(None.into()), @@ -210,7 +210,7 @@ impl ChainApi, Block::Hash, Block::Header, Sig ) } - fn unsubscribe_new_head(&self, _metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_new_heads(&self, _metadata: Option, id: SubscriptionId) -> RpcResult { Ok(self.subscriptions.cancel(id)) } diff --git a/core/rpc/src/chain/tests.rs b/core/rpc/src/chain/tests.rs index e6fa4d94e5b0d..36157df71df42 100644 --- a/core/rpc/src/chain/tests.rs +++ b/core/rpc/src/chain/tests.rs @@ -202,7 +202,7 @@ fn should_notify_about_latest_block() { subscriptions: Subscriptions::new(Arc::new(remote)), }; - api.subscribe_new_head(Default::default(), subscriber); + api.subscribe_new_heads(Default::default(), subscriber); // assert id assigned assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); diff --git a/core/rpc/src/state/tests.rs b/core/rpc/src/state/tests.rs index 6b4ddc9b920bc..07e009d1795cd 100644 --- a/core/rpc/src/state/tests.rs +++ b/core/rpc/src/state/tests.rs @@ -25,20 +25,24 @@ use test_client::{ consensus::BlockOrigin, runtime, }; -use substrate_executor::NativeExecutionDispatch; #[test] fn should_return_storage() { + const KEY: &[u8] = b":mock"; + const 
VALUE: &[u8] = b"hello world"; + let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); + let client = TestClientBuilder::new() + .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) + .build(); let genesis_hash = client.genesis_hash(); - let client = State::new(client, Subscriptions::new(Arc::new(core.executor()))); - let key = StorageKey(b":code".to_vec()); + let client = State::new(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); + let key = StorageKey(KEY.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())).unwrap().unwrap() as usize, - LocalExecutor::native_equivalent().len(), + VALUE.len(), ); assert_matches!( client.storage_hash(key.clone(), Some(genesis_hash).into()).map(|x| x.is_some()), @@ -46,7 +50,7 @@ fn should_return_storage() { ); assert_eq!( client.storage_size(key.clone(), None).unwrap().unwrap() as usize, - LocalExecutor::native_equivalent().len(), + VALUE.len(), ); } @@ -262,10 +266,12 @@ fn should_return_runtime_version() { [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",1],\ [\"0xf78b278be53f454c\",1],[\"0xab3c0572291feb8b\",1]]}"; - assert_eq!( - serde_json::to_string(&api.runtime_version(None.into()).unwrap()).unwrap(), - result, - ); + let runtime_version = api.runtime_version(None.into()).unwrap(); + let serialized = serde_json::to_string(&runtime_version).unwrap(); + assert_eq!(serialized, result); + + let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap(); + assert_eq!(deserialized, runtime_version); } #[test] diff --git a/core/service/src/builder.rs b/core/service/src/builder.rs new file mode 100644 index 0000000000000..3b079e549d8cd --- /dev/null +++ b/core/service/src/builder.rs @@ -0,0 +1,809 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::{NewService, NetworkStatus, NetworkState, error::{self, Error}, DEFAULT_PROTOCOL_ID}; +use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, TransactionPoolAdapter}; +use crate::TaskExecutor; +use crate::config::Configuration; +use client::{BlockchainEvents, Client, runtime_api}; +use codec::{Decode, Encode, IoReader}; +use consensus_common::import_queue::ImportQueue; +use futures::{prelude::*, sync::mpsc}; +use futures03::{FutureExt as _, compat::Compat, StreamExt as _, TryStreamExt as _}; +use keystore::{Store as Keystore, KeyStorePtr}; +use log::{info, warn}; +use network::{FinalityProofProvider, OnDemand, NetworkService, NetworkStateInfo}; +use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpecialization}; +use parking_lot::{Mutex, RwLock}; +use primitives::{Blake2Hasher, H256, Hasher}; +use rpc::{self, system::SystemInfo}; +use sr_primitives::{BuildStorage, generic::BlockId}; +use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, NumberFor, One, Zero, Header, SaturatedConversion}; +use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; +use serde::{Serialize, de::DeserializeOwned}; +use std::{io::{Read, Write, Seek}, marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; +use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; +use tel::{telemetry, SUBSTRATE_INFO}; 
+use transaction_pool::txpool::{self, ChainApi, Pool as TransactionPool}; + +/// Aggregator for the components required to build a service. +/// +/// # Usage +/// +/// Call [`ServiceBuilder::new_full`] or [`ServiceBuilder::new_light`], then call the various +/// `with_` methods to add the required components that you built yourself: +/// +/// - [`with_select_chain`](ServiceBuilder::with_select_chain) +/// - [`with_import_queue`](ServiceBuilder::with_import_queue) +/// - [`with_network_protocol`](ServiceBuilder::with_network_protocol) +/// - [`with_finality_proof_provider`](ServiceBuilder::with_finality_proof_provider) +/// - [`with_transaction_pool`](ServiceBuilder::with_transaction_pool) +/// +/// After this is done, call [`build`](ServiceBuilder::build) to construct the service. +/// +/// The order in which the `with_*` methods are called doesn't matter, as the correct binding of +/// generics is done when you call `build`. +/// +pub struct ServiceBuilder { + config: Configuration, + client: Arc, + keystore: Arc>, + fetcher: Option, + select_chain: Option, + import_queue: TImpQu, + finality_proof_request_builder: Option, + finality_proof_provider: Option, + network_protocol: TNetP, + transaction_pool: Arc, + rpc_extensions: TRpc, + marker: PhantomData<(TBl, TRtApi)>, +} + +impl ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), (), ()> +where TGen: Serialize + DeserializeOwned + BuildStorage { + /// Start the service builder with a configuration. 
+ pub fn new_full, TRtApi, TExecDisp: NativeExecutionDispatch>( + config: Configuration + ) -> Result, + client::LocalCallExecutor, NativeExecutor>, + TBl, + TRtApi + >, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + (), + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: None, + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::::new(config.default_heap_pages); + + let client = Arc::new(client_db::new_client( + db_settings, + executor, + &config.chain_spec, + config.execution_strategies.clone(), + Some(keystore.clone()), + )?); + + Ok(ServiceBuilder { + config, + client, + keystore, + fetcher: None, + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + network_protocol: (), + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + marker: PhantomData, + }) + } + + /// Start the service builder with a configuration. 
+ pub fn new_light, TRtApi, TExecDisp: NativeExecutionDispatch + 'static>( + config: Configuration + ) -> Result, network::OnDemand, Blake2Hasher>, + client::light::call_executor::RemoteOrLocalCallExecutor< + TBl, + client::light::backend::Backend< + client_db::light::LightStorage, + network::OnDemand, + Blake2Hasher + >, + client::light::call_executor::RemoteCallExecutor< + client::light::blockchain::Blockchain< + client_db::light::LightStorage, + network::OnDemand + >, + network::OnDemand, + >, + client::LocalCallExecutor< + client::light::backend::Backend< + client_db::light::LightStorage, + network::OnDemand, + Blake2Hasher + >, + NativeExecutor + > + >, + TBl, + TRtApi + >, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + (), + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: config.database_cache_size.map(|u| u as usize), + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::::new(config.default_heap_pages); + + let db_storage = client_db::light::LightStorage::new(db_settings)?; + let light_blockchain = client::light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor.clone())); + let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); + let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); + let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; + + Ok(ServiceBuilder { + config, + client: Arc::new(client), + keystore, + fetcher: Some(fetcher), + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + 
network_protocol: (), + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + marker: PhantomData, + }) + } +} + +impl + ServiceBuilder { + + /// Returns a reference to the client that was stored in this builder. + pub fn client(&self) -> &Arc { + &self.client + } + + /// Returns a reference to the select-chain that was stored in this builder. + pub fn select_chain(&self) -> Option<&TSc> { + self.select_chain.as_ref() + } + + /// Defines which head-of-chain strategy to use. + pub fn with_opt_select_chain( + mut self, + select_chain_builder: impl FnOnce(&mut Configuration, Arc) -> Result, Error> + ) -> Result, Error> { + let select_chain = select_chain_builder(&mut self.config, self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which head-of-chain strategy to use. + pub fn with_select_chain( + self, + builder: impl FnOnce(&mut Configuration, Arc) -> Result + ) -> Result, Error> { + self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some)) + } + + /// Defines which import queue to use. 
+ pub fn with_import_queue( + mut self, + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) + -> Result + ) -> Result, Error> + where TSc: Clone { + let import_queue = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which network specialization protocol to use. + pub fn with_network_protocol( + self, + network_protocol_builder: impl FnOnce(&Configuration) -> Result + ) -> Result, Error> { + let network_protocol = network_protocol_builder(&self.config)?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. 
+ pub fn with_opt_finality_proof_provider( + self, + builder: impl FnOnce(Arc) -> Result>>, Error> + ) -> Result>, + TNetP, + TExPool, + TRpc + >, Error> { + let finality_proof_provider = builder(self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. + pub fn with_finality_proof_provider( + self, + build: impl FnOnce(Arc) -> Result>, Error> + ) -> Result>, + TNetP, + TExPool, + TRpc + >, Error> { + self.with_opt_finality_proof_provider(|client| build(client).map(Option::Some)) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_opt_fprb( + mut self, + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) + -> Result<(UImpQu, Option), Error> + ) -> Result, Error> + where TSc: Clone { + let (import_queue, fprb) = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: fprb, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which import queue to use. 
+ pub fn with_import_queue_and_fprb( + self, + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) + -> Result<(UImpQu, UFprb), Error> + ) -> Result, Error> + where TSc: Clone { + self.with_import_queue_and_opt_fprb(|cfg, cl, sc, tx| builder(cfg, cl, sc, tx).map(|(q, f)| (q, Some(f)))) + } + + /// Defines which transaction pool to use. + pub fn with_transaction_pool( + self, + transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc) -> Result + ) -> Result, Error> { + let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: Arc::new(transaction_pool), + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines the RPC extensions to use. + pub fn with_rpc_extensions( + self, + rpc_ext_builder: impl FnOnce(Arc, Arc) -> URpc + ) -> Result, Error> { + let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone()); + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions, + marker: self.marker, + }) + } +} + +/// Implemented on `ServiceBuilder`. Allows importing blocks once you have given all the required +/// components to the builder. 
+pub trait ServiceBuilderImport { + /// Starts the process of importing blocks. + fn import_blocks( + self, + exit: impl Future + Send + 'static, + input: impl Read + Seek, + ) -> Result + Send>, Error>; +} + +/// Implemented on `ServiceBuilder`. Allows exporting blocks once you have given all the required +/// components to the builder. +pub trait ServiceBuilderExport { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs the blocks export. + fn export_blocks( + &self, + exit: impl Future + Send + 'static, + output: impl Write, + from: NumberFor, + to: Option>, + json: bool + ) -> Result<(), Error>; +} + +/// Implemented on `ServiceBuilder`. Allows reverting the chain once you have given all the +/// required components to the builder. +pub trait ServiceBuilderRevert { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs a revert of `blocks` bocks. + fn revert_chain( + &self, + blocks: NumberFor + ) -> Result<(), Error>; +} + +impl + ServiceBuilderImport for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TImpQu: 'static + ImportQueue, + TRtApi: 'static + Send + Sync, +{ + fn import_blocks( + self, + exit: impl Future + Send + 'static, + input: impl Read + Seek, + ) -> Result + Send>, Error> { + let client = self.client; + let mut queue = self.import_queue; + import_blocks!(TBl, client, queue, exit, input) + .map(|f| Box::new(f) as Box<_>) + } +} + +impl + ServiceBuilderExport for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone +{ + type Block = TBl; + + fn export_blocks( + &self, + exit: impl Future + Send + 'static, + mut output: impl Write, + from: NumberFor, + to: Option>, + json: 
bool + ) -> Result<(), Error> { + let client = &self.client; + export_blocks!(client, exit, output, from, to, json) + } +} + +impl + ServiceBuilderRevert for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone +{ + type Block = TBl; + + fn revert_chain( + &self, + blocks: NumberFor + ) -> Result<(), Error> { + let client = &self.client; + revert_chain!(client, blocks) + } +} + +impl +ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + Client, + Arc>, + TSc, + TImpQu, + BoxFinalityProofRequestBuilder, + Arc>, + TNetP, + TransactionPool, + TRpc +> where + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + runtime_api::Metadata + + offchain::OffchainWorkerApi + + runtime_api::TaggedTransactionQueue + + session::SessionKeys, + TBl: BlockT::Out>, + TRtApi: 'static + Send + Sync, + TCfg: Default, + TGen: Serialize + DeserializeOwned + BuildStorage, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TSc: Clone, + TImpQu: 'static + ImportQueue, + TNetP: NetworkSpecialization, + TExPoolApi: 'static + ChainApi::Hash>, + TRpc: rpc::RpcExtension + Clone, +{ + /// Builds the service. 
+ pub fn build(self) -> Result, + TBl, + Client, + TSc, + NetworkStatus, + NetworkService::Hash>, + TransactionPool, + offchain::OffchainWorkers< + Client, + TBackend::OffchainStorage, + TBl + >, + >, Error> { + let mut config = self.config; + session::generate_initial_session_keys( + self.client.clone(), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default() + )?; + let ( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + ) = ( + self.client, + self.fetcher, + self.keystore, + self.select_chain, + self.import_queue, + self.finality_proof_request_builder, + self.finality_proof_provider, + self.network_protocol, + self.transaction_pool, + self.rpc_extensions + ); + + new_impl!( + TBl, + config, + move |_| -> Result<_, Error> { + Ok(( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + )) + }, + |h, c, tx| maintain_transaction_pool(h, c, tx), + |n, o, p, ns, v| offchain_workers(n, o, p, ns, v), + |c, ssb, si, te, tp, ext, ks| start_rpc(c, ssb, si, te, tp, ext, ks), + ) + } +} + +pub(crate) fn start_rpc( + client: Arc>, + system_send_back: futures03::channel::mpsc::UnboundedSender>, + rpc_system_info: SystemInfo, + task_executor: TaskExecutor, + transaction_pool: Arc>, + rpc_extensions: impl rpc::RpcExtension, + keystore: KeyStorePtr, +) -> rpc_servers::RpcHandler +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + runtime_api::Metadata + session::SessionKeys, + Api: Send + Sync + 'static, + Executor: client::CallExecutor + Send + Sync + Clone + 'static, + PoolApi: txpool::ChainApi + 'static { + use rpc::{chain, state, author, system}; + let subscriptions = rpc::Subscriptions::new(task_executor.clone()); + let 
chain = chain::Chain::new(client.clone(), subscriptions.clone()); + let state = state::State::new(client.clone(), subscriptions.clone()); + let author = rpc::author::Author::new( + client, + transaction_pool, + subscriptions, + keystore, + ); + let system = system::System::new(rpc_system_info, system_send_back); + + rpc_servers::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions, + )) +} + +pub(crate) fn maintain_transaction_pool( + id: &BlockId, + client: &Client, + transaction_pool: &TransactionPool, +) -> error::Result<()> where + Block: BlockT::Out>, + Backend: client::backend::Backend, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue, + Executor: client::CallExecutor, + PoolApi: txpool::ChainApi, +{ + // Avoid calling into runtime if there is nothing to prune from the pool anyway. + if transaction_pool.status().is_empty() { + return Ok(()) + } + + if let Some(block) = client.block(id)? 
{ + let parent_id = BlockId::hash(*block.block.header().parent_hash()); + let extrinsics = block.block.extrinsics(); + transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; + } + + Ok(()) +} + +pub(crate) fn offchain_workers( + number: &NumberFor, + offchain: &offchain::OffchainWorkers< + Client, + >::OffchainStorage, + Block + >, + pool: &Arc>, + network_state: &Arc, + is_validator: bool, +) -> error::Result + Send>> +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Api: 'static, + >::OffchainStorage: 'static, + Client: ProvideRuntimeApi + Send + Sync, + as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi, + Executor: client::CallExecutor + 'static, + PoolApi: txpool::ChainApi + 'static, +{ + let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) + .map(|()| Ok(())); + Ok(Box::new(Compat::new(future))) +} + +#[cfg(test)] +mod tests { + use super::*; + use consensus_common::{BlockOrigin, SelectChain}; + use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; + + #[test] + fn should_remove_transactions_from_the_pool() { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); + + // import the block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should 
clean up the queue + assert_eq!(pool.status().ready, 1); + maintain_transaction_pool( + &id, + &client, + &pool, + ).unwrap(); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } +} diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index c801b81186f18..3a3677798b6ad 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -16,44 +16,19 @@ //! Chain utilities. -use std::{self, io::{Read, Write, Seek}}; -use futures::prelude::*; -use futures03::TryFutureExt as _; -use log::{info, warn}; - -use sr_primitives::generic::{SignedBlock, BlockId}; -use sr_primitives::traits::{SaturatedConversion, Zero, One, Block, Header, NumberFor}; -use consensus_common::import_queue::{ImportQueue, IncomingBlock, Link, BlockImportError, BlockImportResult}; -use network::message; - -use consensus_common::BlockOrigin; -use crate::components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis}; -use crate::new_client; -use codec::{Decode, Encode, IoReader}; +use crate::RuntimeGenesis; use crate::error; use crate::chain_spec::ChainSpec; -/// Export a range of blocks to a binary stream. -pub fn export_blocks( - config: FactoryFullConfiguration, - exit: E, - mut output: W, - from: FactoryBlockNumber, - to: Option>, - json: bool -) -> error::Result<()> - where - F: ServiceFactory, - E: Future + Send + 'static, - W: Write, -{ - let client = new_client::(&config)?; - let mut block = from; +#[macro_export] +macro_rules! 
export_blocks { +($client:ident, $exit:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{ + let mut block = $from; - let last = match to { + let last = match $to { Some(v) if v.is_zero() => One::one(), Some(v) => v, - None => client.info().chain.best_number, + None => $client.info().chain.best_number, }; if last < block { @@ -62,28 +37,28 @@ pub fn export_blocks( let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); info!("Exporting blocks from #{} to #{}", block, last); - if !json { + if !$json { let last_: u64 = last.saturated_into::(); let block_: u64 = block.saturated_into::(); let len: u64 = last_ - block_ + 1; - output.write(&len.encode())?; + $output.write(&len.encode())?; } loop { if exit_recv.try_recv().is_ok() { break; } - match client.block(&BlockId::number(block))? { + match $client.block(&BlockId::number(block))? { Some(block) => { - if json { - serde_json::to_writer(&mut output, &block) + if $json { + serde_json::to_writer(&mut $output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; } else { - output.write(&block.encode())?; + $output.write(&block.encode())?; } }, None => break, @@ -97,66 +72,59 @@ pub fn export_blocks( block += One::one(); } Ok(()) +}} } -struct WaitLink { - imported_blocks: u64, - has_error: bool, -} +#[macro_export] +macro_rules! 
import_blocks { +($block:ty, $client:ident, $queue:ident, $exit:ident, $input:ident) => {{ + use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult}; + use consensus_common::BlockOrigin; + use network::message; + use sr_primitives::generic::SignedBlock; + use sr_primitives::traits::Block; + use futures03::TryFutureExt as _; + + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } -impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } } } -} -impl Link for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } } } } -} - -/// Returns a future that import blocks from a binary stream. -pub fn import_blocks( - mut config: FactoryFullConfiguration, - exit: E, - input: R -) -> error::Result> - where F: ServiceFactory, E: Future + Send + 'static, R: Read + Seek, -{ - let client = new_client::(&config)?; - // FIXME #1134 this shouldn't need a mutable config. 
- let select_chain = components::FullComponents::::build_select_chain(&mut config, client.clone())?; - let (mut queue, _) = components::FullComponents::::build_import_queue( - &mut config, - client.clone(), - select_chain, - None, - )?; let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); - let mut io_reader_input = IoReader(input); + let mut io_reader_input = IoReader($input); let count: u64 = Decode::decode(&mut io_reader_input) .map_err(|e| format!("Error reading file: {}", e))?; info!("Importing {} blocks", count); @@ -165,11 +133,11 @@ pub fn import_blocks( if exit_recv.try_recv().is_ok() { break; } - match SignedBlock::::decode(&mut io_reader_input) { + match SignedBlock::<$block>::decode(&mut io_reader_input) { Ok(signed) => { let (header, extrinsics) = signed.block.deconstruct(); let hash = header.hash(); - let block = message::BlockData:: { + let block = message::BlockData::<$block> { hash, justification: signed.justification, header: Some(header), @@ -178,8 +146,8 @@ pub fn import_blocks( message_queue: None }; // import queue handles verification and importing it into the client - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { + $queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock::<$block> { hash: block.hash, header: block.header, body: block.body, @@ -208,7 +176,7 @@ pub fn import_blocks( let blocks_before = link.imported_blocks; let _ = futures03::future::poll_fn(|cx| { - queue.poll_actions(cx, &mut link); + $queue.poll_actions(cx, &mut link); std::task::Poll::Pending::> }).compat().poll(); if link.has_error { @@ -226,24 +194,20 @@ pub fn import_blocks( ); } if link.imported_blocks >= count { - info!("Imported {} blocks. Best: #{}", block_count, client.info().chain.best_number); + info!("Imported {} blocks. 
Best: #{}", block_count, $client.info().chain.best_number); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } })) +}} } -/// Revert the chain. -pub fn revert_chain( - config: FactoryFullConfiguration, - blocks: FactoryBlockNumber -) -> error::Result<()> - where F: ServiceFactory, -{ - let client = new_client::(&config)?; - let reverted = client.revert(blocks)?; - let info = client.info().chain; +#[macro_export] +macro_rules! revert_chain { +($client:ident, $blocks:ident) => {{ + let reverted = $client.revert($blocks)?; + let info = $client.info().chain; if reverted.is_zero() { info!("There aren't any non-finalized blocks to revert."); @@ -251,6 +215,7 @@ pub fn revert_chain( info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); } Ok(()) +}} } /// Build a chain spec json diff --git a/core/service/src/chain_spec.rs b/core/service/src/chain_spec.rs index 1683876c3f86f..8b35b0bac9581 100644 --- a/core/service/src/chain_spec.rs +++ b/core/service/src/chain_spec.rs @@ -24,7 +24,7 @@ use serde::{Serialize, Deserialize}; use primitives::storage::{StorageKey, StorageData}; use sr_primitives::{BuildStorage, StorageOverlay, ChildrenStorageOverlay}; use serde_json as json; -use crate::components::RuntimeGenesis; +use crate::RuntimeGenesis; use network::Multiaddr; use tel::TelemetryEndpoints; diff --git a/core/service/src/components.rs b/core/service/src/components.rs deleted file mode 100644 index a9aa2129f2498..0000000000000 --- a/core/service/src/components.rs +++ /dev/null @@ -1,808 +0,0 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Substrate service components. - -use std::{sync::Arc, ops::Deref, ops::DerefMut}; -use serde::{Serialize, de::DeserializeOwned}; -use crate::chain_spec::ChainSpec; -use keystore::KeyStorePtr; -use client_db; -use client::{self, Client, runtime_api}; -use crate::{error, Service}; -use consensus_common::{import_queue::ImportQueue, SelectChain}; -use network::{ - self, OnDemand, FinalityProofProvider, NetworkStateInfo, config::BoxFinalityProofRequestBuilder -}; -use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; -use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; -use sr_primitives::{ - BuildStorage, traits::{Block as BlockT, Header as HeaderT, ProvideRuntimeApi}, generic::BlockId -}; -use crate::config::Configuration; -use primitives::{Blake2Hasher, H256, traits::BareCryptoStorePtr}; -use rpc::{self, system::SystemInfo}; -use futures::{prelude::*, future::Executor}; -use futures03::{FutureExt as _, channel::mpsc, compat::Compat}; - -// Type aliases. -// These exist mainly to avoid typing `::Foo` all over the code. - -/// Network service type for `Components`. -pub type NetworkService = network::NetworkService< - ComponentBlock, - <::Factory as ServiceFactory>::NetworkProtocol, - ComponentExHash ->; - -/// Code executor type for a factory. -pub type CodeExecutor = NativeExecutor<::RuntimeDispatch>; - -/// Full client backend type for a factory. -pub type FullBackend = client_db::Backend<::Block>; - -/// Full client executor type for a factory. 
-pub type FullExecutor = client::LocalCallExecutor< - client_db::Backend<::Block>, - CodeExecutor, ->; - -/// Light client backend type for a factory. -pub type LightBackend = client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher, ->; - -/// Light client executor type for a factory. -pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor< - ::Block, - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - client::light::call_executor::RemoteCallExecutor< - client::light::blockchain::Blockchain< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block> - >, - network::OnDemand<::Block>, - >, - client::LocalCallExecutor< - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - CodeExecutor - > ->; - -/// Full client type for a factory. -pub type FullClient = Client, FullExecutor, ::Block, ::RuntimeApi>; - -/// Light client type for a factory. -pub type LightClient = Client, LightExecutor, ::Block, ::RuntimeApi>; - -/// `ChainSpec` specialization for a factory. -pub type FactoryChainSpec = ChainSpec<::Genesis>; - -/// `Genesis` specialization for a factory. -pub type FactoryGenesis = ::Genesis; - -/// `Block` type for a factory. -pub type FactoryBlock = ::Block; - -/// `Extrinsic` type for a factory. -pub type FactoryExtrinsic = <::Block as BlockT>::Extrinsic; - -/// `Number` type for a factory. -pub type FactoryBlockNumber = < as BlockT>::Header as HeaderT>::Number; - -/// Full `Configuration` type for a factory. -pub type FactoryFullConfiguration = Configuration<::Configuration, FactoryGenesis>; - -/// Client type for `Components`. -pub type ComponentClient = Client< - ::Backend, - ::Executor, - FactoryBlock<::Factory>, - ::RuntimeApi, ->; - -/// A offchain workers storage backend type. 
-pub type ComponentOffchainStorage = < - ::Backend as client::backend::Backend, Blake2Hasher> ->::OffchainStorage; - -/// Block type for `Components` -pub type ComponentBlock = <::Factory as ServiceFactory>::Block; - -/// Extrinsic hash type for `Components` -pub type ComponentExHash = <::TransactionPoolApi as txpool::ChainApi>::Hash; - -/// Extrinsic type. -pub type ComponentExtrinsic = as BlockT>::Extrinsic; - -/// Extrinsic pool API type for `Components`. -pub type PoolApi = ::TransactionPoolApi; - -/// A set of traits for the runtime genesis config. -pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} -impl RuntimeGenesis for T {} - -/// A transport-agnostic handler of the RPC queries. -pub type RpcHandler = rpc_servers::RpcHandler; - -/// Something that can create and store initial session keys from given seeds. -pub trait InitialSessionKeys { - /// Generate the initial session keys for the given seeds and store them in - /// an internal keystore. - fn generate_initial_session_keys( - client: Arc>, - seeds: Vec, - ) -> error::Result<()>; -} - -impl InitialSessionKeys for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: session::SessionKeys>, -{ - fn generate_initial_session_keys( - client: Arc>, - seeds: Vec, - ) -> error::Result<()> { - session::generate_initial_session_keys(client, seeds).map_err(Into::into) - } -} - -/// Something that can start the RPC service. 
-pub trait StartRpc { - fn start_rpc( - client: Arc>, - system_send_back: mpsc::UnboundedSender>>, - system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc>, - rpc_extensions: impl rpc::RpcExtension, - keystore: KeyStorePtr, - ) -> RpcHandler; -} - -impl StartRpc for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - runtime_api::Metadata> + session::SessionKeys>, -{ - fn start_rpc( - client: Arc>, - system_send_back: mpsc::UnboundedSender>>, - rpc_system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc>, - rpc_extensions: impl rpc::RpcExtension, - keystore: KeyStorePtr, - ) -> RpcHandler { - use rpc::{chain, state, author, system}; - let subscriptions = rpc::Subscriptions::new(task_executor.clone()); - let chain = chain::Chain::new(client.clone(), subscriptions.clone()); - let state = state::State::new(client.clone(), subscriptions.clone()); - let author = rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - ); - let system = system::System::new(rpc_system_info, system_send_back); - - rpc_servers::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions, - )) - } -} - -/// Something that can maintain transaction pool on every imported block. 
-pub trait MaintainTransactionPool { - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()>; -} - -fn maintain_transaction_pool( - id: &BlockId, - client: &Client, - transaction_pool: &TransactionPool, -) -> error::Result<()> where - Block: BlockT::Out>, - Backend: client::backend::Backend, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue, - Executor: client::CallExecutor, - PoolApi: txpool::ChainApi, -{ - // Avoid calling into runtime if there is nothing to prune from the pool anyway. - if transaction_pool.status().is_empty() { - return Ok(()) - } - - if let Some(block) = client.block(id)? { - let parent_id = BlockId::hash(*block.block.header().parent_hash()); - let extrinsics = block.block.extrinsics(); - transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; - } - - Ok(()) -} - -impl MaintainTransactionPool for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue>, -{ - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()> { - maintain_transaction_pool(id, client, transaction_pool) - } -} - -pub trait OffchainWorker { - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - pool: &Arc>, - network_state: &Arc, - is_validator: bool, - ) -> error::Result + Send>>; -} - -impl OffchainWorker for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi>, -{ - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - pool: &Arc>, - network_state: &Arc, - is_validator: bool, - ) -> error::Result + Send>> { - let 
future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) - .map(|()| Ok(())); - Ok(Box::new(Compat::new(future))) - } -} - -/// The super trait that combines all required traits a `Service` needs to implement. -pub trait ServiceTrait: - Deref> - + Send - + 'static - + StartRpc - + MaintainTransactionPool - + OffchainWorker - + InitialSessionKeys -{} -impl ServiceTrait for T where - T: Deref> - + Send - + 'static - + StartRpc - + MaintainTransactionPool - + OffchainWorker - + InitialSessionKeys -{} - -/// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc + Send>> + Send + Sync>; - -/// A collection of types and methods to build a service on top of the substrate service. -pub trait ServiceFactory: 'static + Sized { - /// Block type. - type Block: BlockT; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// Network protocol extensions. - type NetworkProtocol: network::specialization::NetworkSpecialization; - /// Chain runtime. - type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; - /// Extrinsic pool backend type for the full client. - type FullTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + Send + 'static; - /// Extrinsic pool backend type for the light client. - type LightTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + 'static; - /// Genesis configuration for the runtime. - type Genesis: RuntimeGenesis; - /// Other configuration for service members. - type Configuration: Default; - /// RPC initialisation. - type RpcExtensions: rpc::RpcExtension; - /// Extended full service type. - type FullService: ServiceTrait>; - /// Extended light service type. 
- type LightService: ServiceTrait>; - /// ImportQueue for full client - type FullImportQueue: ImportQueue + 'static; - /// ImportQueue for light clients - type LightImportQueue: ImportQueue + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain + 'static; - - //TODO: replace these with a constructor trait. that TransactionPool implements. (#1242) - /// Extrinsic pool constructor for the full client. - fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - /// Extrinsic pool constructor for the light client. - fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// Build network protocol. - fn build_network_protocol(config: &FactoryFullConfiguration) - -> Result; - - /// Build finality proof provider for serving network requests on full node. - fn build_finality_proof_provider( - client: Arc> - ) -> Result>>, error::Error>; - - /// Build the Fork Choice algorithm for full client - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc>, - ) -> Result; - - /// Build full service. - fn new_full(config: FactoryFullConfiguration) - -> Result; - /// Build light service. 
- fn new_light(config: FactoryFullConfiguration) - -> Result; - - /// ImportQueue for a full client - fn build_full_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc>, - _select_chain: Self::SelectChain, - _transaction_pool: Option>>, - ) -> Result { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// ImportQueue for a light client - fn build_light_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result<(Self::LightImportQueue, BoxFinalityProofRequestBuilder), error::Error> { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// Create custom RPC method handlers for full node. - fn build_full_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; - - /// Create custom RPC method handlers for light node. - fn build_light_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; -} - -/// A collection of types and function to generalize over full / light client type. -pub trait Components: Sized + 'static { - /// Associated service factory. - type Factory: ServiceFactory; - /// Client backend. - type Backend: 'static + client::backend::Backend, Blake2Hasher>; - /// Client executor. - type Executor: 'static + client::CallExecutor, Blake2Hasher> + Send + Sync + Clone; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// The type that can start all runtime-dependent services. - type RuntimeServices: ServiceTrait; - /// The type that can extend the RPC methods. 
- type RpcExtensions: rpc::RpcExtension; - // TODO: Traitify transaction pool and allow people to implement their own. (#1242) - /// Extrinsic pool type. - type TransactionPoolApi: 'static + txpool::ChainApi< - Hash = as BlockT>::Hash, - Block = FactoryBlock - >; - /// Our Import Queue - type ImportQueue: ImportQueue> + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain>; - - /// Create client. - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - keystore: Option, - ) -> Result< - ( - Arc>, - Option>>> - ), - error::Error - >; - - /// Create extrinsic pool. - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// Build the queue that imports blocks from the network, and optionally a way for the network - /// to build requests for proofs of finality. - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Option, - _transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error>; - - /// Finality proof provider for serving network requests. - fn build_finality_proof_provider( - client: Arc> - ) -> Result::Block>>>, error::Error>; - - /// Build fork choice selector - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result, error::Error>; - - /// Build RPC extensions - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; -} - -/// A struct that implement `Components` for the full client. 
-pub struct FullComponents { - service: Service>, -} - -impl FullComponents { - /// Create new `FullComponents` - pub fn new( - config: FactoryFullConfiguration - ) -> Result { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl Deref for FullComponents { - type Target = Service; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl DerefMut for FullComponents { - fn deref_mut(&mut self) -> &mut Service { - &mut self.service - } -} - -impl Future for FullComponents { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll { - self.service.poll() - } -} - -impl Executor + Send>> -for FullComponents { - fn execute( - &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - self.service.execute(future) - } -} - -impl Components for FullComponents { - type Factory = Factory; - type Executor = FullExecutor; - type Backend = FullBackend; - type TransactionPoolApi = ::FullTransactionPoolApi; - type ImportQueue = Factory::FullImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::FullService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - keystore: Option, - ) -> Result< - (Arc>, Option>>>), - error::Error, - > - { - let db_settings = client_db::DatabaseSettings { - cache_size: config.database_cache_size.map(|u| u as usize), - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - Ok(( - Arc::new( - client_db::new_client( - db_settings, - executor, - &config.chain_spec, - config.execution_strategies.clone(), - keystore, - )? 
- ), - None, - )) - } - - fn build_transaction_pool( - config: TransactionPoolOptions, - client: Arc> - ) -> Result, error::Error> { - Factory::build_full_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Option, - transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error> { - let select_chain = select_chain - .ok_or(error::Error::SelectChainRequired)?; - Factory::build_full_import_queue(config, client, select_chain, transaction_pool) - .map(|queue| (queue, None)) - } - - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result, error::Error> { - Self::Factory::build_select_chain(config, client).map(Some) - } - - fn build_finality_proof_provider( - client: Arc> - ) -> Result::Block>>>, error::Error> { - Factory::build_finality_proof_provider(client) - } - - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions { - Factory::build_full_rpc_extensions(client, transaction_pool) - } -} - -/// A struct that implement `Components` for the light client. 
-pub struct LightComponents { - service: Service>, -} - -impl LightComponents { - /// Create new `LightComponents` - pub fn new( - config: FactoryFullConfiguration, - ) -> Result { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl Deref for LightComponents { - type Target = Service; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl DerefMut for LightComponents { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.service - } -} - -impl Future for LightComponents { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll { - self.service.poll() - } -} - -impl Executor + Send>> -for LightComponents { - fn execute( - &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - self.service.execute(future) - } -} - -impl Components for LightComponents { - type Factory = Factory; - type Executor = LightExecutor; - type Backend = LightBackend; - type TransactionPoolApi = ::LightTransactionPoolApi; - type ImportQueue = ::LightImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::LightService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - _: Option, - ) - -> Result< - ( - Arc>, - Option>>> - ), error::Error> - { - let db_settings = client_db::DatabaseSettings { - cache_size: None, - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - let db_storage = client_db::light::LightStorage::new(db_settings)?; - let light_blockchain = client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - client::light::new_fetch_checker(light_blockchain.clone(), executor.clone()) - ); - let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); - let 
client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); - let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; - Ok((Arc::new(client), Some(fetcher))) - } - - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error> - { - Factory::build_light_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - _select_chain: Option, - _transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error> { - Factory::build_light_import_queue(config, client) - .map(|(queue, builder)| (queue, Some(builder))) - } - - fn build_finality_proof_provider( - _client: Arc> - ) -> Result::Block>>>, error::Error> { - Ok(None) - } - - fn build_select_chain( - _config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result, error::Error> { - Ok(None) - } - - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions { - Factory::build_light_rpc_extensions(client, transaction_pool) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use consensus_common::BlockOrigin; - use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; - - #[test] - fn should_remove_transactions_from_the_pool() { - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - let best = longest_chain.best_chain().unwrap(); - - // store the transaction in the pool - pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); - - // import the block - let mut builder = client.new_block(Default::default()).unwrap(); - 
builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - ).unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } -} diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 33a42e87fe04a..1362e86c2148d 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -19,51 +19,43 @@ #![warn(missing_docs)] -mod components; mod chain_spec; pub mod config; +#[macro_use] pub mod chain_ops; pub mod error; use std::io; +use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Duration, Instant}; +use serde::{Serialize, de::DeserializeOwned}; use futures::sync::mpsc; use parking_lot::Mutex; -use client::{BlockchainEvents, backend::Backend, runtime_api::BlockT}; +use client::{runtime_api::BlockT, Client}; use exit_future::Signal; use futures::prelude::*; use futures03::stream::{StreamExt as _, TryStreamExt as _}; -use keystore::Store as Keystore; -use network::{NetworkState, NetworkStateInfo}; -use log::{log, info, warn, debug, error, Level}; +use network::{NetworkService, NetworkState, specialization::NetworkSpecialization}; +use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; +use primitives::{Blake2Hasher, H256}; +use sr_primitives::BuildStorage; use sr_primitives::generic::BlockId; -use sr_primitives::traits::{Header, NumberFor, SaturatedConversion}; -use substrate_executor::NativeExecutor; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; -use tel::{telemetry, SUBSTRATE_INFO}; +use sr_primitives::traits::NumberFor; pub use self::error::Error; +pub use self::builder::{ServiceBuilder, ServiceBuilderExport, 
ServiceBuilderImport, ServiceBuilderRevert}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties}; pub use transaction_pool::txpool::{ self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError }; pub use client::FinalityNotifications; - -pub use components::{ - ServiceFactory, FullBackend, FullExecutor, LightBackend, - LightExecutor, Components, PoolApi, ComponentClient, ComponentOffchainStorage, - ComponentBlock, FullClient, LightClient, FullComponents, LightComponents, - CodeExecutor, NetworkService, FactoryChainSpec, FactoryBlock, - FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis, - ComponentExHash, ComponentExtrinsic, FactoryExtrinsic, InitialSessionKeys, -}; -use components::{StartRpc, MaintainTransactionPool, OffchainWorker}; +pub use rpc::Metadata as RpcMetadata; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] @@ -74,15 +66,15 @@ pub use futures::future::Executor; const DEFAULT_PROTOCOL_ID: &str = "sup"; /// Substrate service. -pub struct Service { - client: Arc>, - select_chain: Option, - network: Arc>, +pub struct NewService { + client: Arc, + select_chain: Option, + network: Arc, /// Sinks to propagate network status updates. network_status_sinks: Arc>, NetworkState + TNetStatus, NetworkState )>>>>, - transaction_pool: Arc>, + transaction_pool: Arc, /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. exit: exit_future::Exit, @@ -100,31 +92,22 @@ pub struct Service { /// The elements must then be polled manually. 
to_poll: Vec + Send>>, /// Configuration of this Service - config: FactoryFullConfiguration, - rpc_handlers: components::RpcHandler, + config: TCfg, + rpc_handlers: rpc_servers::RpcHandler, _rpc: Box, _telemetry: Option, _telemetry_on_connect_sinks: Arc>>>, - _offchain_workers: Option, - ComponentOffchainStorage, - ComponentBlock> - >>, + _offchain_workers: Option>, keystore: keystore::KeyStorePtr, + marker: PhantomData, } -/// Creates bare client without any networking. -pub fn new_client( - config: &FactoryFullConfiguration, -) -> Result>>, error::Error> { - let executor = NativeExecutor::new(config.default_heap_pages); - - components::FullComponents::::build_client( - config, - executor, - None, - ).map(|r| r.0) -} +/// A set of traits for the runtime genesis config. +pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} +impl RuntimeGenesis for T {} + +/// Alias for a an implementation of `futures::future::Executor`. +pub type TaskExecutor = Arc + Send>> + Send + Sync>; /// An handle for spawning tasks in the service. #[derive(Clone)] @@ -146,59 +129,38 @@ impl Executor + Send>> for SpawnTaskHandle } } -/// Stream of events for connection established to a telemetry server. -pub type TelemetryOnConnectNotifications = mpsc::UnboundedReceiver<()>; - -/// Used to hook on telemetry connection established events. -pub struct TelemetryOnConnect { - /// Event stream. - pub telemetry_connection_sinks: TelemetryOnConnectNotifications, -} - -impl Service { - /// Creates a new service. - pub fn new( - mut config: FactoryFullConfiguration, - ) -> Result { +macro_rules! new_impl { + ( + $block:ty, + $config:ident, + $build_components:expr, + $maintain_transaction_pool:expr, + $offchain_workers:expr, + $start_rpc:expr, + ) => {{ let (signal, exit) = exit_future::signal(); // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. 
let (to_spawn_tx, to_spawn_rx) = mpsc::unbounded:: + Send>>(); - // Create client - let executor = NativeExecutor::new(config.default_heap_pages); - - let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; - - let (client, on_demand) = Components::build_client(&config, executor, Some(keystore.clone()))?; - let select_chain = Components::build_select_chain(&mut config, client.clone())?; - - let transaction_pool = Arc::new( - Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? - ); - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !config.roles.is_light(), - pool: transaction_pool.clone(), - client: client.clone(), - }); - - let (import_queue, finality_proof_request_builder) = Components::build_import_queue( - &mut config, - client.clone(), - select_chain.clone(), - Some(transaction_pool.clone()), - )?; + // Create all the components. + let ( + client, + on_demand, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + ) = $build_components(&mut $config)?; let import_queue = Box::new(import_queue); - let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?; let chain_info = client.info().chain; - Components::RuntimeServices::generate_initial_session_keys( - client.clone(), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - - let version = config.full_version(); + let version = $config.full_version(); info!("Highest known block at #{}", chain_info.best_number); telemetry!( SUBSTRATE_INFO; @@ -207,10 +169,14 @@ impl Service { "best" => ?chain_info.best_hash ); - let network_protocol = ::build_network_protocol(&config)?; + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { + imports_external_transactions: !$config.roles.is_light(), + pool: transaction_pool.clone(), + 
client: client.clone(), + }); let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { + let protocol_id_full = match $config.chain_spec.protocol_id() { Some(pid) => pid, None => { warn!("Using default protocol ID {:?} because none is configured in the \ @@ -223,8 +189,8 @@ impl Service { }; let network_params = network::config::Params { - roles: config.roles, - network_config: config.network.clone(), + roles: $config.roles, + network_config: $config.network.clone(), chain: client.clone(), finality_proof_provider, finality_proof_request_builder, @@ -242,7 +208,7 @@ impl Service { #[allow(deprecated)] let offchain_storage = client.backend().offchain_storage(); - let offchain_workers = match (config.offchain_worker, offchain_storage) { + let offchain_workers = match ($config.offchain_worker, offchain_storage) { (true, Some(db)) => { Some(Arc::new(offchain::OffchainWorkers::new(client.clone(), db))) }, @@ -260,23 +226,25 @@ impl Service { let offchain = offchain_workers.as_ref().map(Arc::downgrade); let to_spawn_tx_ = to_spawn_tx.clone(); let network_state_info: Arc = network.clone(); - let is_validator = config.roles.is_authority(); + let is_validator = $config.roles.is_authority(); let events = client.import_notification_stream() .map(|v| Ok::<_, ()>(v)).compat() .for_each(move |notification| { let number = *notification.header.number(); + let txpool = txpool.upgrade(); - if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { - Components::RuntimeServices::maintain_transaction_pool( + if let (Some(txpool), Some(client)) = (txpool.as_ref(), wclient.upgrade()) { + $maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, &*txpool, ).map_err(|e| warn!("Pool error processing new block: {:?}", e))?; } - if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), offchain.as_ref().and_then(|o| o.upgrade())) { - let future = Components::RuntimeServices::offchain_workers( + let offchain = 
offchain.as_ref().and_then(|o| o.upgrade()); + if let (Some(txpool), Some(offchain)) = (txpool, offchain) { + let future = $offchain_workers( &number, &offchain, &txpool, @@ -321,7 +289,7 @@ impl Service { let client_ = client.clone(); let mut sys = System::new(); let self_pid = get_current_pid().ok(); - let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus>, NetworkState)>(); + let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); network_status_sinks.lock().push(netstat_tx); let tel_task = netstat_rx.for_each(move |(net_status, network_state)| { let info = client_.info(); @@ -374,23 +342,23 @@ impl Service { let (system_rpc_tx, system_rpc_rx) = futures03::channel::mpsc::unbounded(); let gen_handler = || { let system_info = rpc::system::SystemInfo { - chain_name: config.chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), - properties: config.chain_spec.properties(), + chain_name: $config.chain_spec.name().into(), + impl_name: $config.impl_name.into(), + impl_version: $config.impl_version.into(), + properties: $config.chain_spec.properties(), }; - Components::RuntimeServices::start_rpc( + $start_rpc( client.clone(), system_rpc_tx.clone(), system_info.clone(), Arc::new(SpawnTaskHandle { sender: to_spawn_tx.clone() }), transaction_pool.clone(), - Components::build_rpc_extensions(client.clone(), transaction_pool.clone()), + rpc_extensions.clone(), keystore.clone(), ) }; let rpc_handlers = gen_handler(); - let rpc = start_rpc_servers(&config, gen_handler)?; + let rpc = start_rpc_servers(&$config, gen_handler)?; let _ = to_spawn_tx.unbounded_send(Box::new(build_network_future( network_mut, @@ -406,17 +374,17 @@ impl Service { let telemetry_connection_sinks: Arc>>> = Default::default(); // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.roles.is_authority(); + let telemetry = 
$config.telemetry_endpoints.clone().map(|endpoints| { + let is_authority = $config.roles.is_authority(); let network_id = network.local_peer_id().to_base58(); - let name = config.name.clone(); - let impl_name = config.impl_name.to_owned(); + let name = $config.name.clone(); + let impl_name = $config.impl_name.to_owned(); let version = version.clone(); - let chain_name = config.chain_spec.name().to_owned(); + let chain_name = $config.chain_spec.name().to_owned(); let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); let telemetry = tel::init_telemetry(tel::TelemetryConfig { endpoints, - wasm_external_transport: config.telemetry_external_transport.take(), + wasm_external_transport: $config.telemetry_external_transport.take(), }); let future = telemetry.clone() .map(|ev| Ok::<_, ()>(ev)) @@ -446,7 +414,7 @@ impl Service { telemetry }); - Ok(Service { + Ok(NewService { client, network, network_status_sinks, @@ -458,56 +426,145 @@ impl Service { to_spawn_tx, to_spawn_rx, to_poll: Vec::new(), - config, + $config, rpc_handlers, _rpc: rpc, _telemetry: telemetry, _offchain_workers: offchain_workers, _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, + marker: PhantomData::<$block>, }) - } + }} +} + +mod builder; + +/// Abstraction over a Substrate service. +pub trait AbstractService: 'static + Future + + Executor + Send>> + Send { + /// Type of block of this chain. + type Block: BlockT; + /// Backend storage for the client. + type Backend: 'static + client::backend::Backend; + /// How to execute calls towards the runtime. + type CallExecutor: 'static + client::CallExecutor + Send + Sync + Clone; + /// API that the runtime provides. + type RuntimeApi: Send + Sync; + /// Configuration struct of the service. + type Config; + /// Chain selection algorithm. + type SelectChain: consensus_common::SelectChain; + /// API of the transaction pool. + type TransactionPoolApi: ChainApi; + /// Network specialization. 
+ type NetworkSpecialization: NetworkSpecialization; + + /// Get event stream for telemetry connection established events. + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()>; + + /// Returns the configuration passed on construction. + fn config(&self) -> &Self::Config; + + /// Returns the configuration passed on construction. + fn config_mut(&mut self) -> &mut Self::Config; + + /// return a shared instance of Telemetry (if enabled) + fn telemetry(&self) -> Option; + + /// Spawns a task in the background that runs the future passed as parameter. + fn spawn_task(&self, task: impl Future + Send + 'static); + + /// Spawns a task in the background that runs the future passed as + /// parameter. The given task is considered essential, i.e. if it errors we + /// trigger a service exit. + fn spawn_essential_task(&self, task: impl Future + Send + 'static); + + /// Returns a handle for spawning tasks. + fn spawn_task_handle(&self) -> SpawnTaskHandle; + + /// Returns the keystore that stores keys. + fn keystore(&self) -> keystore::KeyStorePtr; + + /// Starts an RPC query. + /// + /// The query is passed as a string and must be a JSON text similar to what an HTTP client + /// would for example send. + /// + /// Returns a `Future` that contains the optional response. + /// + /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to + /// send back spontaneous events. + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send>; + + /// Get shared client instance. + fn client(&self) -> Arc>; + + /// Get clone of select chain. + fn select_chain(&self) -> Option; + + /// Get shared network instance. + fn network(&self) -> Arc>; - /// Returns a reference to the config passed at initialization. - pub fn config(&self) -> &FactoryFullConfiguration { + /// Returns a receiver that periodically receives a status of the network. 
+ fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; + + /// Get shared transaction pool instance. + fn transaction_pool(&self) -> Arc>; + + /// Get a handle to a future that will resolve on exit. + fn on_exit(&self) -> ::exit_future::Exit; +} + +impl AbstractService for + NewService, TSc, NetworkStatus, + NetworkService, TransactionPool, TOc> +where TCfg: 'static + Send, + TBl: BlockT, + TBackend: 'static + client::backend::Backend, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TRtApi: 'static + Send + Sync, + TSc: consensus_common::SelectChain + 'static + Clone + Send, + TExPoolApi: 'static + ChainApi, + TOc: 'static + Send + Sync, + TNetSpec: NetworkSpecialization, +{ + type Block = TBl; + type Backend = TBackend; + type CallExecutor = TExec; + type RuntimeApi = TRtApi; + type Config = TCfg; + type SelectChain = TSc; + type TransactionPoolApi = TExPoolApi; + type NetworkSpecialization = TNetSpec; + + fn config(&self) -> &Self::Config { &self.config } - /// Returns a reference to the config passed at initialization. - /// - /// > **Note**: This method is currently necessary because we extract some elements from the - /// > configuration at the end of the service initialization. It is intended to be - /// > removed. - pub fn config_mut(&mut self) -> &mut FactoryFullConfiguration { + fn config_mut(&mut self) -> &mut Self::Config { &mut self.config } - /// Get event stream for telemetry connection established events. - pub fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()> { let (sink, stream) = mpsc::unbounded(); self._telemetry_on_connect_sinks.lock().push(sink); stream } - /// Return a shared instance of Telemetry (if enabled) - pub fn telemetry(&self) -> Option { + fn telemetry(&self) -> Option { self._telemetry.as_ref().map(|t| t.clone()) } - /// Returns the keystore instance. 
- pub fn keystore(&self) -> keystore::KeyStorePtr { + fn keystore(&self) -> keystore::KeyStorePtr { self.keystore.clone() } - /// Spawns a task in the background that runs the future passed as parameter. - pub fn spawn_task(&self, task: impl Future + Send + 'static) { + fn spawn_task(&self, task: impl Future + Send + 'static) { let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); } - /// Spawns a task in the background that runs the future passed as - /// parameter. The given task is considered essential, i.e. if it errors we - /// trigger a service exit. - pub fn spawn_essential_task(&self, task: impl Future + Send + 'static) { + fn spawn_essential_task(&self, task: impl Future + Send + 'static) { let essential_failed = self.essential_failed.clone(); let essential_task = Box::new(task.map_err(move |_| { error!("Essential task failed. Shutting down service."); @@ -517,62 +574,45 @@ impl Service { let _ = self.to_spawn_tx.unbounded_send(essential_task); } - /// Returns a handle for spawning tasks. - pub fn spawn_task_handle(&self) -> SpawnTaskHandle { + fn spawn_task_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { sender: self.to_spawn_tx.clone(), } } - /// Starts an RPC query. - /// - /// The query is passed as a string and must be a JSON text similar to what an HTTP client - /// would for example send. - /// - /// Returns a `Future` that contains the optional response. - /// - /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to - /// send back spontaneous events. - pub fn rpc_query(&self, mem: &RpcSession, request: &str) - -> impl Future, Error = ()> - { - self.rpc_handlers.handle_request(request, mem.metadata.clone()) + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { + Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) } - /// Get shared client instance. 
- pub fn client(&self) -> Arc> { + fn client(&self) -> Arc> { self.client.clone() } - /// Get clone of select chain. - pub fn select_chain(&self) -> Option<::SelectChain> { + fn select_chain(&self) -> Option { self.select_chain.clone() } - /// Get shared network instance. - pub fn network(&self) -> Arc> { + fn network(&self) -> Arc> { self.network.clone() } - /// Returns a receiver that periodically receives a status of the network. - pub fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus>, NetworkState)> { + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = mpsc::unbounded(); self.network_status_sinks.lock().push(sink); stream } - /// Get shared transaction pool instance. - pub fn transaction_pool(&self) -> Arc> { + fn transaction_pool(&self) -> Arc> { self.transaction_pool.clone() } - /// Get a handle to a future that will resolve on exit. - pub fn on_exit(&self) -> ::exit_future::Exit { + fn on_exit(&self) -> ::exit_future::Exit { self.exit.clone() } } -impl Future for Service where Components: components::Components { +impl Future for +NewService { type Item = (); type Error = Error; @@ -603,9 +643,8 @@ impl Future for Service where Components: components::Co } } -impl Executor + Send>> - for Service where Components: components::Components -{ +impl Executor + Send>> for +NewService { fn execute( &self, future: Box + Send> @@ -746,7 +785,8 @@ pub struct NetworkStatus { pub average_upload_per_sec: u64, } -impl Drop for Service where Components: components::Components { +impl Drop for +NewService { fn drop(&mut self) { debug!(target: "service", "Substrate service shutdown"); if let Some(signal) = self.signal.take() { @@ -757,7 +797,7 @@ impl Drop for Service where Components: components::Comp /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. 
#[cfg(not(target_os = "unknown"))] -fn start_rpc_servers components::RpcHandler>( +fn start_rpc_servers rpc_servers::RpcHandler>( config: &Configuration, mut gen_handler: H ) -> Result, error::Error> { @@ -906,225 +946,6 @@ where } } -/// Constructs a service factory with the given name that implements the `ServiceFactory` trait. -/// The required parameters are required to be given in the exact order. Some parameters are followed -/// by `{}` blocks. These blocks are required and used to initialize the given parameter. -/// In these block it is required to write a closure that takes the same number of arguments, -/// the corresponding function in the `ServiceFactory` trait provides. -/// -/// # Example -/// -/// ``` -/// # use substrate_service::{ -/// # construct_service_factory, Service, FullBackend, FullExecutor, LightBackend, LightExecutor, -/// # FullComponents, LightComponents, FactoryFullConfiguration, FullClient -/// # }; -/// # use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -/// # use network::{config::DummyFinalityProofRequestBuilder, construct_simple_protocol}; -/// # use client::{self, LongestChain}; -/// # use consensus_common::import_queue::{BasicQueue, Verifier}; -/// # use consensus_common::{BlockOrigin, BlockImportParams, well_known_cache_keys::Id as CacheKeyId}; -/// # use node_runtime::{GenesisConfig, RuntimeApi}; -/// # use std::sync::Arc; -/// # use node_primitives::Block; -/// # use babe_primitives::AuthorityPair as BabePair; -/// # use grandpa_primitives::AuthorityPair as GrandpaPair; -/// # use sr_primitives::Justification; -/// # use sr_primitives::traits::Block as BlockT; -/// # use grandpa; -/// # construct_simple_protocol! 
{ -/// # pub struct NodeProtocol where Block = Block { } -/// # } -/// # struct MyVerifier; -/// # impl Verifier for MyVerifier { -/// # fn verify( -/// # &mut self, -/// # origin: BlockOrigin, -/// # header: B::Header, -/// # justification: Option, -/// # body: Option>, -/// # ) -> Result<(BlockImportParams, Option)>>), String> { -/// # unimplemented!(); -/// # } -/// # } -/// type FullChainApi = transaction_pool::ChainApi< -/// client::Client, FullExecutor, Block, RuntimeApi>, Block>; -/// type LightChainApi = transaction_pool::ChainApi< -/// client::Client, LightExecutor, Block, RuntimeApi>, Block>; -/// -/// construct_service_factory! { -/// struct Factory { -/// // Declare the block type -/// Block = Block, -/// RuntimeApi = RuntimeApi, -/// // Declare the network protocol and give an initializer. -/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, -/// RuntimeDispatch = node_executor::Executor, -/// FullTransactionPoolApi = FullChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// LightTransactionPoolApi = LightChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// Genesis = GenesisConfig, -/// Configuration = (), -/// FullService = FullComponents -/// { |config| >::new(config) }, -/// // Setup as Consensus Authority (if the role and key are given) -/// AuthoritySetup = { -/// |service: Self::FullService| { -/// Ok(service) -/// }}, -/// LightService = LightComponents -/// { |config| >::new(config) }, -/// FullImportQueue = BasicQueue -/// { |_, client, _, _| Ok(BasicQueue::new(MyVerifier, Box::new(client), None, None)) }, -/// LightImportQueue = BasicQueue -/// { |_, client| { -/// let fprb = Box::new(DummyFinalityProofRequestBuilder::default()) as Box<_>; -/// Ok((BasicQueue::new(MyVerifier, Box::new(client), None, None), fprb)) -/// }}, -/// SelectChain = LongestChain, Self::Block> -/// { |config: 
&FactoryFullConfiguration, client: Arc>| { -/// #[allow(deprecated)] -/// Ok(LongestChain::new(client.backend().clone())) -/// }}, -/// FinalityProofProvider = { |client: Arc>| { -/// Ok(Some(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)) -/// }}, -/// RpcExtensions = (), -/// } -/// } -/// ``` -#[macro_export] -macro_rules! construct_service_factory { - ( - $(#[$attr:meta])* - struct $name:ident { - Block = $block:ty, - RuntimeApi = $runtime_api:ty, - NetworkProtocol = $protocol:ty { $( $protocol_init:tt )* }, - RuntimeDispatch = $dispatch:ty, - FullTransactionPoolApi = $full_transaction:ty { $( $full_transaction_init:tt )* }, - LightTransactionPoolApi = $light_transaction:ty { $( $light_transaction_init:tt )* }, - Genesis = $genesis:ty, - Configuration = $config:ty, - FullService = $full_service:ty { $( $full_service_init:tt )* }, - AuthoritySetup = { $( $authority_setup:tt )* }, - LightService = $light_service:ty { $( $light_service_init:tt )* }, - FullImportQueue = $full_import_queue:ty - { $( $full_import_queue_init:tt )* }, - LightImportQueue = $light_import_queue:ty - { $( $light_import_queue_init:tt )* }, - SelectChain = $select_chain:ty - { $( $select_chain_init:tt )* }, - FinalityProofProvider = { $( $finality_proof_provider_init:tt )* }, - RpcExtensions = $rpc_extensions_ty:ty - $( { $( $rpc_extensions:tt )* } )?, - } - ) => { - $( #[$attr] )* - pub struct $name {} - - #[allow(unused_variables)] - impl $crate::ServiceFactory for $name { - type Block = $block; - type RuntimeApi = $runtime_api; - type NetworkProtocol = $protocol; - type RuntimeDispatch = $dispatch; - type FullTransactionPoolApi = $full_transaction; - type LightTransactionPoolApi = $light_transaction; - type Genesis = $genesis; - type Configuration = $config; - type FullService = $full_service; - type LightService = $light_service; - type FullImportQueue = $full_import_queue; - type LightImportQueue = $light_import_queue; - type SelectChain = $select_chain; - 
type RpcExtensions = $rpc_extensions_ty; - - fn build_full_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::FullClient> - ) -> $crate::Result<$crate::TransactionPool, $crate::Error> - { - ( $( $full_transaction_init )* ) (config, client) - } - - fn build_light_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::LightClient> - ) -> $crate::Result<$crate::TransactionPool, $crate::Error> - { - ( $( $light_transaction_init )* ) (config, client) - } - - fn build_network_protocol(config: &$crate::FactoryFullConfiguration) - -> $crate::Result - { - ( $( $protocol_init )* ) (config) - } - - fn build_select_chain( - config: &mut $crate::FactoryFullConfiguration, - client: Arc<$crate::FullClient> - ) -> $crate::Result { - ( $( $select_chain_init )* ) (config, client) - } - - fn build_full_import_queue( - config: &mut $crate::FactoryFullConfiguration, - client: $crate::Arc<$crate::FullClient>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - ) -> $crate::Result { - ( $( $full_import_queue_init )* ) (config, client, select_chain, transaction_pool) - } - - fn build_light_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc<$crate::LightClient>, - ) -> Result<(Self::LightImportQueue, $crate::BoxFinalityProofRequestBuilder<$block>), $crate::Error> { - ( $( $light_import_queue_init )* ) (config, client) - } - - fn build_finality_proof_provider( - client: Arc<$crate::FullClient> - ) -> Result>>, $crate::Error> { - ( $( $finality_proof_provider_init )* ) (client) - } - - fn new_light( - config: $crate::FactoryFullConfiguration - ) -> $crate::Result - { - ( $( $light_service_init )* ) (config) - } - - fn new_full( - config: $crate::FactoryFullConfiguration - ) -> Result - { - ( $( $full_service_init )* ) (config).and_then(|service| { - ($( $authority_setup )*)(service) - }) - } - - fn build_full_rpc_extensions( - client: Arc<$crate::FullClient>, - transaction_pool: 
Arc<$crate::TransactionPool>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - - fn build_light_rpc_extensions( - client: Arc<$crate::LightClient>, - transaction_pool: Arc<$crate::TransactionPool>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index c2895c5329496..870f287bff8f2 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -27,32 +27,31 @@ use tempdir::TempDir; use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; use service::{ - ServiceFactory, + AbstractService, + ChainSpec, Configuration, - FactoryFullConfiguration, - FactoryChainSpec, Roles, - FactoryExtrinsic, + Error, }; use network::{multiaddr, Multiaddr}; use network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; -use sr_primitives::generic::BlockId; +use sr_primitives::{generic::BlockId, traits::Block as BlockT}; use consensus::{BlockImportParams, BlockImport}; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, SyncService, Multiaddr)>, - full_nodes: Vec<(usize, SyncService, Multiaddr)>, - light_nodes: Vec<(usize, SyncService, Multiaddr)>, - chain_spec: FactoryChainSpec, + authority_nodes: Vec<(usize, SyncService, U, Multiaddr)>, + full_nodes: Vec<(usize, SyncService, U, Multiaddr)>, + light_nodes: Vec<(usize, SyncService, Multiaddr)>, + chain_spec: ChainSpec, base_port: u16, nodes: usize, } -/// Wraps around an `Arc>` and implements `Future`. +/// Wraps around an `Arc` and implements `Future`. 
pub struct SyncService(Arc>); impl SyncService { @@ -82,22 +81,24 @@ impl> Future for SyncService { } } -impl TestNet { +impl TestNet +where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static +{ pub fn run_until_all_full( &mut self, full_predicate: FP, light_predicate: LP, ) where - FP: Send + Fn(usize, &SyncService) -> bool + 'static, - LP: Send + Fn(usize, &SyncService) -> bool + 'static, + FP: Send + Fn(usize, &SyncService) -> bool + 'static, + LP: Send + Fn(usize, &SyncService) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); let interval = Interval::new_interval(Duration::from_millis(100)) .map_err(|_| ()) .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _)| + let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| full_predicate(*id, service) ); @@ -125,14 +126,14 @@ impl TestNet { } } -fn node_config ( +fn node_config ( index: usize, - spec: &FactoryChainSpec, + spec: &ChainSpec, role: Roles, key_seed: Option, base_port: u16, root: &TempDir, -) -> FactoryFullConfiguration +) -> Configuration<(), G> { let root = root.path().join(format!("node-{}", index)); @@ -194,18 +195,18 @@ fn node_config ( } } -impl TestNet where - F::FullService: Future, - F::LightService: Future, +impl TestNet where + F: AbstractService, + L: AbstractService, { fn new( temp: &TempDir, - spec: FactoryChainSpec, - full: usize, - light: usize, - authorities: Vec, + spec: ChainSpec, + full: impl Iterator) -> Result<(F, U), Error>>, + light: impl Iterator) -> Result>, + authorities: impl Iterator) -> Result<(F, U), Error>)>, base_port: u16 - ) -> TestNet { + ) -> TestNet { let _ = env_logger::try_init(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -222,79 +223,89 @@ impl TestNet where net } - fn insert_nodes(&mut self, temp: &TempDir, full: usize, light: usize, authorities: Vec) { - let mut nodes = self.nodes; - let 
base_port = self.base_port; - let spec = &self.chain_spec; + fn insert_nodes( + &mut self, + temp: &TempDir, + full: impl Iterator) -> Result<(F, U), Error>>, + light: impl Iterator) -> Result>, + authorities: impl Iterator) -> Result<(F, U), Error>)> + ) { let executor = self.runtime.executor(); - self.authority_nodes.extend(authorities.iter().enumerate().map(|(index, key)| { - let node_config = node_config::( - index, - &spec, + + for (key, authority) in authorities { + let node_config = node_config( + self.nodes, + &self.chain_spec, Roles::AUTHORITY, - Some(key.clone()), - base_port, + Some(key), + self.base_port, &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let (service, user_data) = authority(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - ((index + nodes), service, addr) - })); - nodes += authorities.len(); + self.authority_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } - self.full_nodes.extend((nodes..nodes + full).map(|index| { - let node_config = node_config::(index, &spec, Roles::FULL, None, base_port, &temp); + for full in full { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let (service, user_data) = full(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = 
addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += full; + self.full_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } - self.light_nodes.extend((nodes..nodes + light).map(|index| { - let node_config = node_config::(index, &spec, Roles::LIGHT, None, base_port, &temp); + for light in light { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_light(node_config).expect("Error creating test node service")); + let service = SyncService::from(light(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += light; - - self.nodes = nodes; + self.light_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } } } -pub fn connectivity(spec: FactoryChainSpec) where - F::FullService: Future, - F::LightService: Future, +pub fn connectivity(spec: ChainSpec, full_builder: Fb, light_builder: Lb) where + Fb: Fn(Configuration<(), G>) -> Result, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, { const NUM_FULL_NODES: usize = 5; const NUM_LIGHT_NODES: usize = 5; { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); let runtime = { - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking star topology"); - let first_address = network.full_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + let first_address = network.full_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| service.get().network().num_connected() == NUM_FULL_NODES - 1 @@ -311,27 +322,31 @@ pub fn connectivity(spec: FactoryChainSpec) where { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); { - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec, - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking linked topology"); - let mut address = network.full_nodes[0].2.clone(); + let mut address = network.full_nodes[0].3.clone(); let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); for i in 0..max_nodes { if i != 0 { - if let Some((_, service, node_id)) = network.full_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } @@ -345,42 +360,53 @@ pub fn connectivity(spec: FactoryChainSpec) where } } -pub fn sync(spec: FactoryChainSpec, mut block_factory: B, mut extrinsic_factory: E) where - F: ServiceFactory, - F::FullService: Future, - F::LightService: Future, - B: FnMut(&SyncService) -> BlockImportParams, - E: FnMut(&SyncService) -> FactoryExtrinsic, +pub fn sync( + spec: ChainSpec, + full_builder: Fb, + light_builder: Lb, + mut block_factory: B, + mut extrinsic_factory: E +) where + Fb: Fn(Configuration<(), G>) -> Result<(F, U), Error>, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, + B: FnMut(&F, &U) -> BlockImportParams, + E: FnMut(&F, &U) -> ::Extrinsic, + U: Clone + Send + 'static, { const NUM_FULL_NODES: usize = 10; // FIXME: BABE light client support is currently not working. 
const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 512; let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir"); - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), 30500, ); info!("Checking block sync"); let first_address = { let first_service = &network.full_nodes[0].1; + let first_user_data = &network.full_nodes[0].2; let mut client = first_service.get().client(); for i in 0 .. NUM_BLOCKS { if i % 128 == 0 { info!("Generating #{}", i); } - let import_data = block_factory(&first_service); + let import_data = block_factory(&first_service.get(), first_user_data); client.import_block(import_data, HashMap::new()).expect("Error importing test block"); } - network.full_nodes[0].2.clone() + network.full_nodes[0].3.clone() }; info!("Running sync"); - for (_, service, _) in network.full_nodes.iter().skip(1) { + for (_, service, _, _) in network.full_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { @@ -395,8 +421,9 @@ pub fn sync(spec: FactoryChainSpec, mut block_factory: B, mut extrin info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); + let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.get().client().info().chain.best_number); - let extrinsic = extrinsic_factory(&first_service); + let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); first_service.get().transaction_pool().submit_one(&best_block, 
extrinsic).unwrap(); network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, @@ -404,33 +431,39 @@ pub fn sync(spec: FactoryChainSpec, mut block_factory: B, mut extrin ); } -pub fn consensus(spec: FactoryChainSpec, authorities: Vec) where - F: ServiceFactory, - F::FullService: Future, - F::LightService: Future, +pub fn consensus( + spec: ChainSpec, + full_builder: Fb, + light_builder: Lb, + authorities: impl IntoIterator +) where + Fb: Fn(Configuration<(), G>) -> Result, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, { const NUM_FULL_NODES: usize = 10; const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds let temp = TempDir::new("substrate-conensus-test").expect("Error creating test dir"); - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES / 2, - NUM_LIGHT_NODES / 2, - authorities, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30600, ); info!("Checking consensus"); - let first_address = network.authority_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter() { + let first_address = network.authority_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } - for (_, service, _) in network.authority_nodes.iter().skip(1) { + for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding 
reserved peer"); } network.run_until_all_full( @@ -441,8 +474,15 @@ pub fn consensus(spec: FactoryChainSpec, authorities: Vec) where ); info!("Adding more peers"); - network.insert_nodes(&temp, NUM_FULL_NODES / 2, NUM_LIGHT_NODES / 2, vec![]); - for (_, service, _) in network.full_nodes.iter() { + network.insert_nodes( + &temp, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + ); + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { diff --git a/core/sr-api-macros/src/decl_runtime_apis.rs b/core/sr-api-macros/src/decl_runtime_apis.rs index 27f102740b83b..0e69c2b76d701 100644 --- a/core/sr-api-macros/src/decl_runtime_apis.rs +++ b/core/sr-api-macros/src/decl_runtime_apis.rs @@ -552,9 +552,9 @@ impl<'a> ToClientSideDecl<'a> { fn fold_trait_item_method(&mut self, method: TraitItemMethod) -> (TraitItemMethod, Option, TraitItemMethod) { let crate_ = self.crate_; - let context_other = quote!( #crate_::runtime_api::ExecutionContext::Other ); + let context = quote!( #crate_::runtime_api::ExecutionContext::OffchainCall(None) ); let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context_other); + let fn_decl = self.create_method_decl(method.clone(), context); let fn_decl_ctx = self.create_method_decl_with_context(method); (fn_decl, fn_impl, fn_decl_ctx) diff --git a/core/sr-io/src/offchain/http.rs b/core/sr-io/src/offchain/http.rs index 6685dd023f469..7aab309f1384c 100644 --- a/core/sr-io/src/offchain/http.rs +++ 
b/core/sr-io/src/offchain/http.rs @@ -224,7 +224,7 @@ pub enum Error { /// Deadline has been reached. DeadlineReached, /// Request had timed out. - Timeout, + IoError, /// Unknown error has been ecountered. Unknown, } @@ -283,8 +283,8 @@ impl PendingRequest { .zip(requests.into_iter()) .map(|(status, req)| match status { RequestStatus::DeadlineReached => Err(req), - RequestStatus::Timeout => Ok(Err(Error::Timeout)), - RequestStatus::Unknown => Ok(Err(Error::Unknown)), + RequestStatus::IoError => Ok(Err(Error::IoError)), + RequestStatus::Invalid => Ok(Err(Error::Unknown)), RequestStatus::Finished(code) => Ok(Ok(Response::new(req.id, code))), }) .collect() diff --git a/core/sr-io/without_std.rs b/core/sr-io/without_std.rs index 954eccc9cfafd..c5f2ac483fa81 100644 --- a/core/sr-io/without_std.rs +++ b/core/sr-io/without_std.rs @@ -1163,7 +1163,7 @@ impl OffchainApi for () { statuses .into_iter() - .map(|status| status.try_into().unwrap_or(offchain::HttpRequestStatus::Unknown)) + .map(|status| status.try_into().unwrap_or(offchain::HttpRequestStatus::Invalid)) .collect() } diff --git a/core/sr-primitives/src/traits.rs b/core/sr-primitives/src/traits.rs index abbbef3a5b260..3dc1649e495d4 100644 --- a/core/sr-primitives/src/traits.rs +++ b/core/sr-primitives/src/traits.rs @@ -796,7 +796,7 @@ impl Checkable for T { /// An abstract error concerning an attempt to verify, check or dispatch the transaction. This /// cannot be more concrete because it's designed to work reasonably well over a broad range of /// possible transaction types. -#[cfg_attr(feature = "std", derive(Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub enum DispatchError { /// General error to do with the inability to pay some fees (e.g. account balance too low). 
Payment, diff --git a/core/sr-version/src/lib.rs b/core/sr-version/src/lib.rs index 731c5b63c7137..f787d8dd7ebfb 100644 --- a/core/sr-version/src/lib.rs +++ b/core/sr-version/src/lib.rs @@ -96,7 +96,13 @@ pub struct RuntimeVersion { pub impl_version: u32, /// List of supported API "features" along with their versions. - #[cfg_attr(feature = "std", serde(serialize_with = "apis_serialize::serialize"))] + #[cfg_attr( + feature = "std", + serde( + serialize_with = "apis_serialize::serialize", + deserialize_with = "apis_serialize::deserialize", + ) + )] pub apis: ApisVec, } @@ -163,7 +169,7 @@ impl NativeVersion { mod apis_serialize { use super::*; use impl_serde::serialize as bytes; - use serde::{Serializer, ser::SerializeTuple}; + use serde::{Serializer, de, ser::SerializeTuple}; #[derive(Serialize)] struct ApiId<'a>( @@ -187,4 +193,44 @@ mod apis_serialize { { bytes::serialize(*apis, ser) } + + #[derive(Deserialize)] + struct ApiIdOwned( + #[serde(deserialize_with="deserialize_bytes")] + super::ApiId, + u32, + ); + + pub fn deserialize<'de, D>(deserializer: D) -> Result where + D: de::Deserializer<'de>, + { + struct Visitor; + impl<'de> de::Visitor<'de> for Visitor { + type Value = ApisVec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence of api id and version tuples") + } + + fn visit_seq(self, mut visitor: V) -> Result where + V: de::SeqAccess<'de>, + { + let mut apis = Vec::new(); + while let Some(value) = visitor.next_element::()? 
{ + apis.push((value.0, value.1)); + } + Ok(apis.into()) + } + } + deserializer.deserialize_seq(Visitor) + } + + pub fn deserialize_bytes<'de, D>(d: D) -> Result where + D: de::Deserializer<'de> + { + let bytes = bytes::deserialize_check_len(d, bytes::ExpectedLen::Exact(8))?; + let mut arr = [0; 8]; + arr.copy_from_slice(&bytes); + Ok(arr) + } } diff --git a/core/test-runtime/client/src/lib.rs b/core/test-runtime/client/src/lib.rs index f1cbb6fd8c921..ad5badf8bf363 100644 --- a/core/test-runtime/client/src/lib.rs +++ b/core/test-runtime/client/src/lib.rs @@ -51,8 +51,7 @@ mod local_executor { native_executor_instance!( pub LocalExecutor, runtime::api::dispatch, - runtime::native_version, - runtime::WASM_BINARY + runtime::native_version ); } @@ -97,12 +96,34 @@ pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor pub struct GenesisParameters { support_changes_trie: bool, heap_pages_override: Option, + extra_storage: Vec<(Vec, Vec)>, +} + +impl GenesisParameters { + fn genesis_config(&self) -> GenesisConfig { + GenesisConfig::new( + self.support_changes_trie, + vec![ + sr25519::Public::from(Sr25519Keyring::Alice).into(), + sr25519::Public::from(Sr25519Keyring::Bob).into(), + sr25519::Public::from(Sr25519Keyring::Charlie).into(), + ], + vec![ + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), + ], + 1000, + self.heap_pages_override, + self.extra_storage.clone(), + ) + } } impl generic_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay) { use codec::Encode; - let mut storage = genesis_config(self.support_changes_trie, self.heap_pages_override).genesis_map(); + let mut storage = self.genesis_config().genesis_map(); let child_roots = storage.1.iter().map(|(sk, child_map)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( @@ -157,6 +178,13 @@ pub trait TestClientBuilderExt: Sized { /// Override the default value 
for Wasm heap pages. fn set_heap_pages(self, heap_pages: u64) -> Self; + /// Add an extra value into the genesis storage. + /// + /// # Panics + /// + /// Panics if the key is empty. + fn add_extra_storage>, V: Into>>(self, key: K, value: V) -> Self; + /// Build the test client. fn build(self) -> Client { self.build_with_longest_chain().0 @@ -182,28 +210,18 @@ impl TestClientBuilderExt for TestClientBuilder< self } + fn add_extra_storage>, V: Into>>(mut self, key: K, value: V) -> Self { + let key = key.into(); + assert!(!key.is_empty()); + self.genesis_init_mut().extra_storage.push((key, value.into())); + self + } + fn build_with_longest_chain(self) -> (Client, client::LongestChain) { self.build_with_native_executor(None) } } -fn genesis_config(support_changes_trie: bool, heap_pages_override: Option) -> GenesisConfig { - GenesisConfig::new( - support_changes_trie, - vec![ - sr25519::Public::from(Sr25519Keyring::Alice).into(), - sr25519::Public::from(Sr25519Keyring::Bob).into(), - sr25519::Public::from(Sr25519Keyring::Charlie).into(), - ], vec![ - AccountKeyring::Alice.into(), - AccountKeyring::Bob.into(), - AccountKeyring::Charlie.into(), - ], - 1000, - heap_pages_override, - ) -} - /// Creates new client instance used for tests. pub fn new() -> Client { TestClientBuilder::new().build() diff --git a/core/test-runtime/src/genesismap.rs b/core/test-runtime/src/genesismap.rs index 7686ed08bc02e..c2dd49156d7a0 100644 --- a/core/test-runtime/src/genesismap.rs +++ b/core/test-runtime/src/genesismap.rs @@ -25,10 +25,12 @@ use sr_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; /// Configuration of a general Substrate test genesis block. 
pub struct GenesisConfig { - pub changes_trie_config: Option, - pub authorities: Vec, - pub balances: Vec<(AccountId, u64)>, - pub heap_pages_override: Option, + changes_trie_config: Option, + authorities: Vec, + balances: Vec<(AccountId, u64)>, + heap_pages_override: Option, + /// Additional storage key pairs that will be added to the genesis map. + extra_storage: Vec<(Vec, Vec)>, } impl GenesisConfig { @@ -38,6 +40,7 @@ impl GenesisConfig { endowed_accounts: Vec, balance: u64, heap_pages_override: Option, + extra_storage: Vec<(Vec, Vec)>, ) -> Self { GenesisConfig { changes_trie_config: match support_changes_trie { @@ -47,6 +50,7 @@ impl GenesisConfig { authorities: authorities.clone(), balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, + extra_storage, } } @@ -70,6 +74,10 @@ impl GenesisConfig { map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); } map.insert(twox_128(&b"sys:auth"[..])[..].to_vec(), self.authorities.encode()); + // Finally, add the extra storage entries. 
+ for (key, value) in self.extra_storage.iter().cloned() { + map.insert(key, value); + } (map, Default::default()) } } diff --git a/core/test-runtime/src/system.rs b/core/test-runtime/src/system.rs index f3c890cf79f54..63f9c0050b73c 100644 --- a/core/test-runtime/src/system.rs +++ b/core/test-runtime/src/system.rs @@ -62,6 +62,12 @@ pub fn initialize_block(header: &Header) { ::put(&header.parent_hash); ::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); + + // try to read something that depends on current header digest + // so that it'll be included in execution proof + if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { + let _: Option = storage::unhashed::get(&v); + } } pub fn get_block_number() -> Option { @@ -104,6 +110,12 @@ fn execute_block_with_state_root_handler( assert!(txs_root == header.extrinsics_root, "Transaction trie root must be valid."); } + // try to read something that depends on current header digest + // so that it'll be included in execution proof + if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { + let _: Option = storage::unhashed::get(&v); + } + // execute transactions block.extrinsics.iter().enumerate().for_each(|(i, e)| { storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(i as u32)); diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index 4d672491c18e6..f6edbb2cc3ee3 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -4,9 +4,8 @@ use std::cell::RefCell; use tokio::runtime::Runtime; pub use substrate_cli::{VersionInfo, IntoExit, error}; use substrate_cli::{informant, parse_and_prepare, ParseAndPrepare, NoCustom}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use crate::chain_spec; -use std::ops::Deref; use log::info; /// Parse command line arguments into service configuration. 
@@ -16,7 +15,8 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by {}, 2017, 2018", version.author); @@ -27,21 +27,24 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(_) => Ok(()) }?; @@ -55,14 +58,13 @@ fn load_spec(id: &str) -> Result, String> { }) } -fn run_until_exit( +fn run_until_exit( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref>, - T: Future + Send + 
'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); @@ -99,7 +101,8 @@ impl IntoExit for Exit { let exit_send_cell = RefCell::new(Some(exit_send)); ctrlc::set_handler(move || { - if let Some(exit_send) = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take() { + let exit_send = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take(); + if let Some(exit_send) = exit_send { exit_send.send(()).expect("Error sending exit notification"); } }).expect("Error setting Ctrl-C handler"); diff --git a/node-template/src/main.rs b/node-template/src/main.rs index 18e9638833fd2..024efcc7db541 100644 --- a/node-template/src/main.rs +++ b/node-template/src/main.rs @@ -4,6 +4,7 @@ #![warn(unused_extern_crates)] mod chain_spec; +#[macro_use] mod service; mod cli; diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 2baa0c7631373..7bb4cc8d52d68 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -1,32 +1,24 @@ -#![warn(unused_extern_crates)] - //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
use std::sync::Arc; use std::time::Duration; -use substrate_client::{self as client, LongestChain}; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use substrate_client::LongestChain; +use babe::{import_queue, start_babe, Config}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use futures::prelude::*; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM_BINARY}; -use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, -}; +use substrate_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; use substrate_executor::native_executor_instance; -use substrate_service::{ServiceFactory, construct_service_factory, TelemetryOnConnect}; pub use substrate_executor::NativeExecutor; // Our native executor instance. native_executor_instance!( pub Executor, node_template_runtime::api::dispatch, - node_template_runtime::native_version, - WASM_BINARY + node_template_runtime::native_version ); construct_simple_protocol! { @@ -34,235 +26,205 @@ construct_simple_protocol! { pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService = babe::BabeBlockImport< - FullBackend, - FullExecutor, - ::Block, - grandpa::BlockImportForService, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, ->; - -pub struct NodeConfig { - /// GRANDPA and BABE connection to import block. 
- // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService, - grandpa::LinkHalfForService, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. - pub tasks_to_spawn: Option + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl Default for NodeConfig where F: ServiceFactory { - fn default() -> NodeConfig { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - -construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = Executor, - FullTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, FullExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, LightExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig, - FullService = FullComponents { - |config: FactoryFullConfiguration| FullComponents::::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. 
- service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents - { |config| >::new(config) }, - FullImportQueue = BabeImportQueue { - | - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - | { +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! 
new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor + >($config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(substrate_client::LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient, _>( + grandpa::block_import::<_, _, _, node_template_runtime::RuntimeApi, _, _>( client.clone(), client.clone(), select_chain )?; let justification_import = block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, + + let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + babe::Config::get_or_compute(&*client)?, block_import, Some(Box::new(justification_import)), None, client.clone(), client, - config.custom.inherent_data_providers.clone(), - transaction_pool, + inherent_data_providers.clone(), + Some(transaction_pool) )?; - config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + + import_setup = Some((babe_block_import.clone(), link_half, babe_link)); + tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + Ok(import_queue) - } - }, - LightImportQueue = BabeImportQueue - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - let 
fetch_checker = client.backend().blockchain().fetcher() - .upgrade() - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient>( - client.clone(), Arc::new(fetch_checker), client.clone() - )?; + })?; - let finality_proof_import = block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + (builder, import_setup, inherent_data_providers, tasks_to_spawn) + }} +} - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) = import_queue::<_, _, _, _, _, _, TransactionPool>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, - )?; +/// Builds a new service for a full client. +pub fn new_full(config: Configuration) + -> Result +{ + + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); + + let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = + import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain, Self::Block> - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } + if service.config().roles.is_authority() { + let proposer = basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + + // the BABE authoring task is considered infallible, i.e. if it + // fails we take down the service with it. 
+ service.spawn_essential_task(select); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; }, - FinalityProofProvider = { |client: Arc>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, - RpcExtensions = (), } + + Ok(service) +} + +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) + -> Result +{ + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? 
+ .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let (import_queue, ..) = import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? 
+ .build() } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 7b4ebb0c5f32d..1f35f7b86b41c 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -11,7 +11,7 @@ log = "0.4" tokio = "0.1.7" futures = "0.1" exit-future = "0.1" -jsonrpc-core = "13.0.0" +jsonrpc-core = "13.1.0" cli = { package = "substrate-cli", path = "../../core/cli" } codec = { package = "parity-scale-codec", version = "1.0.0" } sr-io = { path = "../../core/sr-io" } diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index fca4c78b892ee..00bcb2577613d 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -30,7 +30,7 @@ use hex_literal::hex; use substrate_telemetry::TelemetryEndpoints; use grandpa_primitives::{AuthorityId as GrandpaId}; use babe_primitives::{AuthorityId as BabeId}; -use im_online::AuthorityId as ImOnlineId; +use im_online::sr25519::{AuthorityId as ImOnlineId}; use sr_primitives::Perbill; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -350,8 +350,8 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; + use crate::service::{new_full, new_light}; use service_test; - use crate::service::Factory; fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( @@ -395,6 +395,10 @@ pub(crate) mod tests { #[test] #[ignore] fn test_connectivity() { - service_test::connectivity::(integration_test_config_with_two_authorities()); + service_test::connectivity( + integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), + ); } } diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 4e3cfa7f01092..b7679be1764e0 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -21,14 +21,14 @@ pub use cli::error; pub mod chain_spec; +#[macro_use] mod service; mod factory_impl; use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; pub use cli::{VersionInfo, IntoExit, 
NoCustom, SharedParams, ExecutionStrategyParam}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; -use std::ops::Deref; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use log::info; use structopt::{StructOpt, clap::App}; use cli::{AugmentClap, GetLogFilter, parse_and_prepare, ParseAndPrepare}; @@ -159,7 +159,8 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by Parity Technologies, 2017-2019"); @@ -171,23 +172,26 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(load_spec), + ParseAndPrepare::RevertChain(cmd) => 
cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => { - let mut config = cli::create_config_with_db_path( + let mut config = cli::create_config_with_db_path::<(), _, _>( load_spec, &cli_args.shared_params, &version, @@ -209,9 +213,13 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul cli_args.num, cli_args.rounds, ); - transaction_factory::factory::>( + + let service_builder = new_full_start!(config).0; + transaction_factory::factory::, _, _, _, _, _>( factory_state, - config, + service_builder.client(), + service_builder.select_chain() + .expect("The select_chain is always initialized by new_full_start!; QED") ).map_err(|e| format!("Error in transaction factory: {}", e))?; Ok(()) @@ -219,14 +227,13 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul } } -fn run_until_exit( +fn run_until_exit( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref>, - T: Future + Send + 'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 7022d12d69a0f..c47e764c4294e 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -16,271 +16,240 @@ #![warn(unused_extern_crates)] -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +//! Service implementation. Specialized wrapper over substrate service. 
use std::sync::Arc; -use std::time::Duration; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use babe::{import_queue, Config}; use client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_executor; -use futures::prelude::*; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, + AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; -use substrate_service::construct_service_factory; -use substrate_service::TelemetryOnConnect; construct_simple_protocol! { /// Demo protocol attachment for substrate. pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService = babe::BabeBlockImport< - FullBackend, - FullExecutor, - ::Block, - grandpa::BlockImportForService, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, ->; - -/// Node specific configuration -pub struct NodeConfig { - /// GRANDPA and BABE connection to import block. - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService, - grandpa::LinkHalfForService, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. 
- pub tasks_to_spawn: Option + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl Default for NodeConfig where F: substrate_service::ServiceFactory { - fn default() -> NodeConfig { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - -construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = node_executor::Executor, - FullTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, FullExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, LightExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig, - FullService = FullComponents { - |config: FactoryFullConfiguration| FullComponents::::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. 
- service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents - { |config| >::new(config) }, - FullImportQueue = BabeImportQueue - { - | - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - | - { +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! 
new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor + >($config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(client::LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient, _>( + grandpa::block_import::<_, _, _, node_runtime::RuntimeApi, _, _>( client.clone(), client.clone(), select_chain )?; let justification_import = block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, + let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + babe::Config::get_or_compute(&*client)?, block_import, Some(Box::new(justification_import)), None, client.clone(), client, - config.custom.inherent_data_providers.clone(), - transaction_pool, + inherent_data_providers.clone(), + Some(transaction_pool) )?; - config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + import_setup = Some((babe_block_import.clone(), link_half, babe_link)); + tasks_to_spawn = Some(vec![Box::new(pruning_task)]); Ok(import_queue) - }}, - LightImportQueue = BabeImportQueue - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - let fetch_checker = client.backend().blockchain().fetcher() - 
.upgrade() - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient>( - client.clone(), Arc::new(fetch_checker), client.clone() - )?; + })? + .with_rpc_extensions(|client, pool| { + use node_rpc::accounts::{Accounts, AccountsApi}; + + let mut io = jsonrpc_core::IoHandler::::default(); + io.extend_with( + AccountsApi::to_delegate(Accounts::new(client, pool)) + ); + io + })?; + + (builder, import_setup, inherent_data_providers, tasks_to_spawn) + }} +} - let finality_proof_import = block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); +/// Creates a full service from the configuration. +/// +/// We need to use a macro because the test suit doesn't work with an opaque service. It expects +/// concrete types instead. +macro_rules! new_full { + ($config:expr) => {{ + use futures::Future; + + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!($config); + + let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) 
= import_queue::<_, _, _, _, _, _, TransactionPool>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, + if service.config().roles.is_authority() { + let proposer = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(substrate_service::Error::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: babe::Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = babe::start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + service.spawn_task(Box::new(select)); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + }; + 
service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), )?; + }, + } - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain, Self::Block> - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } - }, - FinalityProofProvider = { |client: Arc>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, + Ok((service, inherent_data_providers)) + }} +} - RpcExtensions = jsonrpc_core::IoHandler - { |client, pool| { +/// Builds a new service for a full client. +pub fn new_full(config: Configuration) +-> Result { + new_full!(config).map(|(service, _)| service) +} + +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) +-> Result { + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. 
+ let (import_queue, ..) = import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .with_rpc_extensions(|client, pool| { use node_rpc::accounts::{Accounts, AccountsApi}; let mut io = jsonrpc_core::IoHandler::default(); @@ -288,11 +257,10 @@ construct_service_factory! { AccountsApi::to_delegate(Accounts::new(client, pool)) ); io - }}, - } + })? + .build() } - #[cfg(test)] mod tests { use std::sync::Arc; @@ -312,9 +280,8 @@ mod tests { use timestamp; use finality_tracker; use keyring::AccountKeyring; - use substrate_service::ServiceFactory; - use service_test::SyncService; - use crate::service::Factory; + use substrate_service::AbstractService; + use crate::service::{new_full, new_light}; #[cfg(feature = "rhd")] fn test_sync() { @@ -369,8 +336,10 @@ mod tests { let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); OpaqueExtrinsic(v) }; - service_test::sync::( + service_test::sync( chain_spec::integration_test_config(), + |config| new_full(config), + |config| new_light(config), block_factory, extrinsic_factory, ); @@ -387,130 +356,127 @@ mod tests { let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + // For the block factory let mut slot_num = 1u64; - let block_factory = |service: &SyncService<::FullService>| { - let service = service.get(); - let mut inherent_data = service - .config() - .custom - .inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); - - let parent_id = 
BlockId::number(service.client().info().chain.best_number); - let parent_header = service.client().header(&parent_id).unwrap().unwrap(); - let mut proposer_factory = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let mut digest = Digest::::default(); - - // even though there's only one authority some slots might be empty, - // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); - if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( - slot_num, - &parent_header, - &*service.client(), - (278, 1000), - &keystore, - ) { - break babe_pre_digest; - } - - slot_num += 1; - }; - - digest.push(::babe_pre_digest(babe_pre_digest)); - - let mut proposer = proposer_factory.init(&parent_header).unwrap(); - let new_block = futures03::executor::block_on(proposer.propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - )).expect("Error making test block"); - - let (new_header, new_body) = new_block.deconstruct(); - let pre_hash = new_header.hash(); - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let to_sign = pre_hash.encode(); - let signature = alice.sign(&to_sign[..]); - let item = ::babe_seal( - signature.into(), - ); - slot_num += 1; - - BlockImportParams { - origin: BlockOrigin::File, - header: new_header, - justification: None, - post_digests: vec![item], - body: Some(new_body), - finalized: true, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } - }; + // For the extrinsics factory let bob = Arc::new(AccountKeyring::Bob.pair()); let charlie = Arc::new(AccountKeyring::Charlie.pair()); - let mut index = 0; - let extrinsic_factory = |service: &SyncService<::FullService>| { - let amount = 5 * CENTS; - let to = AddressPublic::from_raw(bob.public().0); - let from = AddressPublic::from_raw(charlie.public().0); - let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap(); - let best_block_id = BlockId::number(service.get().client().info().chain.best_number); - let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version; - let signer = charlie.clone(); - - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let check_version = system::CheckVersion::new(); - let check_genesis = system::CheckGenesis::new(); - let check_era = system::CheckEra::from(Era::Immortal); - let check_nonce = system::CheckNonce::from(index); - let check_weight = system::CheckWeight::new(); - let take_fees = balances::TakeFees::from(0); - let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); - - let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); - let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { - signer.sign(&blake2_256(payload)[..]) - } else { - signer.sign(payload) - }); - let xt = UncheckedExtrinsic::new_signed( - raw_payload.0, - from.into(), - signature.into(), - extra, - ).encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - index += 1; - OpaqueExtrinsic(v) - }; 
- - service_test::sync::( + service_test::sync( chain_spec, - block_factory, - extrinsic_factory, + |config| new_full!(config), + |config| new_light(config), + |service, inherent_data_providers| { + let mut inherent_data = inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); + inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().info().chain.best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); + let mut proposer_factory = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let mut digest = Digest::::default(); + + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. + let babe_pre_digest = loop { + inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); + if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( + slot_num, + &parent_header, + &*service.client(), + (278, 1000), + &keystore, + ) { + break babe_pre_digest; + } + + slot_num += 1; + }; + + digest.push(::babe_pre_digest(babe_pre_digest)); + + let mut proposer = proposer_factory.init(&parent_header).unwrap(); + let new_block = futures03::executor::block_on(proposer.propose( + inherent_data, + digest, + std::time::Duration::from_secs(1), + )).expect("Error making test block"); + + let (new_header, new_body) = new_block.deconstruct(); + let pre_hash = new_header.hash(); + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let to_sign = pre_hash.encode(); + let signature = alice.sign(&to_sign[..]); + let item = ::babe_seal( + signature.into(), + ); + slot_num += 1; + + BlockImportParams { + origin: BlockOrigin::File, + header: new_header, + justification: None, + post_digests: vec![item], + body: Some(new_body), + finalized: true, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + }, + |service, _| { + let amount = 5 * CENTS; + let to = AddressPublic::from_raw(bob.public().0); + let from = AddressPublic::from_raw(charlie.public().0); + let genesis_hash = service.client().block_hash(0).unwrap().unwrap(); + let best_block_id = BlockId::number(service.client().info().chain.best_number); + let version = service.client().runtime_version_at(&best_block_id).unwrap().spec_version; + let signer = charlie.clone(); + + let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + + let check_version = system::CheckVersion::new(); + let check_genesis = system::CheckGenesis::new(); + let check_era = system::CheckEra::from(Era::Immortal); + let check_nonce = system::CheckNonce::from(index); + let check_weight = system::CheckWeight::new(); + let take_fees = balances::TakeFees::from(0); + let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); + + let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); + let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { + signer.sign(&blake2_256(payload)[..]) + } else { + signer.sign(payload) + }); + let xt = UncheckedExtrinsic::new_signed( + raw_payload.0, + from.into(), + signature.into(), + extra, + ).encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + + index += 1; + OpaqueExtrinsic(v) + }, ); } #[test] #[ignore] fn test_consensus() { - use super::Factory; - - service_test::consensus::( + service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), + |config| 
new_full(config), + |config| new_light(config), vec![ "//Alice".into(), "//Bob".into(), diff --git a/node/executor/src/lib.rs b/node/executor/src/lib.rs index 874c5a1b6db1c..eba744f62adfc 100644 --- a/node/executor/src/lib.rs +++ b/node/executor/src/lib.rs @@ -30,8 +30,7 @@ use substrate_executor::native_executor_instance; native_executor_instance!( pub Executor, node_runtime::api::dispatch, - node_runtime::native_version, - node_runtime::WASM_BINARY + node_runtime::native_version ); #[cfg(test)] diff --git a/node/rpc-client/Cargo.toml b/node/rpc-client/Cargo.toml index bc492bc003394..b98df224dfcf1 100644 --- a/node/rpc-client/Cargo.toml +++ b/node/rpc-client/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" env_logger = "0.6" futures = "0.1.26" hyper = "0.12" -jsonrpc-core-client = { version = "13.0.0", features = ["http", "ws"] } +jsonrpc-core-client = { version = "13.1.0", features = ["http", "ws"] } log = "0.4" node-primitives = { path = "../primitives" } substrate-rpc = { path = "../../core/rpc", version = "2.0.0" } diff --git a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml index 6042380c8379f..55371daad6b0f 100644 --- a/node/rpc/Cargo.toml +++ b/node/rpc/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" [dependencies] client = { package = "substrate-client", path = "../../core/client" } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-derive = "13.0.0" -jsonrpc-pubsub = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-derive = "13.1.0" +jsonrpc-pubsub = "13.1.0" keyring = { package = "substrate-keyring", path = "../../core/keyring" } log = "0.4" node-primitives = { path = "../primitives" } diff --git a/node/runtime/src/lib.rs b/node/runtime/src/lib.rs index e80d5fa83ab38..d23e3bb457542 100644 --- a/node/runtime/src/lib.rs +++ b/node/runtime/src/lib.rs @@ -47,8 +47,7 @@ use elections::VoteIndex; use version::NativeVersion; use primitives::OpaqueMetadata; use grandpa::{AuthorityId as GrandpaId, AuthorityWeight as GrandpaWeight}; 
-use im_online::{AuthorityId as ImOnlineId}; -use finality_tracker::{DEFAULT_REPORT_LATENCY, DEFAULT_WINDOW_SIZE}; +use im_online::sr25519::{AuthorityId as ImOnlineId}; #[cfg(any(feature = "std", test))] pub use sr_primitives::BuildStorage; @@ -80,8 +79,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to equal spec_version. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 149, - impl_version: 149, + spec_version: 154, + impl_version: 154, apis: RUNTIME_API_VERSIONS, }; @@ -394,6 +393,7 @@ impl sudo::Trait for Runtime { } impl im_online::Trait for Runtime { + type AuthorityId = ImOnlineId; type Call = Call; type Event = Event; type UncheckedExtrinsic = UncheckedExtrinsic; @@ -414,8 +414,8 @@ impl grandpa::Trait for Runtime { } parameter_types! { - pub const WindowSize: BlockNumber = DEFAULT_WINDOW_SIZE.into(); - pub const ReportLatency: BlockNumber = DEFAULT_REPORT_LATENCY.into(); + pub const WindowSize: BlockNumber = 101; + pub const ReportLatency: BlockNumber = 1000; } impl finality_tracker::Trait for Runtime { @@ -448,8 +448,8 @@ construct_runtime!( Treasury: treasury::{Module, Call, Storage, Event}, Contracts: contracts, Sudo: sudo, - ImOnline: im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: authority_discovery::{Module, Call, Config}, + ImOnline: im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: authority_discovery::{Module, Call, Config}, Offences: offences::{Module, Call, Storage, Event}, } ); @@ -579,19 +579,19 @@ impl_runtime_apis! 
{ } } - impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { - fn authority_id() -> Option { + impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + fn authority_id() -> Option { AuthorityDiscovery::authority_id() } - fn authorities() -> Vec { + fn authorities() -> Vec { AuthorityDiscovery::authorities() } - fn sign(payload: Vec, authority_id: im_online::AuthorityId) -> Option> { + fn sign(payload: Vec, authority_id: ImOnlineId) -> Option> { AuthorityDiscovery::sign(payload, authority_id) } - fn verify(payload: Vec, signature: Vec, public_key: im_online::AuthorityId) -> bool { + fn verify(payload: Vec, signature: Vec, public_key: ImOnlineId) -> bool { AuthorityDiscovery::verify(payload, signature, public_key) } } diff --git a/scripts/node-template-release/src/main.rs b/scripts/node-template-release/src/main.rs index 4036104f6030a..e1db5af649748 100644 --- a/scripts/node-template-release/src/main.rs +++ b/scripts/node-template-release/src/main.rs @@ -117,8 +117,15 @@ fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, c cargo_toml.insert("dependencies".into(), dependencies.into()); } -/// Add `profile.release` = `panic = unwind` to the given `Cargo.toml` -fn cargo_toml_add_profile_release(cargo_toml: &mut CargoToml) { +/// Update the top level (workspace) `Cargo.toml` file. 
+/// +/// - Adds `profile.release` = `panic = unwind` +/// - Adds `workspace` definition +fn update_top_level_cargo_toml( + cargo_toml: &mut CargoToml, + workspace_members: Vec<&PathBuf>, + node_template_path: &Path, +) { let mut panic_unwind = toml::value::Table::new(); panic_unwind.insert("panic".into(), "unwind".into()); @@ -126,6 +133,24 @@ fn cargo_toml_add_profile_release(cargo_toml: &mut CargoToml) { profile.insert("release".into(), panic_unwind.into()); cargo_toml.insert("profile".into(), profile.into()); + + let members = workspace_members.iter() + .map(|p| + p.strip_prefix(node_template_path) + .expect("Workspace member is a child of the node template path!") + .parent() + // We get the `Cargo.toml` paths as workspace members, but for the `members` field + // we just need the path. + .expect("The given path ends with `Cargo.toml` as file name!") + .display() + .to_string() + ) + .collect::>(); + + let mut members_section = toml::value::Table::new(); + members_section.insert("members".into(), members.into()); + + cargo_toml.insert("workspace".into(), members_section.into()); } fn write_cargo_toml(path: &Path, cargo_toml: CargoToml) { @@ -137,10 +162,24 @@ fn write_cargo_toml(path: &Path, cargo_toml: CargoToml) { /// Build and test the generated node-template fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) { // Build node - assert!(Command::new("cargo").args(&["build", "--all"]).current_dir(path).status().expect("Compiles node").success()); + assert!( + Command::new("cargo") + .args(&["build", "--all"]) + .current_dir(path) + .status() + .expect("Compiles node") + .success() + ); // Test node - assert!(Command::new("cargo").args(&["test", "--all"]).current_dir(path).status().expect("Tests node").success()); + assert!( + Command::new("cargo") + .args(&["test", "--all"]) + .current_dir(path) + .status() + .expect("Tests node") + .success() + ); // Remove all `target` directories for toml in cargo_tomls { @@ -174,14 +213,20 @@ fn main() { let 
cargo_tomls = find_cargo_tomls(build_dir.path().to_owned()); let commit_id = get_git_commit_id(&options.node_template); + let top_level_cargo_toml_path = node_template_path.join("Cargo.toml"); cargo_tomls.iter().for_each(|t| { let mut cargo_toml = parse_cargo_toml(&t); replace_path_dependencies_with_git(&t, &commit_id, &mut cargo_toml); - // If this is the top-level `Cargo.toml`, add `profile.release` - if &node_template_path.join("Cargo.toml") == t { - cargo_toml_add_profile_release(&mut cargo_toml); + // Check if this is the top level `Cargo.toml`, as this requires some special treatments. + if top_level_cargo_toml_path == *t { + // All workspace member `Cargo.toml` file paths. + let workspace_members = cargo_tomls.iter() + .filter(|p| **p != top_level_cargo_toml_path) + .collect(); + + update_top_level_cargo_toml(&mut cargo_toml, workspace_members, &node_template_path); } write_cargo_toml(&t, cargo_toml); diff --git a/srml/aura/src/lib.rs b/srml/aura/src/lib.rs index e02a2e1f0a0ec..4df832912a543 100644 --- a/srml/aura/src/lib.rs +++ b/srml/aura/src/lib.rs @@ -258,7 +258,7 @@ impl Module { fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { let last = Self::last(); - ::LastTimestamp::put(now.clone()); + ::LastTimestamp::put(now); if last.is_zero() { return; @@ -266,7 +266,7 @@ impl Module { assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - let last_slot = last / slot_duration.clone(); + let last_slot = last / slot_duration; let cur_slot = now / slot_duration; assert!(last_slot < cur_slot, "Only one block may be authored per slot."); diff --git a/srml/authority-discovery/Cargo.toml b/srml/authority-discovery/Cargo.toml index 5d52bdb2461fa..f4c2d4e2b0bd0 100644 --- a/srml/authority-discovery/Cargo.toml +++ b/srml/authority-discovery/Cargo.toml @@ -17,6 +17,9 @@ srml-support = { path = "../support", default-features = false } sr-io = { package = "sr-io", path = "../../core/sr-io", default-features = false } system = { package = 
"srml-system", path = "../system", default-features = false } +[dev-dependencies] +sr-staking-primitives = { path = "../../core/sr-staking-primitives", default-features = false } + [features] default = ["std"] std = [ diff --git a/srml/authority-discovery/src/lib.rs b/srml/authority-discovery/src/lib.rs index 00912aeffed8b..ffcb6672c8662 100644 --- a/srml/authority-discovery/src/lib.rs +++ b/srml/authority-discovery/src/lib.rs @@ -33,18 +33,20 @@ use codec::{Decode, Encode}; use rstd::prelude::*; use srml_support::{decl_module, decl_storage, StorageValue}; -pub trait Trait: system::Trait + session::Trait {} +pub trait Trait: system::Trait + session::Trait + im_online::Trait {} + +type AuthorityIdFor = ::AuthorityId; decl_storage! { trait Store for Module as AuthorityDiscovery { /// The current set of keys that may issue a heartbeat. - Keys get(keys): Vec; + Keys get(keys): Vec>; } add_extra_genesis { - config(keys): Vec; + config(keys): Vec>; build(| storage: &mut (sr_primitives::StorageOverlay, sr_primitives::ChildrenStorageOverlay), - config: &GenesisConfig + config: &GenesisConfig, | { sr_io::with_storage( storage, @@ -64,10 +66,10 @@ impl Module { /// set, otherwise this function returns None. The restriction might be /// softened in the future in case a consumer needs to learn own authority /// identifier. - pub fn authority_id() -> Option { - let authorities = Keys::get(); + pub fn authority_id() -> Option> { + let authorities = Keys::::get(); - let local_keys = im_online::AuthorityId::all(); + let local_keys = >::all(); authorities.into_iter().find_map(|authority| { if local_keys.contains(&authority) { @@ -79,12 +81,12 @@ impl Module { } /// Retrieve authority identifiers of the current authority set. - pub fn authorities() -> Vec { - Keys::get() + pub fn authorities() -> Vec> { + Keys::::get() } /// Sign the given payload with the private key corresponding to the given authority id. 
- pub fn sign(payload: Vec, authority_id: im_online::AuthorityId) -> Option> { + pub fn sign(payload: Vec, authority_id: AuthorityIdFor) -> Option> { authority_id.sign(&payload).map(|s| s.encode()) } @@ -93,27 +95,27 @@ impl Module { pub fn verify( payload: Vec, signature: Vec, - authority_id: im_online::AuthorityId, + authority_id: AuthorityIdFor, ) -> bool { - im_online::AuthoritySignature::decode(&mut &signature[..]) + as RuntimeAppPublic>::Signature::decode(&mut &signature[..]) .map(|s| authority_id.verify(&payload, &s)) .unwrap_or(false) } - fn initialize_keys(keys: &[im_online::AuthorityId]) { + fn initialize_keys(keys: &[AuthorityIdFor]) { if !keys.is_empty() { - assert!(Keys::get().is_empty(), "Keys are already initialized!"); - Keys::put_ref(keys); + assert!(Keys::::get().is_empty(), "Keys are already initialized!"); + Keys::::put_ref(keys); } } } impl session::OneSessionHandler for Module { - type Key = im_online::AuthorityId; + type Key = AuthorityIdFor; fn on_genesis_session<'a, I: 'a>(validators: I) where - I: Iterator, + I: Iterator, { let keys = validators.map(|x| x.1).collect::>(); Self::initialize_keys(&keys); @@ -121,10 +123,10 @@ impl session::OneSessionHandler for Module { fn on_new_session<'a, I: 'a>(_changed: bool, _validators: I, next_validators: I) where - I: Iterator, + I: Iterator, { // Remember who the authorities are for the new session. 
- Keys::put(next_validators.map(|x| x.1).collect::>()); + Keys::::put(next_validators.map(|x| x.1).collect::>()); } fn on_disabled(_i: usize) { @@ -139,9 +141,11 @@ mod tests { use primitives::testing::KeyStore; use primitives::{crypto::key_types, sr25519, traits::BareCryptoStore, H256}; use sr_io::{with_externalities, TestExternalities}; + use sr_primitives::generic::UncheckedExtrinsic; use sr_primitives::testing::{Header, UintAuthorityId}; use sr_primitives::traits::{ConvertInto, IdentityLookup, OpaqueKeys}; use sr_primitives::Perbill; + use sr_staking_primitives::CurrentElectedSet; use srml_support::{impl_outer_origin, parameter_types}; type AuthorityDiscovery = Module; @@ -151,12 +155,21 @@ mod tests { pub struct Test; impl Trait for Test {} + type AuthorityId = im_online::sr25519::AuthorityId; + + pub struct DummyCurrentElectedSet(std::marker::PhantomData); + impl CurrentElectedSet for DummyCurrentElectedSet { + fn current_elected_set() -> Vec { + vec![] + } + } + pub struct TestOnSessionEnding; - impl session::OnSessionEnding for TestOnSessionEnding { + impl session::OnSessionEnding for TestOnSessionEnding { fn on_session_ending( _: SessionIndex, _: SessionIndex, - ) -> Option> { + ) -> Option> { None } } @@ -167,11 +180,25 @@ mod tests { type ShouldEndSession = session::PeriodicSessions; type SessionHandler = TestSessionHandler; type Event = (); - type ValidatorId = im_online::AuthorityId; + type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; type SelectInitialValidators = (); } + impl session::historical::Trait for Test { + type FullIdentification = (); + type FullIdentificationOf = (); + } + + impl im_online::Trait for Test { + type AuthorityId = AuthorityId; + type Call = im_online::Call; + type Event = (); + type UncheckedExtrinsic = UncheckedExtrinsic<(), im_online::Call, (), ()>; + type ReportUnresponsiveness = (); + type CurrentElectedSet = DummyCurrentElectedSet; + } + pub type BlockNumber = u64; parameter_types! 
{ @@ -191,7 +218,7 @@ mod tests { type Call = (); type Hash = H256; type Hashing = ::sr_primitives::traits::BlakeTwo256; - type AccountId = im_online::AuthorityId; + type AccountId = AuthorityId; type Lookup = IdentityLookup; type Header = Header; type WeightMultiplierUpdate = (); @@ -208,17 +235,17 @@ mod tests { } pub struct TestSessionHandler; - impl session::SessionHandler for TestSessionHandler { + impl session::SessionHandler for TestSessionHandler { fn on_new_session( _changed: bool, - _validators: &[(im_online::AuthorityId, Ks)], - _queued_validators: &[(im_online::AuthorityId, Ks)], + _validators: &[(AuthorityId, Ks)], + _queued_validators: &[(AuthorityId, Ks)], ) { } fn on_disabled(_validator_index: usize) {} - fn on_genesis_session(_validators: &[(im_online::AuthorityId, Ks)]) {} + fn on_genesis_session(_validators: &[(AuthorityId, Ks)]) {} } #[test] @@ -236,17 +263,17 @@ mod tests { .sr25519_public_keys(key_types::IM_ONLINE) .pop() .unwrap(); - let authority_id = im_online::AuthorityId::from(public_key); + let authority_id = AuthorityId::from(public_key); // Build genesis. let mut t = system::GenesisConfig::default() .build_storage::() .unwrap(); - GenesisConfig { + GenesisConfig:: { keys: vec![authority_id.clone()], } - .assimilate_storage::(&mut t) + .assimilate_storage(&mut t) .unwrap(); // Create externalities. @@ -279,11 +306,11 @@ mod tests { let keys = vec![(); 5] .iter() .map(|_x| sr25519::Pair::generate_with_phrase(None).0.public()) - .map(im_online::AuthorityId::from) + .map(AuthorityId::from) .collect(); - GenesisConfig { keys: keys } - .assimilate_storage::(&mut t) + GenesisConfig:: { keys: keys } + .assimilate_storage(&mut t) .unwrap(); // Create externalities. @@ -310,17 +337,17 @@ mod tests { .sr25519_public_keys(key_types::IM_ONLINE) .pop() .unwrap(); - let authority_id = im_online::AuthorityId::from(public_key); + let authority_id = AuthorityId::from(public_key); // Build genesis. 
let mut t = system::GenesisConfig::default() .build_storage::() .unwrap(); - GenesisConfig { + GenesisConfig:: { keys: vec![authority_id.clone()], } - .assimilate_storage::(&mut t) + .assimilate_storage(&mut t) .unwrap(); // Create externalities. diff --git a/srml/authorship/src/lib.rs b/srml/authorship/src/lib.rs index 424223a2c9a64..c71c26df02a61 100644 --- a/srml/authorship/src/lib.rs +++ b/srml/authorship/src/lib.rs @@ -27,7 +27,7 @@ use srml_support::traits::{FindAuthor, VerifySeal, Get}; use srml_support::dispatch::Result as DispatchResult; use codec::{Encode, Decode}; use system::ensure_none; -use sr_primitives::traits::{SimpleArithmetic, Header as HeaderT, One, Zero}; +use sr_primitives::traits::{Header as HeaderT, One, Zero}; use sr_primitives::weights::SimpleDispatchInfo; use inherents::{ RuntimeString, InherentIdentifier, ProvideInherent, @@ -236,29 +236,14 @@ decl_storage! { } } -fn prune_old_uncles( - minimum_height: BlockNumber, - uncles: &mut Vec> -) where BlockNumber: SimpleArithmetic { - let prune_entries = uncles.iter().take_while(|item| match item { - UncleEntryItem::Uncle(_, _) => true, - UncleEntryItem::InclusionHeight(height) => height < &minimum_height, - }); - let prune_index = prune_entries.count(); - - let _ = uncles.drain(..prune_index); -} - decl_module! { pub struct Module for enum Call where origin: T::Origin { fn on_initialize(now: T::BlockNumber) { let uncle_generations = T::UncleGenerations::get(); - let mut uncles = ::Uncles::get(); - // prune uncles that are older than the allowed number of generations. if uncle_generations <= now { let minimum_height = now - uncle_generations; - prune_old_uncles(minimum_height, &mut uncles) + Self::prune_old_uncles(minimum_height) } ::DidSetUncles::put(false); @@ -387,6 +372,18 @@ impl Module { // check uncle validity. 
T::FilterUncle::filter_uncle(&uncle, accumulator) } + + fn prune_old_uncles(minimum_height: T::BlockNumber) { + let mut uncles = ::Uncles::get(); + let prune_entries = uncles.iter().take_while(|item| match item { + UncleEntryItem::Uncle(_, _) => true, + UncleEntryItem::InclusionHeight(height) => height < &minimum_height, + }); + let prune_index = prune_entries.count(); + + let _ = uncles.drain(..prune_index); + ::Uncles::put(uncles); + } } impl ProvideInherent for Module { @@ -569,15 +566,21 @@ mod tests { #[test] fn prune_old_uncles_works() { use UncleEntryItem::*; - let mut uncles = vec![ - InclusionHeight(1u32), Uncle((), Some(())), Uncle((), None), Uncle((), None), - InclusionHeight(2u32), Uncle((), None), - InclusionHeight(3u32), Uncle((), None), - ]; - - prune_old_uncles(3, &mut uncles); - - assert_eq!(uncles, vec![InclusionHeight(3), Uncle((), None)]); + with_externalities(&mut new_test_ext(), || { + let hash = Default::default(); + let author = Default::default(); + let uncles = vec![ + InclusionHeight(1u64), Uncle(hash, Some(author)), Uncle(hash, None), Uncle(hash, None), + InclusionHeight(2u64), Uncle(hash, None), + InclusionHeight(3u64), Uncle(hash, None), + ]; + + ::Uncles::put(uncles); + Authorship::prune_old_uncles(3); + + let uncles = ::Uncles::get(); + assert_eq!(uncles, vec![InclusionHeight(3u64), Uncle(hash, None)]); + }) } #[test] diff --git a/srml/babe/src/lib.rs b/srml/babe/src/lib.rs index b58fb26b5a127..0f439d489e8af 100644 --- a/srml/babe/src/lib.rs +++ b/srml/babe/src/lib.rs @@ -445,6 +445,8 @@ impl session::OneSessionHandler for Module { fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) where I: Iterator { + Self::do_initialize(); + // Update epoch index let epoch_index = EpochIndex::get() .checked_add(1) diff --git a/srml/balances/src/lib.rs b/srml/balances/src/lib.rs index 3f727bfa51bee..0c838970451e1 100644 --- a/srml/balances/src/lib.rs +++ b/srml/balances/src/lib.rs @@ -72,8 +72,6 @@ //! 
- [`Imbalance`](../srml_support/traits/trait.Imbalance.html): Functions for handling //! imbalances between total issuance in the system and account balances. Must be used when a function //! creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). -//! - [`MakePayment`](../srml_support/traits/trait.MakePayment.html): Simple trait designed -//! for hooking into a transaction payment. //! - [`IsDeadAccount`](../srml_system/trait.IsDeadAccount.html): Determiner to say whether a //! given account is unused. //! @@ -88,6 +86,17 @@ //! //! - `vesting_balance` - Get the amount that is currently being vested and cannot be transferred out of this account. //! +//! ### Signed Extensions +//! +//! The balances module defines the following extensions: +//! +//! - [`TakeFees`]: Consumes fees proportional to the length and weight of the transaction. +//! Additionally, it can contain a single encoded payload as a `tip`. The inclusion priority +//! is increased proportional to the tip. +//! +//! Lookup the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed +//! extensions included in a chain. +//! //! ## Usage //! //! The following examples show how to use the Balances module in your custom module. @@ -172,12 +181,6 @@ mod tests; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; -pub const DEFAULT_EXISTENTIAL_DEPOSIT: u32 = 0; -pub const DEFAULT_TRANSFER_FEE: u32 = 0; -pub const DEFAULT_CREATION_FEE: u32 = 0; -pub const DEFAULT_TRANSACTION_BASE_FEE: u32 = 0; -pub const DEFAULT_TRANSACTION_BYTE_FEE: u32 = 0; - pub trait Subtrait: system::Trait { /// The balance of an account. type Balance: Parameter + Member + SimpleArithmetic + Codec + Default + Copy + @@ -459,7 +462,7 @@ decl_module! { /// - Independent of the arguments. /// - Contains a limited number of reads and writes. 
/// # - #[weight = SimpleDispatchInfo::FixedOperational(500_000)] + #[weight = SimpleDispatchInfo::FixedOperational(50_000)] fn set_balance( origin, who: ::Source, @@ -485,6 +488,21 @@ decl_module! { } Self::set_reserved_balance(&who, new_reserved); } + + /// Exactly as `transfer`, except the origin must be root and the source account may be + /// specified. + #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] + pub fn force_transfer( + origin, + source: ::Source, + dest: ::Source, + #[compact] value: T::Balance + ) { + ensure_root(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value)?; + } } } diff --git a/srml/balances/src/tests.rs b/srml/balances/src/tests.rs index 338513d40d267..1af3ce6ba013b 100644 --- a/srml/balances/src/tests.rs +++ b/srml/balances/src/tests.rs @@ -26,6 +26,7 @@ use srml_support::{ traits::{LockableCurrency, LockIdentifier, WithdrawReason, WithdrawReasons, Currency, ReservableCurrency} }; +use system::RawOrigin; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -352,6 +353,20 @@ fn balance_transfer_works() { }); } +#[test] +fn force_transfer_works() { + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_noop!( + Balances::force_transfer(Some(2).into(), 1, 2, 69), + "bad origin: expected to be a root origin" + ); + assert_ok!(Balances::force_transfer(RawOrigin::Root.into(), 1, 2, 69)); + assert_eq!(Balances::total_balance(&1), 42); + assert_eq!(Balances::total_balance(&2), 69); + }); +} + #[test] fn reserving_balance_should_work() { with_externalities(&mut ExtBuilder::default().build(), || { diff --git a/srml/contracts/src/lib.rs b/srml/contracts/src/lib.rs index 9f9fae52922bf..f8485f848ad15 100644 --- a/srml/contracts/src/lib.rs +++ b/srml/contracts/src/lib.rs @@ -425,7 +425,8 @@ where } /// The default dispatch fee computor computes the fee in the same way that -/// the 
implementation of `MakePayment` for the Balances module does. +/// the implementation of `TakeFees` for the Balances module does. Note that this only takes a fixed +/// fee based on size. Unlike the balances module, weight-fee is applied. pub struct DefaultDispatchFeeComputor(PhantomData); impl ComputeDispatchFee<::Call, BalanceOf> for DefaultDispatchFeeComputor { fn compute_dispatch_fee(call: &::Call) -> BalanceOf { diff --git a/srml/democracy/src/lib.rs b/srml/democracy/src/lib.rs index cfac7fdfbcfe1..4853d192c4bee 100644 --- a/srml/democracy/src/lib.rs +++ b/srml/democracy/src/lib.rs @@ -24,11 +24,11 @@ use sr_primitives::traits::{Zero, Bounded, CheckedMul, CheckedDiv, EnsureOrigin, use sr_primitives::weights::SimpleDispatchInfo; use codec::{Encode, Decode, Input, Output, Error}; use srml_support::{ - decl_module, decl_storage, decl_event, ensure, - StorageValue, StorageMap, Parameter, Dispatchable, EnumerableStorageMap, + decl_module, decl_storage, decl_event, ensure, AppendableStorageMap, StorageValue, StorageMap, + Parameter, Dispatchable, EnumerableStorageMap, traits::{ - Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, - OnFreeBalanceZero, Get + Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, + OnFreeBalanceZero } }; use srml_support::dispatch::Result; @@ -174,13 +174,6 @@ impl Decode for Vote { type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub const DEFAULT_ENACTMENT_PERIOD: u32 = 0; -pub const DEFAULT_LAUNCH_PERIOD: u32 = 0; -pub const DEFAULT_VOTING_PERIOD: u32 = 0; -pub const DEFAULT_MINIMUM_DEPOSIT: u32 = 0; -pub const DEFAULT_EMERGENCY_VOTING_PERIOD: u32 = 0; -pub const DEFAULT_COOLOFF_PERIOD: u32 = 0; - pub trait Trait: system::Trait + Sized { type Proposal: Parameter + Dispatchable; type Event: From> + Into<::Event>; @@ -384,9 +377,8 @@ decl_module! 
{ PublicPropCount::put(index + 1); >::insert(index, (value, vec![who.clone()])); - let mut props = Self::public_props(); - props.push((index, (*proposal).clone(), who)); - >::put(props); + let new_prop = (index, (*proposal).clone(), who); + >::append_or_put([new_prop].into_iter()); Self::deposit_event(RawEvent::Proposed(index, value)); } @@ -795,7 +787,7 @@ impl Module { fn do_vote(who: T::AccountId, ref_index: ReferendumIndex, vote: Vote) -> Result { ensure!(Self::is_active_referendum(ref_index), "vote given for invalid referendum."); if !>::exists(&(ref_index, who.clone())) { - >::mutate(ref_index, |voters| voters.push(who.clone())); + >::append_or_insert(ref_index, [who.clone()].into_iter()); } >::insert(&(ref_index, who), vote); Ok(()) @@ -933,9 +925,9 @@ impl Module { if info.delay.is_zero() { Self::enact_proposal(info.proposal, index); } else { - >::mutate( + >::append_or_insert( now + info.delay, - |q| q.push(Some((info.proposal, index))) + [Some((info.proposal, index))].into_iter() ); } } else { @@ -953,12 +945,12 @@ impl Module { if (now % T::LaunchPeriod::get()).is_zero() { // Errors come from the queue being empty. we don't really care about that, and even if // we did, there is nothing we can do here. - let _ = Self::launch_next(now.clone()); + let _ = Self::launch_next(now); } // tally up votes for any expiring referenda. 
for (index, info) in Self::maturing_referenda_at(now).into_iter() { - Self::bake_referendum(now.clone(), index, info)?; + Self::bake_referendum(now, index, info)?; } for (proposal, index) in >::take(now).into_iter().filter_map(|x| x) { diff --git a/srml/elections/src/lib.rs b/srml/elections/src/lib.rs index a82b3fab7ef1d..9c49b9055b9bc 100644 --- a/srml/elections/src/lib.rs +++ b/srml/elections/src/lib.rs @@ -28,7 +28,7 @@ use sr_primitives::traits::{Zero, One, StaticLookup, Bounded, Saturating}; use sr_primitives::weights::SimpleDispatchInfo; use runtime_io::print; use srml_support::{ - StorageValue, StorageMap, + StorageValue, StorageMap, AppendableStorageMap, DecodeLengthStorageMap, dispatch::Result, decl_storage, decl_event, ensure, decl_module, traits::{ Currency, ExistenceRequirement, Get, LockableCurrency, LockIdentifier, @@ -138,18 +138,8 @@ pub type VoteIndex = u32; // all three must be in sync. type ApprovalFlag = u32; -pub const APPROVAL_FLAG_MASK: ApprovalFlag = 0x8000_0000; pub const APPROVAL_FLAG_LEN: usize = 32; -pub const DEFAULT_CANDIDACY_BOND: u32 = 9; -pub const DEFAULT_VOTING_BOND: u32 = 0; -pub const DEFAULT_VOTING_FEE: u32 = 0; -pub const DEFAULT_PRESENT_SLASH_PER_VOTER: u32 = 1; -pub const DEFAULT_CARRY_COUNT: u32 = 2; -pub const DEFAULT_INACTIVE_GRACE_PERIOD: u32 = 1; -pub const DEFAULT_VOTING_PERIOD: u32 = 1000; -pub const DEFAULT_DECAY_RATIO: u32 = 24; - pub trait Trait: system::Trait { type Event: From> + Into<::Event>; @@ -724,15 +714,15 @@ impl Module { /// /// The voter index must be provided as explained in [`voter_at`] function. 
fn do_set_approvals(who: T::AccountId, votes: Vec, index: VoteIndex, hint: SetIndex) -> Result { - let candidates = Self::candidates(); + let candidates_len = ::Candidates::decode_len().unwrap_or(0_usize); ensure!(!Self::presentation_active(), "no approval changes during presentation period"); ensure!(index == Self::vote_index(), "incorrect vote index"); - ensure!(!candidates.is_empty(), "amount of candidates to receive approval votes should be non-zero"); + ensure!(!candidates_len.is_zero(), "amount of candidates to receive approval votes should be non-zero"); // Prevent a vote from voters that provide a list of votes that exceeds the candidates length // since otherwise an attacker may be able to submit a very long list of `votes` that far exceeds // the amount of candidates and waste more computation than a reasonable voting bond would cover. - ensure!(candidates.len() >= votes.len(), "amount of candidate votes cannot exceed amount of candidates"); + ensure!(candidates_len >= votes.len(), "amount of candidate votes cannot exceed amount of candidates"); // Amount to be locked up. let mut locked_balance = T::Currency::total_balance(&who); @@ -765,10 +755,10 @@ impl Module { CellStatus::Head | CellStatus::Occupied => { // Either occupied or out-of-range. let next = Self::next_nonfull_voter_set(); - let mut set = Self::voters(next); + let set_len = >::decode_len(next).unwrap_or(0_usize); // Caused a new set to be created. Pay for it. // This is the last potential error. Writes will begin afterwards. - if set.is_empty() { + if set_len == 0 { let imbalance = T::Currency::withdraw( &who, T::VotingFee::get(), @@ -779,8 +769,10 @@ impl Module { // NOTE: this is safe since the `withdraw()` will check this. 
locked_balance -= T::VotingFee::get(); } - Self::checked_push_voter(&mut set, who.clone(), next); - >::insert(next, set); + if set_len + 1 == VOTER_SET_SIZE { + NextVoterSet::put(next + 1); + } + >::append_or_insert(next, [Some(who.clone())].into_iter()) } } @@ -918,18 +910,6 @@ impl Module { Ok(()) } - fn checked_push_voter(set: &mut Vec>, who: T::AccountId, index: u32) { - let len = set.len(); - - // Defensive only: this should never happen. Don't push since it will break more things. - if len == VOTER_SET_SIZE { return; } - - set.push(Some(who)); - if len + 1 == VOTER_SET_SIZE { - NextVoterSet::put(index + 1); - } - } - /// Get the set and vector index of a global voter index. /// /// Note that this function does not take holes into account. diff --git a/srml/example/Cargo.toml b/srml/example/Cargo.toml index 87069a487a19a..73e280605a1c7 100644 --- a/srml/example/Cargo.toml +++ b/srml/example/Cargo.toml @@ -11,9 +11,10 @@ srml-support = { path = "../support", default-features = false } system = { package = "srml-system", path = "../system", default-features = false } balances = { package = "srml-balances", path = "../balances", default-features = false } sr-primitives = { path = "../../core/sr-primitives", default-features = false } +rstd = { package = "sr-std", path = "../../core/sr-std", default-features = false } +rio = { package = "sr-io", path = "../../core/sr-io", default-features = false } [dev-dependencies] -sr-io = { path = "../../core/sr-io" } primitives = { package = "substrate-primitives", path = "../../core/primitives" } [features] @@ -25,4 +26,6 @@ std = [ "srml-support/std", "system/std", "balances/std", + "rio/std", + "rstd/std" ] diff --git a/srml/example/src/lib.rs b/srml/example/src/lib.rs index 1c0191b69d8bd..3de961f5cdcf5 100644 --- a/srml/example/src/lib.rs +++ b/srml/example/src/lib.rs @@ -253,9 +253,15 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] +use rstd::marker::PhantomData; use srml_support::{StorageValue, dispatch::Result, decl_module, decl_storage, decl_event}; use system::{ensure_signed, ensure_root}; -use sr_primitives::weights::SimpleDispatchInfo; +use codec::{Encode, Decode}; +use sr_primitives::{ + traits::{SignedExtension, DispatchError, Bounded}, + transaction_validity::ValidTransaction, + weights::{SimpleDispatchInfo, DispatchInfo}, +}; /// Our module's configuration trait. All our types and consts go in here. If the /// module is dependent on specific other modules, then their configuration traits @@ -500,15 +506,96 @@ impl Module { } } +// Similar to other SRML modules, your module can also define a signed extension and perform some +// checks and [pre/post]processing [before/after] the transaction. A signed extension can be any +// decodable type that implements `SignedExtension`. See the trait definition for the full list of +// bounds. As a convention, you can follow this approach to create an extension for your module: +// - If the extension does not carry any data, then use a tuple struct with just a `marker` +// (needed for the compiler to accept `T: Trait`) will suffice. +// - Otherwise, create a tuple struct which contains the external data. Of course, for the entire +// struct to be decodable, each individual item also needs to be decodable. +// +// Note that a signed extension can also indicate that a particular data must be present in the +// _signing payload_ of a transaction by providing an implementation for the `additional_signed` +// method. This example will not cover this type of extension. See `CheckRuntime` in system module +// for an example. +// +// Using the extension, you can add some hooks to the lifecycle of each transaction. Note that by +// default, an extension is applied to all `Call` functions (i.e. all transactions). the `Call` enum +// variant is given to each function of `SignedExtension`. 
Hence, you can filter based on module or +// a particular call if needed. +// +// Some extra information, such as encoded length, some static dispatch info like weight and the +// sender of the transaction (if signed) are also provided. +// +// The full list of hooks that can be added to a signed extension can be found +// [here](https://crates.parity.io/sr_primitives/traits/trait.SignedExtension.html). +// +// The signed extensions are aggregated in the runtime file of a substrate chain. All extensions +// should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` +// types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and +// `node-template` for an example of this. + +/// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the +/// priority and prints some log. +/// +/// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No +/// particular reason why, just to demonstrate the power of signed extensions. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct WatchDummy(PhantomData); + +#[cfg(feature = "std")] +impl rstd::fmt::Debug for WatchDummy { + fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result { + write!(f, "WatchDummy") + } +} + +impl SignedExtension for WatchDummy { + type AccountId = T::AccountId; + // Note that this could also be assigned to the top-level call enum. It is passed into the + // balances module directly and since `Trait: balances::Trait`, you could also use `T::Call`. + // In that case, you would have had access to all call variants and could match on variants from + // other modules. 
+ type Call = Call; + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> rstd::result::Result<(), &'static str> { Ok(()) } + + fn validate( + &self, + _who: &Self::AccountId, + call: &Self::Call, + _info: DispatchInfo, + len: usize, + ) -> rstd::result::Result { + // if the transaction is too big, just drop it. + if len > 200 { return Err(DispatchError::Exhausted) } + + // check for `set_dummy` + match call { + Call::set_dummy(..) => { + rio::print("set_dummy was received."); + + let mut valid_tx = ValidTransaction::default(); + valid_tx.priority = Bounded::max_value(); + Ok(valid_tx) + } + _ => Ok(Default::default()) + } + } +} + #[cfg(test)] mod tests { use super::*; use srml_support::{assert_ok, impl_outer_origin, parameter_types}; - use sr_io::with_externalities; + use rio::with_externalities; use primitives::{H256, Blake2Hasher}; // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sr_primitives::{ Perbill, traits::{BlakeTwo256, OnInitialize, OnFinalize, IdentityLookup}, testing::Header }; @@ -575,7 +662,7 @@ mod tests { // This function basically just builds a genesis storage key/value store according to // our desired mockup. - fn new_test_ext() -> sr_io::TestExternalities { + fn new_test_ext() -> rio::TestExternalities { let mut t = system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. 
balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); @@ -617,4 +704,21 @@ mod tests { assert_eq!(Example::foo(), 25); }); } + + #[test] + fn signed_ext_watch_dummy_works() { + with_externalities(&mut new_test_ext(), || { + let call = >::set_dummy(10); + let info = DispatchInfo::default(); + + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, info, 150).unwrap().priority, + Bounded::max_value() + ); + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, info, 250), + Err(DispatchError::Exhausted) + ); + }) + } } diff --git a/srml/grandpa/src/lib.rs b/srml/grandpa/src/lib.rs index d64939ae0a766..5fc354eab96a7 100644 --- a/srml/grandpa/src/lib.rs +++ b/srml/grandpa/src/lib.rs @@ -33,7 +33,8 @@ pub use substrate_finality_grandpa_primitives as fg_primitives; use rstd::prelude::*; use codec::{self as codec, Encode, Decode, Error}; use srml_support::{ - decl_event, decl_storage, decl_module, dispatch::Result, storage::StorageValue + decl_event, decl_storage, decl_module, dispatch::Result, + storage::StorageValue, storage::StorageMap, }; use sr_primitives::{ generic::{DigestItem, OpaqueDigestItemId}, traits::Zero, @@ -43,7 +44,7 @@ use sr_staking_primitives::{ SessionIndex, offence::{Offence, Kind}, }; -use fg_primitives::{ScheduledChange, ConsensusLog, GRANDPA_ENGINE_ID}; +use fg_primitives::{GRANDPA_ENGINE_ID, ScheduledChange, ConsensusLog, SetId, RoundNumber}; pub use fg_primitives::{AuthorityId, AuthorityWeight}; use system::{ensure_signed, DigestOf}; @@ -65,7 +66,7 @@ pub struct OldStoredPendingChange { /// The delay in blocks until it will be applied. pub delay: N, /// The next authority set. - pub next_authorities: Vec<(AuthorityId, u64)>, + pub next_authorities: Vec<(AuthorityId, AuthorityWeight)>, } /// A stored pending change. @@ -76,7 +77,7 @@ pub struct StoredPendingChange { /// The delay in blocks until it will be applied. pub delay: N, /// The next authority set. 
- pub next_authorities: Vec<(AuthorityId, u64)>, + pub next_authorities: Vec<(AuthorityId, AuthorityWeight)>, /// If defined it means the change was forced and the given block number /// indicates the median last finalized block when the change was signaled. pub forced: Option, @@ -127,7 +128,7 @@ pub enum StoredState { decl_event!( pub enum Event { /// New authority set has been applied. - NewAuthorities(Vec<(AuthorityId, u64)>), + NewAuthorities(Vec<(AuthorityId, AuthorityWeight)>), /// Current authority set has been paused. Paused, /// Current authority set has been resumed. @@ -151,6 +152,13 @@ decl_storage! { /// `true` if we are currently stalled. Stalled get(stalled): Option<(T::BlockNumber, T::BlockNumber)>; + + /// The number of changes (both in terms of keys and underlying economic responsibilities) + /// in the "set" of Grandpa validators from genesis. + CurrentSetId get(current_set_id) build(|_| fg_primitives::SetId::default()): SetId; + + /// A mapping from grandpa set ID to the index of the *most recent* session for which its members were responsible. + SetIdSession get(session_for_set): map SetId => Option; } add_extra_genesis { config(authorities): Vec<(AuthorityId, AuthorityWeight)>; @@ -243,7 +251,7 @@ decl_module! { impl Module { /// Get the current set of authorities, along with their respective weights. - pub fn grandpa_authorities() -> Vec<(AuthorityId, u64)> { + pub fn grandpa_authorities() -> Vec<(AuthorityId, AuthorityWeight)> { Authorities::get() } @@ -292,7 +300,7 @@ impl Module { /// No change should be signaled while any change is pending. Returns /// an error if a change is already pending. pub fn schedule_change( - next_authorities: Vec<(AuthorityId, u64)>, + next_authorities: Vec<(AuthorityId, AuthorityWeight)>, in_blocks: T::BlockNumber, forced: Option, ) -> Result { @@ -337,29 +345,34 @@ impl Module { } impl Module { + /// Attempt to extract a GRANDPA log from a generic digest. 
pub fn grandpa_log(digest: &DigestOf) -> Option> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); digest.convert_first(|l| l.try_to::>(id)) } + /// Attempt to extract a pending set-change signal from a digest. pub fn pending_change(digest: &DigestOf) -> Option> { Self::grandpa_log(digest).and_then(|signal| signal.try_into_change()) } + /// Attempt to extract a forced set-change signal from a digest. pub fn forced_change(digest: &DigestOf) -> Option<(T::BlockNumber, ScheduledChange)> { Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change()) } + /// Attempt to extract a pause signal from a digest. pub fn pending_pause(digest: &DigestOf) -> Option { Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause()) } + /// Attempt to extract a resume signal from a digest. pub fn pending_resume(digest: &DigestOf) -> Option { @@ -367,7 +380,9 @@ impl Module { } } -impl session::OneSessionHandler for Module { +impl session::OneSessionHandler for Module + where T: session::Trait +{ type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -380,18 +395,27 @@ impl session::OneSessionHandler for Module { fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) where I: Iterator { - // instant changes - if changed { + // Always issue a change if `session` says that the validators have changed. + // Even if their session keys are the same as before, the underlying economic + // identities have changed.
+ let current_set_id = if changed { let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - let last_authorities = >::grandpa_authorities(); - if next_authorities != last_authorities { - if let Some((further_wait, median)) = >::take() { - let _ = Self::schedule_change(next_authorities, further_wait, Some(median)); - } else { - let _ = Self::schedule_change(next_authorities, Zero::zero(), None); - } + if let Some((further_wait, median)) = >::take() { + let _ = Self::schedule_change(next_authorities, further_wait, Some(median)); + } else { + let _ = Self::schedule_change(next_authorities, Zero::zero(), None); } - } + CurrentSetId::mutate(|s| { *s += 1; *s }) + } else { + // nothing's changed, neither economic conditions nor session keys. update the pointer + // of the current set. + Self::current_set_id() + }; + + // if we didn't issue a change, we update the mapping to note that the current + // set corresponds to the latest equivalent session (i.e. now). + let session_index = >::current_index(); + SetIdSession::insert(current_set_id, &session_index); } fn on_disabled(i: usize) { @@ -412,8 +436,8 @@ impl finality_tracker::OnFinalizationStalled for Modul #[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Encode, Decode)] struct GrandpaTimeSlot { // The order of these matters for `derive(Ord)`. - set_id: u64, - round: u64, + set_id: SetId, + round: RoundNumber, } // TODO [slashing]: Integrate this. diff --git a/srml/im-online/src/lib.rs b/srml/im-online/src/lib.rs index 0a5d03770bb4d..06a350655bc9d 100644 --- a/srml/im-online/src/lib.rs +++ b/srml/im-online/src/lib.rs @@ -67,7 +67,7 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use app_crypto::RuntimeAppPublic; +use app_crypto::{AppPublic, RuntimeAppPublic}; use codec::{Encode, Decode}; use primitives::offchain::{OpaqueNetworkState, StorageKind}; use rstd::prelude::*; @@ -75,7 +75,7 @@ use session::historical::IdentificationTuple; use sr_io::Printable; use sr_primitives::{ Perbill, ApplyError, - traits::{Extrinsic as ExtrinsicT, Convert}, + traits::{Convert, Extrinsic as ExtrinsicT, Member}, transaction_validity::{TransactionValidity, TransactionLongevity, ValidTransaction}, }; use sr_staking_primitives::{ @@ -83,28 +83,44 @@ use sr_staking_primitives::{ offence::{ReportOffence, Offence, Kind}, }; use srml_support::{ - StorageValue, decl_module, decl_event, decl_storage, StorageDoubleMap, print, ensure + decl_module, decl_event, decl_storage, print, ensure, + Parameter, StorageValue, StorageDoubleMap, }; use system::ensure_none; -mod app { - pub use app_crypto::ed25519 as crypto; - use app_crypto::{app_crypto, key_types::IM_ONLINE, ed25519}; +pub mod sr25519 { + mod app_sr25519 { + use app_crypto::{app_crypto, key_types::IM_ONLINE, sr25519}; + app_crypto!(sr25519, IM_ONLINE); + } + + /// An i'm online keypair using sr25519 as its crypto. + #[cfg(feature = "std")] + pub type AuthorityPair = app_sr25519::Pair; - app_crypto!(ed25519, IM_ONLINE); + /// An i'm online signature using sr25519 as its crypto. + pub type AuthoritySignature = app_sr25519::Signature; + + /// An i'm online identifier using sr25519 as its crypto. + pub type AuthorityId = app_sr25519::Public; } -/// A Babe authority keypair. Necessarily equivalent to the schnorrkel public key used in -/// the main Babe module. If that ever changes, then this must, too. -#[cfg(feature = "std")] -pub type AuthorityPair = app::Pair; +pub mod ed25519 { + mod app_ed25519 { + use app_crypto::{app_crypto, key_types::IM_ONLINE, ed25519}; + app_crypto!(ed25519, IM_ONLINE); + } + + /// An i'm online keypair using ed25519 as its crypto. 
+ #[cfg(feature = "std")] + pub type AuthorityPair = app_ed25519::Pair; -/// A Babe authority signature. -pub type AuthoritySignature = app::Signature; + /// An i'm online signature using ed25519 as its crypto. + pub type AuthoritySignature = app_ed25519::Signature; -/// A Babe authority identifier. Necessarily equivalent to the schnorrkel public key used in -/// the main Babe module. If that ever changes, then this must, too. -pub type AuthorityId = app::Public; + /// An i'm online identifier using ed25519 as its crypto. + pub type AuthorityId = app_ed25519::Public; +} // The local storage database key under which the worker progress status // is tracked. @@ -158,10 +174,13 @@ pub struct Heartbeat } pub trait Trait: system::Trait + session::historical::Trait { + /// The identifier type for an authority. + type AuthorityId: Member + Parameter + AppPublic + RuntimeAppPublic + Default; + /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From> + Into<::Event>; - /// The function call. + /// A dispatchable call type. type Call: From>; /// A extrinsic right from the external world. This is unchecked and so @@ -181,7 +200,9 @@ pub trait Trait: system::Trait + session::historical::Trait { } decl_event!( - pub enum Event { + pub enum Event where + ::AuthorityId, + { /// A new heartbeat was received from `AuthorityId` HeartbeatReceived(AuthorityId), } @@ -193,7 +214,7 @@ decl_storage! { GossipAt get(gossip_at): T::BlockNumber; /// The current set of keys that may issue a heartbeat. - Keys get(keys): Vec; + Keys get(keys): Vec; /// For each session index we keep a mapping of `AuthorityId` /// to `offchain::OpaqueNetworkState`. @@ -201,10 +222,10 @@ decl_storage! 
{ blake2_256(AuthIndex) => Vec; } add_extra_genesis { - config(keys): Vec; + config(keys): Vec; build(| storage: &mut (sr_primitives::StorageOverlay, sr_primitives::ChildrenStorageOverlay), - config: &GenesisConfig + config: &GenesisConfig | { sr_io::with_storage( storage, @@ -217,12 +238,12 @@ decl_storage! { decl_module! { pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; + fn deposit_event() = default; fn heartbeat( origin, heartbeat: Heartbeat, - signature: AuthoritySignature + signature: ::Signature ) { ensure_none(origin)?; @@ -232,7 +253,7 @@ decl_module! { ¤t_session, &heartbeat.authority_index ); - let keys = Keys::get(); + let keys = Keys::::get(); let public = keys.get(heartbeat.authority_index as usize); if let (true, Some(public)) = (!exists, public) { let signature_valid = heartbeat.using_encoded(|encoded_heartbeat| { @@ -240,7 +261,7 @@ decl_module! { }); ensure!(signature_valid, "Invalid heartbeat signature."); - Self::deposit_event(Event::HeartbeatReceived(public.clone())); + Self::deposit_event(Event::::HeartbeatReceived(public.clone())); let network_state = heartbeat.network_state.encode(); ::insert( @@ -297,8 +318,8 @@ impl Module { fn do_gossip_at(block_number: T::BlockNumber) -> Result<(), OffchainErr> { // we run only when a local authority key is configured - let authorities = Keys::get(); - let mut local_keys = app::Public::all(); + let authorities = Keys::::get(); + let mut local_keys = T::AuthorityId::all(); local_keys.sort(); for (authority_index, key) in authorities.into_iter() @@ -389,27 +410,27 @@ impl Module { } } - fn initialize_keys(keys: &[AuthorityId]) { + fn initialize_keys(keys: &[T::AuthorityId]) { if !keys.is_empty() { - assert!(Keys::get().is_empty(), "Keys are already initialized!"); - Keys::put_ref(keys); + assert!(Keys::::get().is_empty(), "Keys are already initialized!"); + Keys::::put_ref(keys); } } } impl session::OneSessionHandler for Module { - type Key = AuthorityId; + type 
Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where I: Iterator { let keys = validators.map(|x| x.1).collect::>(); Self::initialize_keys(&keys); } fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where I: Iterator { // Reset heartbeats ::remove_prefix(&>::current_index()); @@ -418,7 +439,7 @@ impl session::OneSessionHandler for Module { >::put(>::block_number()); // Remember who the authorities are for the new session. - Keys::put(validators.map(|x| x.1).collect::>()); + Keys::::put(validators.map(|x| x.1).collect::>()); } fn on_before_session_ending() { @@ -426,7 +447,7 @@ impl session::OneSessionHandler for Module { let current_session = >::current_index(); - let keys = Keys::get(); + let keys = Keys::::get(); let current_elected = T::CurrentElectedSet::current_elected_set(); // The invariant is that these two are of the same length. @@ -481,7 +502,7 @@ impl srml_support::unsigned::ValidateUnsigned for Module { } // verify that the incoming (unverified) pubkey is actually an authority id - let keys = Keys::get(); + let keys = Keys::::get(); let authority_id = match keys.get(heartbeat.authority_index as usize) { Some(id) => id, None => return TransactionValidity::Invalid(ApplyError::BadSignature as i8), diff --git a/srml/session/src/lib.rs b/srml/session/src/lib.rs index f8e13d529b875..c474962c650db 100644 --- a/srml/session/src/lib.rs +++ b/srml/session/src/lib.rs @@ -170,6 +170,12 @@ impl< pub trait OnSessionEnding { /// Handle the fact that the session is ending, and optionally provide the new validator set. /// + /// Even if the validator-set is the same as before, if any underlying economic + /// conditions have changed (i.e. stake-weights), the new validator set must be returned. 
+ /// This is necessary for consensus engines making use of the session module to + /// issue a validator-set change so misbehavior can be provably associated with the new + /// economic conditions as opposed to the old. + /// /// `ending_index` is the index of the currently ending session. /// The returned validator set, if any, will not be applied until `will_apply_at`. /// `will_apply_at` is guaranteed to be at least `ending_index + 1`, since session indices don't @@ -192,7 +198,11 @@ pub trait SessionHandler { /// should provide the same validator set. fn on_genesis_session(validators: &[(ValidatorId, Ks)]); - /// Session set has changed; act appropriately. + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true whenever any of the session keys or underlying economic + /// identities or weightings behind those keys has changed. fn on_new_session( changed: bool, validators: &[(ValidatorId, Ks)], @@ -217,11 +227,19 @@ pub trait OneSessionHandler { fn on_genesis_session<'a, I: 'a>(validators: I) where I: Iterator, ValidatorId: 'a; - /// Session set has changed; act appropriately. + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true when at least one of the session keys + /// or the underlying economic identities/distribution behind one of the + /// session keys has changed, false otherwise. + /// + /// The `validators` are the validators of the incoming session, and `queued_validators` + /// will follow. fn on_new_session<'a, I: 'a>( - _changed: bool, - _validators: I, - _queued_validators: I + changed: bool, + validators: I, + queued_validators: I, ) where I: Iterator, ValidatorId: 'a; @@ -341,10 +359,8 @@ decl_storage! { /// Current index of the session.
- Changed: bool; - - /// Queued keys changed. + /// True if the underlying economic identities or weighting behind the validators + /// has changed in the queued validator set. QueuedChanged: bool; /// The queued keys for the next session. When the next session begins, these keys @@ -443,13 +459,10 @@ decl_module! { Self::do_set_keys(&who, keys)?; - // Something changed. - Changed::put(true); - Ok(()) } - /// Called when a block is finalized. Will rotate session if it is the last + /// Called when a block is initialized. Will rotate session if it is the last /// block of the current session. fn on_initialize(n: T::BlockNumber) { if T::ShouldEndSession::should_end_session(n) { @@ -467,7 +480,9 @@ impl Module { let session_index = CurrentIndex::get(); let changed = QueuedChanged::get(); - let mut next_changed = Changed::take(); + + // Inform the session handlers that a session is going to end. + T::SessionHandler::on_before_session_ending(); // Get queued session keys and validators. let session_keys = >::get(); @@ -479,12 +494,16 @@ impl Module { let applied_at = session_index + 2; // Get next validator set. - let maybe_validators = T::OnSessionEnding::on_session_ending(session_index, applied_at); - let next_validators = if let Some(validators) = maybe_validators { - next_changed = true; - validators + let maybe_next_validators = T::OnSessionEnding::on_session_ending(session_index, applied_at); + let (next_validators, next_identities_changed) + = if let Some(validators) = maybe_next_validators + { + // NOTE: as per the documentation on `OnSessionEnding`, we consider + // the validator set as having changed even if the validators are the + // same as before, as underlying economic conditions may have changed. + (validators, true) } else { - >::get() + (>::get(), false) }; // Increment session index. @@ -492,9 +511,34 @@ impl Module { CurrentIndex::put(session_index); // Queue next session keys. 
- let queued_amalgamated = next_validators.into_iter() - .map(|a| { let k = Self::load_keys(&a).unwrap_or_default(); (a, k) }) - .collect::>(); + let (queued_amalgamated, next_changed) = { + // until we are certain there has been a change, iterate the prior + // validators along with the current and check for changes + let mut changed = next_identities_changed; + + let mut now_session_keys = session_keys.iter(); + let mut check_next_changed = |keys: &T::Keys| { + if changed { return } + // since a new validator set always leads to `changed` starting + // as true, we can ensure that `now_session_keys` and `next_validators` + // have the same length. this function is called once per iteration. + if let Some(&(_, ref old_keys)) = now_session_keys.next() { + if old_keys != keys { + changed = true; + return + } + } + }; + let queued_amalgamated = next_validators.into_iter() + .map(|a| { + let k = Self::load_keys(&a).unwrap_or_default(); + check_next_changed(&k); + (a, k) + }) + .collect::>(); + + (queued_amalgamated, changed) + }; >::put(queued_amalgamated.clone()); QueuedChanged::put(next_changed); @@ -503,13 +547,16 @@ impl Module { Self::deposit_event(Event::NewSession(session_index)); // Tell everyone about the new session keys. - T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); + T::SessionHandler::on_new_session::( + changed, + &session_keys, + &queued_amalgamated, + ); } /// Disable the validator of index `i`. pub fn disable_index(i: usize) { T::SessionHandler::on_disabled(i); - Changed::put(true); } /// Disable the validator identified by `c`. 
(If using with the staking module, this would be @@ -554,8 +601,6 @@ impl Module { let key_data = old_keys.get_raw(id); Self::clear_key_owner(id, key_data); } - - Changed::put(true); } } @@ -621,6 +666,7 @@ mod tests { use mock::{ NEXT_VALIDATORS, SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, set_next_validators, set_session_length, session_changed, Test, Origin, System, Session, + reset_before_session_end_called, before_session_end_called, }; fn new_test_ext() -> runtime_io::TestExternalities { @@ -668,13 +714,13 @@ mod tests { Session::on_free_balance_zero(&1); assert_eq!(Session::load_keys(&1), None); assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); - - assert!(Changed::get()); }) } #[test] fn authorities_should_track_validators() { + reset_before_session_end_called(); + with_externalities(&mut new_test_ext(), || { set_next_validators(vec![1, 2]); force_new_session(); @@ -685,6 +731,8 @@ mod tests { ]); assert_eq!(Session::validators(), vec![1, 2, 3]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + assert!(before_session_end_called()); + reset_before_session_end_called(); force_new_session(); initialize_block(2); @@ -694,6 +742,8 @@ mod tests { ]); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); + reset_before_session_end_called(); set_next_validators(vec![1, 2, 4]); assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); @@ -706,6 +756,7 @@ mod tests { ]); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); force_new_session(); initialize_block(4); @@ -787,38 +838,63 @@ mod tests { #[test] fn session_changed_flag_works() { + reset_before_session_end_called(); + with_externalities(&mut new_test_ext(), || { 
TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); force_new_session(); initialize_block(1); assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); force_new_session(); initialize_block(2); assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); Session::disable_index(0); force_new_session(); initialize_block(3); assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); force_new_session(); initialize_block(4); assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); force_new_session(); initialize_block(5); assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); force_new_session(); initialize_block(6); assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + // changing the keys of a validator leads to change. + assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); force_new_session(); initialize_block(7); assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + // while changing the keys of a non-validator does not. + force_new_session(); + initialize_block(7); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); }); } diff --git a/srml/session/src/mock.rs b/srml/session/src/mock.rs index c5608e1a54571..445076be65202 100644 --- a/srml/session/src/mock.rs +++ b/srml/session/src/mock.rs @@ -44,6 +44,7 @@ impl_outer_origin! { } thread_local! 
{ + pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); pub static AUTHORITIES: RefCell> = RefCell::new(vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); @@ -51,6 +52,9 @@ thread_local! { pub static SESSION_LENGTH: RefCell = RefCell::new(2); pub static SESSION_CHANGED: RefCell = RefCell::new(false); pub static TEST_SESSION_CHANGED: RefCell = RefCell::new(false); + pub static DISABLED: RefCell = RefCell::new(false); + // Stores if `on_before_session_end` was called + pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); } pub struct TestShouldEndSession; @@ -76,14 +80,27 @@ impl SessionHandler for TestSessionHandler { .collect() ); } - fn on_disabled(_validator_index: usize) {} + fn on_disabled(_validator_index: usize) { + DISABLED.with(|l| *l.borrow_mut() = true) + } + fn on_before_session_ending() { + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = true); + } } pub struct TestOnSessionEnding; impl OnSessionEnding for TestOnSessionEnding { fn on_session_ending(_: SessionIndex, _: SessionIndex) -> Option> { if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { - Some(NEXT_VALIDATORS.with(|l| l.borrow().clone())) + VALIDATORS.with(|v| { + let mut v = v.borrow_mut(); + *v = NEXT_VALIDATORS.with(|l| l.borrow().clone()); + Some(v.clone()) + }) + } else if DISABLED.with(|l| std::mem::replace(&mut *l.borrow_mut(), false)) { + // If there was a disabled validator, underlying conditions have changed + // so we return `Some`. 
+ Some(VALIDATORS.with(|v| v.borrow().clone())) } else { None } @@ -92,16 +109,13 @@ impl OnSessionEnding for TestOnSessionEnding { #[cfg(feature = "historical")] impl crate::historical::OnSessionEnding for TestOnSessionEnding { - fn on_session_ending(_: SessionIndex, _: SessionIndex) + fn on_session_ending(ending_index: SessionIndex, will_apply_at: SessionIndex) -> Option<(Vec, Vec<(u64, u64)>)> { - if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { - let last_validators = Session::validators(); - let last_identifications = last_validators.into_iter().map(|v| (v, v)).collect(); - Some((NEXT_VALIDATORS.with(|l| l.borrow().clone()), last_identifications)) - } else { - None - } + let pair_with_ids = |vals: &[u64]| vals.iter().map(|&v| (v, v)).collect::>(); + >::on_session_ending(ending_index, will_apply_at) + .map(|vals| (pair_with_ids(&vals), vals)) + .map(|(ids, vals)| (vals, ids)) } } @@ -125,8 +139,17 @@ pub fn set_next_validators(next: Vec) { NEXT_VALIDATORS.with(|v| *v.borrow_mut() = next); } +pub fn before_session_end_called() -> bool { + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow()) +} + +pub fn reset_before_session_end_called() { + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); +} + #[derive(Clone, Eq, PartialEq)] pub struct Test; + parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: u32 = 1024; diff --git a/srml/staking/src/lib.rs b/srml/staking/src/lib.rs index e7bb42c64dfe5..2e46d6b2b4a49 100644 --- a/srml/staking/src/lib.rs +++ b/srml/staking/src/lib.rs @@ -465,9 +465,6 @@ type ExpoMap = BTreeMap< Exposure<::AccountId, BalanceOf> >; -pub const DEFAULT_SESSIONS_PER_ERA: u32 = 3; -pub const DEFAULT_BONDING_DURATION: u32 = 1; - /// Means for interacting with a specialized version of the `session` trait. 
/// /// This is needed because `Staking` sets the `ValidatorIdOf` of the `session::Trait` @@ -1182,7 +1179,7 @@ impl Module { let rewards = CurrentEraRewards::take(); let now = T::Time::now(); let previous_era_start = >::mutate(|v| { - rstd::mem::replace(v, now.clone()) + rstd::mem::replace(v, now) }); let era_duration = now - previous_era_start; if !era_duration.is_zero() { @@ -1355,6 +1352,10 @@ impl Module { // Set the new validator set in sessions. >::put(&elected_stashes); + // In order to keep the property required by `on_session_ending` + // that we must return the new validator set even if it's the same as the old, + // as long as any underlying economic conditions have changed, we don't attempt + // to do any optimization where we compare against the prior set. (slot_stake, Some(elected_stashes)) } else { // There were not enough candidates for even our minimal level of functionality. diff --git a/srml/support/Cargo.toml b/srml/support/Cargo.toml index 2befc11b19538..cf8c90993aef6 100644 --- a/srml/support/Cargo.toml +++ b/srml/support/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] serde = { version = "1.0", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.0.5", default-features = false, features = ["derive"] } srml-metadata = { path = "../metadata", default-features = false } sr-std = { path = "../../core/sr-std", default-features = false } runtime_io = { package = "sr-io", path = "../../core/sr-io", default-features = false } diff --git a/srml/support/procedural/src/storage/impls.rs b/srml/support/procedural/src/storage/impls.rs index dd93663673514..d67bc5a116e9d 100644 --- a/srml/support/procedural/src/storage/impls.rs +++ b/srml/support/procedural/src/storage/impls.rs @@ -43,6 +43,8 @@ pub(crate) struct Impls<'a, I: Iterator> { pub instance_opts: &'a InstanceOpts, pub type_infos:
DeclStorageTypeInfos<'a>, pub fielddefault: TokenStream2, + pub default_delegator_ident: syn::Ident, + pub default_delegator_return: TokenStream2, pub prefix: String, pub cratename: &'a syn::Ident, pub name: &'a syn::Ident, @@ -60,6 +62,8 @@ impl<'a, I: Iterator> Impls<'a, I> { instance_opts, type_infos, fielddefault, + default_delegator_ident, + default_delegator_return, prefix, name, attrs, @@ -116,6 +120,17 @@ impl<'a, I: Iterator> Impls<'a, I> { // generator for value quote! { + #visibility struct #default_delegator_ident<#struct_trait>( + #scrate::rstd::marker::PhantomData<(#trait_and_instance)> + ) #where_clause; + impl<#impl_trait> #scrate::traits::StorageDefault<#typ> + for #default_delegator_ident<#trait_and_instance> #where_clause + { + fn default() -> Option<#typ> { + #default_delegator_return + } + } + #( #[ #attrs ] )* #visibility struct #name<#struct_trait>( #scrate::rstd::marker::PhantomData<(#trait_and_instance)> @@ -125,6 +140,7 @@ impl<'a, I: Iterator> Impls<'a, I> { for #name<#trait_and_instance> #where_clause { type Query = #value_type; + type Default = #default_delegator_ident<#trait_and_instance>; /// Get the storage key. 
fn key() -> &'static [u8] { @@ -168,6 +184,8 @@ impl<'a, I: Iterator> Impls<'a, I> { instance_opts, type_infos, fielddefault, + default_delegator_ident, + default_delegator_return, prefix, name, attrs, @@ -230,6 +248,17 @@ impl<'a, I: Iterator> Impls<'a, I> { // generator for map quote!{ + #visibility struct #default_delegator_ident<#struct_trait>( + #scrate::rstd::marker::PhantomData<(#trait_and_instance)> + ) #where_clause; + impl<#impl_trait> #scrate::traits::StorageDefault<#typ> + for #default_delegator_ident<#trait_and_instance> #where_clause + { + fn default() -> Option<#typ> { + #default_delegator_return + } + } + #( #[ #attrs ] )* #visibility struct #name<#struct_trait>( #scrate::rstd::marker::PhantomData<(#trait_and_instance)> @@ -239,8 +268,8 @@ impl<'a, I: Iterator> Impls<'a, I> { for #name<#trait_and_instance> #where_clause { type Query = #value_type; - type Hasher = #scrate::#hasher; + type Default = #default_delegator_ident<#trait_and_instance>; /// Get the prefix key in storage. 
fn prefix() -> &'static [u8] { @@ -283,6 +312,10 @@ impl<'a, I: Iterator> Impls<'a, I> { impl<#impl_trait> #scrate::storage::hashed::generator::AppendableStorageMap<#kty, #typ> for #name<#trait_and_instance> #where_clause {} + + impl<#impl_trait> #scrate::storage::hashed::generator::DecodeLengthStorageMap<#kty, #typ> + for #name<#trait_and_instance> #where_clause + {} } } @@ -295,6 +328,8 @@ impl<'a, I: Iterator> Impls<'a, I> { instance_opts, type_infos, fielddefault, + default_delegator_ident, + default_delegator_return, prefix, name, attrs, @@ -567,12 +602,23 @@ impl<'a, I: Iterator> Impls<'a, I> { #structure + #visibility struct #default_delegator_ident<#struct_trait>( + #scrate::rstd::marker::PhantomData<(#trait_and_instance)> + ) #where_clause; + impl<#impl_trait> #scrate::traits::StorageDefault<#typ> + for #default_delegator_ident<#trait_and_instance> #where_clause + { + fn default() -> Option<#typ> { + #default_delegator_return + } + } + impl<#impl_trait> #scrate::storage::hashed::generator::StorageMap<#kty, #typ> for #name<#trait_and_instance> #where_clause { type Query = #value_type; - type Hasher = #scrate::#hasher; + type Default = #default_delegator_ident<#trait_and_instance>; /// Get the prefix key in storage. fn prefix() -> &'static [u8] { @@ -666,6 +712,42 @@ impl<'a, I: Iterator> Impls<'a, I> { #mutate_impl; ret } + + // Swap must be overridden not to break links. + fn swap>( + key1: &#kty, + key2: &#kty, + storage: &mut S, + ) { + use self::#inner_module::Utils; + + let final_key1 = &*#as_map::key_for(key1); + let final_key2 = &*#as_map::key_for(key2); + let full_value_1 = Self::read_with_linkage(storage, final_key1); + let full_value_2 = Self::read_with_linkage(storage, final_key2); + + match (full_value_1, full_value_2) { + // Just keep linkage in order and only swap values.
+ (Some((value1, linkage1)), Some((value2, linkage2))) => { + storage.put(final_key1, &(value2, linkage1)); + storage.put(final_key2, &(value1, linkage2)); + } + // Remove key and insert the new one. + (Some((value, linkage)), None) => { + #as_map::remove(key1, storage); + let linkage = Self::new_head_linkage(storage, key2); + storage.put(final_key2, &(value, linkage)); + } + // Remove key and insert the new one. + (None, Some((value, linkage))) => { + #as_map::remove(key2, storage); + let linkage = Self::new_head_linkage(storage, key1); + storage.put(final_key1, &(value, linkage)); + } + // No-op. + (None, None) => (), + } + } } impl<#impl_trait> #scrate::storage::hashed::generator::EnumerableStorageMap<#kty, #typ> @@ -694,6 +776,10 @@ impl<'a, I: Iterator> Impls<'a, I> { }) } } + + impl<#impl_trait> #scrate::storage::hashed::generator::DecodeLengthStorageMap<#kty, #typ> + for #name<#trait_and_instance> #where_clause + {} } } diff --git a/srml/support/procedural/src/storage/transformation.rs b/srml/support/procedural/src/storage/transformation.rs index d378907cb1e72..039bf8c8f611b 100644 --- a/srml/support/procedural/src/storage/transformation.rs +++ b/srml/support/procedural/src/storage/transformation.rs @@ -35,7 +35,7 @@ use syn::{ }, parse_macro_input, }; -use quote::quote; +use quote::{quote, quote_spanned}; use super::*; @@ -417,7 +417,16 @@ fn decl_store_extra_genesis( } assimilate_require_generic |= ext::expr_contains_ident(&expr.content, traitinstance); let content = &expr.content; - scall = quote!( ( #content ) ); + scall = quote_spanned! 
{ expr.span() => + let scall: fn( + &mut ( + #scrate::sr_primitives::StorageOverlay, + #scrate::sr_primitives::ChildrenStorageOverlay + ), + &Self + ) = #content; + scall + }; has_scall = true; }, } @@ -753,10 +762,26 @@ fn decl_storage_items( } = sline; let type_infos = get_type_infos(storage_type); + let fielddefault = default_value.inner + .as_ref() + .map(|d| &d.expr) + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!{ Default::default() }); let kind = type_infos.kind.clone(); // Propagate doc attributes. let attrs = attrs.inner.iter().filter_map(|a| a.parse_meta().ok()).filter(|m| m.name() == "doc"); + // create default value delegator + let default_delegator_ident = Ident::new( + &format!("{}{}", name.to_string(), "DefaultDelegator"), + proc_macro2::Span::call_site(), + ); + let default_delegator_return = if !type_infos.is_option { + quote! { Some(#fielddefault) } + } else { + quote! { #fielddefault } + }; + let i = impls::Impls { scrate, visibility, @@ -765,8 +790,9 @@ fn decl_storage_items( traittype, instance_opts, type_infos, - fielddefault: default_value.inner.as_ref().map(|d| &d.expr).map(|d| quote!( #d )) - .unwrap_or_else(|| quote!{ Default::default() }), + fielddefault, + default_delegator_ident, + default_delegator_return, prefix: build_prefix(cratename, name), name, attrs, diff --git a/srml/support/src/lib.rs b/srml/support/src/lib.rs index 5623cab70f2ab..9ebd848fca4c4 100644 --- a/srml/support/src/lib.rs +++ b/srml/support/src/lib.rs @@ -62,7 +62,8 @@ mod double_map; pub mod traits; pub use self::storage::{ - StorageValue, StorageMap, EnumerableStorageMap, StorageDoubleMap, AppendableStorageMap + StorageValue, StorageMap, EnumerableStorageMap, StorageDoubleMap, AppendableStorageMap, + DecodeLengthStorageMap, }; pub use self::hashable::Hashable; pub use self::dispatch::{Parameter, Dispatchable, Callable, IsSubType}; @@ -342,6 +343,35 @@ mod tests { }); } + #[test] + fn linked_map_swap_works() { + with_externalities(&mut new_test_ext(), || { + 
OptionLinkedMap::insert(0, 0); + OptionLinkedMap::insert(1, 1); + OptionLinkedMap::insert(2, 2); + OptionLinkedMap::insert(3, 3); + + let collect = || OptionLinkedMap::enumerate().collect::>(); + assert_eq!(collect(), vec![(3, 3), (2, 2), (1, 1), (0, 0)]); + + // Two existing + OptionLinkedMap::swap(1, 2); + assert_eq!(collect(), vec![(3, 3), (2, 1), (1, 2), (0, 0)]); + + // Back to normal + OptionLinkedMap::swap(2, 1); + assert_eq!(collect(), vec![(3, 3), (2, 2), (1, 1), (0, 0)]); + + // Left existing + OptionLinkedMap::swap(2, 5); + assert_eq!(collect(), vec![(5, 2), (3, 3), (1, 1), (0, 0)]); + + // Right existing + OptionLinkedMap::swap(5, 2); + assert_eq!(collect(), vec![(2, 2), (3, 3), (1, 1), (0, 0)]); + }); + } + #[test] fn linked_map_basic_insert_remove_should_work() { with_externalities(&mut new_test_ext(), || { diff --git a/srml/support/src/storage/hashed/generator.rs b/srml/support/src/storage/hashed/generator.rs index f8b8fb5483e1e..27b459e3ffa0e 100644 --- a/srml/support/src/storage/hashed/generator.rs +++ b/srml/support/src/storage/hashed/generator.rs @@ -14,12 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Abstract storage to use on HashedStorage trait +//! Abstract storage to use on HashedStorage trait. Please refer to the +//! [top level docs](../../index.html) for more detailed documentation about storage traits and functions. use crate::codec::{self, Encode}; -use crate::rstd::prelude::{Vec, Box}; +use crate::rstd::{prelude::{Vec, Box}, iter::FromIterator}; #[cfg(feature = "std")] use crate::storage::unhashed::generator::UnhashedStorage; +use crate::traits::{StorageDefault, Len}; use runtime_io::{twox_64, twox_128, blake2_128, twox_256, blake2_256}; pub trait StorageHasher: 'static { @@ -164,6 +166,9 @@ impl HashedStorage for sr_primitives::StorageOverlay { pub trait StorageValue { /// The type that get/take returns. 
type Query; + /// Something that can provide the default value of this storage type. + type Default: StorageDefault; + /// Get the storage key. fn key() -> &'static [u8]; @@ -202,24 +207,75 @@ pub trait StorageValue { /// Append the given items to the value in the storage. /// /// `T` is required to implement `codec::EncodeAppend`. - fn append, I: codec::Encode>( - items: &[I], storage: &mut S - ) -> Result<(), &'static str> where T: codec::EncodeAppend { + fn append<'a, S, I, R>( + items: R, + storage: &mut S, + ) -> Result<(), &'static str> where + S: HashedStorage, + I: 'a + codec::Encode, + T: codec::EncodeAppend, + R: IntoIterator, + R::IntoIter: ExactSizeIterator, + { let new_val = ::append( - storage.get_raw(Self::key()).unwrap_or_default(), + // if the key exists, directly append to it. + storage.get_raw(Self::key()).unwrap_or_else(|| { + // otherwise, try and read a proper __provided__ default. + Self::Default::default().map(|v| v.encode()) + // or just use the Rust's `default()` value. + .unwrap_or_default() + }), items, ).map_err(|_| "Could not append given item")?; storage.put_raw(Self::key(), &new_val); Ok(()) } + + /// Safely append the given items to the value in the storage. If a codec error occurs, then the + /// old (presumably corrupt) value is replaced with the given `items`. + /// + /// `T` is required to implement `codec::EncodeAppend`. + fn append_or_put<'a, S, I, R>( + items: R, + storage: &mut S, + ) where + S: HashedStorage, + I: 'a + codec::Encode + Clone, + T: codec::EncodeAppend + FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, + { + Self::append(items.clone(), storage) + .unwrap_or_else(|_| Self::put(&items.into_iter().cloned().collect(), storage)); + } + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. + /// + /// Note that `0` is returned as the default value if no encoded value exists at the given key. 
+ /// Therefore, this function cannot be used as a sign of _existence_. use the `::exists()` + /// function for this purpose. + fn decode_len>(storage: &mut S) -> Result + where T: codec::DecodeLength, T: Len + { + // attempt to get the length directly. + if let Some(k) = storage.get_raw(Self::key()) { + ::len(&k).map_err(|e| e.what()) + } else { + Ok(Self::Default::default().map(|v| v.len()).unwrap_or(0)) + } + } } /// A strongly-typed map in storage. pub trait StorageMap { /// The type that get/take returns. type Query; - + /// Hasher type type Hasher: StorageHasher; + /// Something that can provide the default value of this storage type. + type Default: StorageDefault; /// Get the prefix key in storage. fn prefix() -> &'static [u8]; @@ -295,16 +351,69 @@ pub trait EnumerableStorageMap: StorageMap: StorageMap { /// Append the given items to the value in the storage. /// - /// `T` is required to implement `codec::EncodeAppend`. - fn append, I: codec::Encode>( - key : &K, items: &[I], storage: &mut S - ) -> Result<(), &'static str> where V: codec::EncodeAppend { + /// `V` is required to implement `codec::EncodeAppend`. + fn append<'a, S, I, R>( + key : &K, + items: R, + storage: &mut S, + ) -> Result<(), &'static str> where + S: HashedStorage, + I: 'a + codec::Encode, + V: codec::EncodeAppend, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, + { let k = Self::key_for(key); let new_val = ::append( - storage.get_raw(&k[..]).unwrap_or_default(), + storage.get_raw(&k[..]).unwrap_or_else(|| { + // otherwise, try and read a proper __provided__ default. + Self::Default::default().map(|v| v.encode()) + // or just use the default value. + .unwrap_or_default() + }), items, ).map_err(|_| "Could not append given item")?; storage.put_raw(&k[..], &new_val); Ok(()) } + + /// Safely append the given items to the value in the storage. If a codec error occurs, then the + /// old (presumably corrupt) value is replaced with the given `items`. 
+ /// + /// `T` is required to implement `codec::EncodeAppend`. + fn append_or_insert<'a, S, I, R>( + key : &K, + items: R, + storage: &mut S, + ) where + S: HashedStorage, + I: 'a + codec::Encode + Clone, + V: codec::EncodeAppend + crate::rstd::iter::FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, + { + Self::append(key, items.clone(), storage) + .unwrap_or_else(|_| Self::insert(key, &items.into_iter().cloned().collect(), storage)); + } +} + +/// A storage map with a decodable length. +pub trait DecodeLengthStorageMap: StorageMap { + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. + /// + /// Note that `0` is returned as the default value if no encoded value exists at the given key. + /// Therefore, this function cannot be used as a sign of _existence_. use the `::exists()` + /// function for this purpose. + fn decode_len>(key: &K, storage: &mut S) -> Result + where V: codec::DecodeLength, V: Len + { + let k = Self::key_for(key); + if let Some(v) = storage.get_raw(&k[..]) { + ::len(&v).map_err(|e| e.what()) + } else { + Ok(Self::Default::default().map(|v| v.len()).unwrap_or(0)) + } + } } diff --git a/srml/support/src/storage/mod.rs b/srml/support/src/storage/mod.rs index 385fad42eb260..aa3faee87852d 100644 --- a/srml/support/src/storage/mod.rs +++ b/srml/support/src/storage/mod.rs @@ -17,10 +17,11 @@ //! Stuff to do with the runtime's storage. use crate::rstd::prelude::*; -use crate::rstd::borrow::Borrow; +use crate::rstd::{borrow::Borrow, iter::FromIterator}; use codec::{Codec, Encode, Decode, KeyedVec, EncodeAppend}; use hashed::generator::{HashedStorage, StorageHasher}; use unhashed::generator::UnhashedStorage; +use crate::traits::{StorageDefault, Len}; #[macro_use] pub mod storage_items; @@ -107,6 +108,8 @@ impl UnhashedStorage for RuntimeStorage { pub trait StorageValue { /// The type that get/take return. 
type Query; + /// Something that can provide the default value of this storage type. + type Default: StorageDefault; /// Get the storage key. fn key() -> &'static [u8]; @@ -136,12 +139,39 @@ pub trait StorageValue { /// Append the given item to the value in the storage. /// /// `T` is required to implement `codec::EncodeAppend`. - fn append(items: &[I]) -> Result<(), &'static str> - where T: EncodeAppend; + fn append<'a, I, R>(items: R) -> Result<(), &'static str> where + I: 'a + Encode, + T: EncodeAppend, + R: IntoIterator, + R::IntoIter: ExactSizeIterator; + + /// Append the given items to the value in the storage. + /// + /// `T` is required to implement `Codec::EncodeAppend`. + /// + /// Upon any failure, it replaces `items` as the new value (assuming that the previous stored + /// data is simply corrupt and no longer usable). + /// + /// ### WARNING + /// + /// use with care; if your use-case is not _exactly_ as what this function is doing, + /// you should use append and sensibly handle failure within the runtime code if it happens. + fn append_or_put<'a, I, R>(items: R) where + I: 'a + Encode + Clone, + T: EncodeAppend + FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator; + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. 
+ fn decode_len() -> Result + where T: codec::DecodeLength, T: Len; } impl StorageValue for U where U: hashed::generator::StorageValue { type Query = U::Query; + type Default = U::Default; fn key() -> &'static [u8] { >::key() @@ -167,17 +197,35 @@ impl StorageValue for U where U: hashed::generator::StorageValue fn take() -> Self::Query { U::take(&mut RuntimeStorage) } - fn append(items: &[I]) -> Result<(), &'static str> - where T: EncodeAppend + fn append<'a, I, R>(items: R) -> Result<(), &'static str> where + I: 'a + Encode, + T: EncodeAppend, + R: IntoIterator, + R::IntoIter: ExactSizeIterator, { U::append(items, &mut RuntimeStorage) } + fn append_or_put<'a, I, R>(items: R) where + I: 'a + Encode + Clone, + T: EncodeAppend + FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, + { + U::append_or_put(items, &mut RuntimeStorage) + } + fn decode_len() -> Result + where T: codec::DecodeLength, T: Len + { + U::decode_len(&mut RuntimeStorage) + } } /// A strongly-typed map in storage. pub trait StorageMap { /// The type that get/take return. type Query; + /// Something that can provide the default value of this storage type. + type Default: StorageDefault; /// Get the prefix key in storage. fn prefix() -> &'static [u8]; @@ -213,6 +261,7 @@ pub trait StorageMap { impl StorageMap for U where U: hashed::generator::StorageMap { type Query = U::Query; + type Default = U::Default; fn prefix() -> &'static [u8] { >::prefix() @@ -260,18 +309,85 @@ pub trait AppendableStorageMap: StorageMap { /// Append the given item to the value in the storage. /// /// `T` is required to implement `codec::EncodeAppend`. 
- fn append, I: Encode>(key: KeyArg, items: &[I]) -> Result<(), &'static str> - where V: EncodeAppend; + fn append<'a, KeyArg, I, R>( + key: KeyArg, + items: R, + ) -> Result<(), &'static str> where + KeyArg: Borrow, + I: 'a + codec::Encode, + V: EncodeAppend, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator; + + /// Append the given items to the value in the storage. + /// + /// `T` is required to implement `codec::EncodeAppend`. + /// + /// Upon any failure, it replaces `items` as the new value (assuming that the previous stored + /// data is simply corrupt and no longer usable). + /// + /// WARNING: use with care; if your use-case is not _exactly_ as what this function is doing, + /// you should use append and sensibly handle failure within the runtime code if it happens. + fn append_or_insert<'a, KeyArg, I, R>( + key: KeyArg, + items: R, + ) where + KeyArg: Borrow, + I: 'a + codec::Encode + Clone, + V: codec::EncodeAppend + FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator; } impl AppendableStorageMap for U where U: hashed::generator::AppendableStorageMap { - fn append, I: Encode>(key: KeyArg, items: &[I]) -> Result<(), &'static str> - where V: EncodeAppend + fn append<'a, KeyArg, I, R>( + key: KeyArg, + items: R, + ) -> Result<(), &'static str> where + KeyArg: Borrow, + I: 'a + codec::Encode, + V: EncodeAppend, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, { U::append(key.borrow(), items, &mut RuntimeStorage) } + + fn append_or_insert<'a, KeyArg, I, R>( + key: KeyArg, + items: R, + ) where + KeyArg: Borrow, + I: 'a + codec::Encode + Clone, + V: codec::EncodeAppend + FromIterator, + R: IntoIterator + Clone, + R::IntoIter: ExactSizeIterator, + { + U::append_or_insert(key.borrow(), items, &mut RuntimeStorage) + } +} + +/// A storage map with a decodable length. +pub trait DecodeLengthStorageMap: StorageMap { + /// Read the length of the value in a fast way, without decoding the entire value. 
+ /// + /// `T` is required to implement `Codec::DecodeLength`. + /// + /// Has the same logic as [`StorageValue`](trait.StorageValue.html). + fn decode_len>(key: KeyArg) -> Result + where V: codec::DecodeLength, V: Len; +} + +impl DecodeLengthStorageMap for U + where U: hashed::generator::DecodeLengthStorageMap +{ + fn decode_len>(key: KeyArg) -> Result + where V: codec::DecodeLength, V: Len + { + U::decode_len(key.borrow(), &mut RuntimeStorage) + } } /// A storage map that can be enumerated. diff --git a/srml/support/src/storage/storage_items.rs b/srml/support/src/storage/storage_items.rs index 06cb8fc55b5cd..a2a5c3229fa00 100644 --- a/srml/support/src/storage/storage_items.rs +++ b/srml/support/src/storage/storage_items.rs @@ -172,6 +172,7 @@ macro_rules! __storage_items_internal { impl $crate::storage::hashed::generator::StorageValue<$ty> for $name { type Query = $gettype; + type Default = (); /// Get the storage key. fn key() -> &'static [u8] { @@ -221,8 +222,8 @@ macro_rules! __storage_items_internal { impl $crate::storage::hashed::generator::StorageMap<$kty, $ty> for $name { type Query = $gettype; - type Hasher = $crate::Blake2_256; + type Default = (); /// Get the prefix key in storage. fn prefix() -> &'static [u8] { @@ -795,18 +796,41 @@ mod test3 { #[cfg(test)] #[allow(dead_code)] -mod test_map_vec_append { +mod test_append_and_len { + use crate::storage::{AppendableStorageMap, DecodeLengthStorageMap, StorageMap, StorageValue}; + use runtime_io::{with_externalities, TestExternalities}; + use codec::{Encode, Decode}; + pub trait Trait { type Origin; type BlockNumber; } + decl_module! { pub struct Module for enum Call where origin: T::Origin {} } + + #[derive(PartialEq, Eq, Clone, Encode, Decode)] + struct NoDef(u32); + crate::decl_storage! 
{ trait Store for Module as Test { + NoDefault: Option; + JustVec: Vec; + JustVecWithDefault: Vec = vec![6, 9]; + OptionVec: Option>; + OptionVecWithDefault: Option> = Some(vec![6, 9]); + MapVec: map u32 => Vec; + MapVecWithDefault: map u32 => Vec = vec![6, 9]; + OptionMapVec: map u32 => Option>; + OptionMapVecWithDefault: map u32 => Option> = Some(vec![6, 9]); + + LinkedMapVec: linked_map u32 => Vec; + LinkedMapVecWithDefault: linked_map u32 => Vec = vec![6, 9]; + OptionLinkedMapVec: linked_map u32 => Option>; + OptionLinkedMapVecWithDefault: linked_map u32 => Option> = Some(vec![6, 9]); } } @@ -817,21 +841,116 @@ mod test_map_vec_append { type BlockNumber = u32; } + #[test] + fn default_for_option() { + with_externalities(&mut TestExternalities::default(), || { + assert_eq!(OptionVecWithDefault::get(), Some(vec![6, 9])); + assert_eq!(OptionVec::get(), None); + assert_eq!(JustVec::get(), vec![]); + }); + } + #[test] fn append_works() { - use crate::storage::{AppendableStorageMap, StorageMap, StorageValue}; - use runtime_io::{with_externalities, TestExternalities}; + with_externalities(&mut TestExternalities::default(), || { + let _ = MapVec::append(1, [1, 2, 3].iter()); + let _ = MapVec::append(1, [4, 5].iter()); + assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); + + let _ = JustVec::append([1, 2, 3].iter()); + let _ = JustVec::append([4, 5].iter()); + assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); + }); + } + + #[test] + fn append_works_for_default() { + with_externalities(&mut TestExternalities::default(), || { + assert_eq!(JustVecWithDefault::get(), vec![6, 9]); + let _ = JustVecWithDefault::append([1].iter()); + assert_eq!(JustVecWithDefault::get(), vec![6, 9, 1]); + + assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); + let _ = MapVecWithDefault::append(0, [1].iter()); + assert_eq!(MapVecWithDefault::get(0), vec![6, 9, 1]); + + assert_eq!(OptionVec::get(), None); + let _ = OptionVec::append([1].iter()); + assert_eq!(OptionVec::get(), Some(vec![1])); + }); + 
} + #[test] + fn append_or_put_works() { with_externalities(&mut TestExternalities::default(), || { - let _ = MapVec::append(1, &[1, 2, 3]); - let _ = MapVec::append(1, &[4, 5]); + let _ = MapVec::append_or_insert(1, [1, 2, 3].iter()); + let _ = MapVec::append_or_insert(1, [4, 5].iter()); assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); - let _ = JustVec::append(&[1, 2, 3]); - let _ = JustVec::append(&[4, 5]); + let _ = JustVec::append_or_put([1, 2, 3].iter()); + let _ = JustVec::append_or_put([4, 5].iter()); assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); }); } + + #[test] + fn len_works() { + with_externalities(&mut TestExternalities::default(), || { + JustVec::put(&vec![1, 2, 3, 4]); + OptionVec::put(&vec![1, 2, 3, 4, 5]); + MapVec::insert(1, &vec![1, 2, 3, 4, 5, 6]); + LinkedMapVec::insert(2, &vec![1, 2, 3]); + + assert_eq!(JustVec::decode_len().unwrap(), 4); + assert_eq!(OptionVec::decode_len().unwrap(), 5); + assert_eq!(MapVec::decode_len(1).unwrap(), 6); + assert_eq!(LinkedMapVec::decode_len(2).unwrap(), 3); + }); + } + + #[test] + fn len_works_for_default() { + with_externalities(&mut TestExternalities::default(), || { + // vec + assert_eq!(JustVec::get(), vec![]); + assert_eq!(JustVec::decode_len(), Ok(0)); + + assert_eq!(JustVecWithDefault::get(), vec![6, 9]); + assert_eq!(JustVecWithDefault::decode_len(), Ok(2)); + + assert_eq!(OptionVec::get(), None); + assert_eq!(OptionVec::decode_len(), Ok(0)); + + assert_eq!(OptionVecWithDefault::get(), Some(vec![6, 9])); + assert_eq!(OptionVecWithDefault::decode_len(), Ok(2)); + + // map + assert_eq!(MapVec::get(0), vec![]); + assert_eq!(MapVec::decode_len(0), Ok(0)); + + assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); + assert_eq!(MapVecWithDefault::decode_len(0), Ok(2)); + + assert_eq!(OptionMapVec::get(0), None); + assert_eq!(OptionMapVec::decode_len(0), Ok(0)); + + assert_eq!(OptionMapVecWithDefault::get(0), Some(vec![6, 9])); + assert_eq!(OptionMapVecWithDefault::decode_len(0), Ok(2)); + + // linked map + 
assert_eq!(LinkedMapVec::get(0), vec![]); + assert_eq!(LinkedMapVec::decode_len(0), Ok(0)); + + assert_eq!(LinkedMapVecWithDefault::get(0), vec![6, 9]); + assert_eq!(LinkedMapVecWithDefault::decode_len(0), Ok(2)); + + assert_eq!(OptionLinkedMapVec::get(0), None); + assert_eq!(OptionLinkedMapVec::decode_len(0), Ok(0)); + + assert_eq!(OptionLinkedMapVecWithDefault::get(0), Some(vec![6, 9])); + assert_eq!(OptionLinkedMapVecWithDefault::decode_len(0), Ok(2)); + }); + } } diff --git a/srml/support/src/traits.rs b/srml/support/src/traits.rs index cca9fd2b10376..a5e52f5f66c4f 100644 --- a/srml/support/src/traits.rs +++ b/srml/support/src/traits.rs @@ -26,6 +26,28 @@ use crate::sr_primitives::ConsensusEngineId; use super::for_each_tuple; +/// A trait that can return the default value of a storage item. This must only ever be implemented +/// for a special delegator struct for each storage item +pub trait StorageDefault: Sized { + /// Return the default value of type `V`. `None`, if `V` does not have a proper default value. + fn default() -> Option; +} + +// FIXME #1466 This is needed for `storage_items!`. Should be removed once it is deprecated. +impl StorageDefault for () { fn default() -> Option { Some(Default::default()) } } + +/// Anything that can have a `::len()` method. +pub trait Len { + /// Return the length of data type. + fn len(&self) -> usize; +} + +impl Len for T where ::IntoIter: ExactSizeIterator { + fn len(&self) -> usize { + self.clone().into_iter().len() + } +} + /// A trait for querying a single fixed value from a type. pub trait Get { /// Return a constant value. @@ -620,7 +642,7 @@ bitmask! { } pub trait Time { - type Moment: SimpleArithmetic + Codec + Clone + Default; + type Moment: SimpleArithmetic + Codec + Clone + Default + Copy; fn now() -> Self::Moment; } diff --git a/srml/system/src/lib.rs b/srml/system/src/lib.rs index 56093f4f7ec8e..2343af8bec125 100644 --- a/srml/system/src/lib.rs +++ b/srml/system/src/lib.rs @@ -42,6 +42,23 @@ //! //! 
See the [`Module`](./struct.Module.html) struct for details of publicly available functions. //! +//! ### Signed Extensions +//! +//! The system module defines the following extensions: +//! +//! - [`CheckWeight`]: Checks the weight and length of the block and ensure that it does not +//! exceed the limits. +//! - ['CheckNonce']: Checks the nonce of the transaction. Contains a single payload of type +//! `T::Index`. +//! - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`. +//! - [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the +//! signed payload of the transaction. +//! - [`CheckVersion`]: Checks that the runtime version is the same as the one encoded in the +//! transaction. +//! +//! Lookup the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed +//! extensions included in a chain. +//! //! ## Usage //! //! ### Prerequisites @@ -561,7 +578,7 @@ impl Module { // We perform early return if we've reached the maximum capacity of the event list, // so `Events` seems to be corrupted. Also, this has happened after the start of execution // (since the event list is cleared at the block initialization). - if >::append(&[event]).is_err() { + if >::append([event].into_iter()).is_err() { // The most sensible thing to do here is to just ignore this event and wait until the // new block. return; diff --git a/srml/timestamp/src/lib.rs b/srml/timestamp/src/lib.rs index 78a741aff48a5..1b03b5b9af79f 100644 --- a/srml/timestamp/src/lib.rs +++ b/srml/timestamp/src/lib.rs @@ -195,9 +195,9 @@ macro_rules! 
impl_timestamp_set { ); ( $($t:ident)* ) => { - impl),*> OnTimestampSet for ($($t,)*) { + impl),*> OnTimestampSet for ($($t,)*) { fn on_timestamp_set(moment: Moment) { - $($t::on_timestamp_set(moment.clone());)* + $($t::on_timestamp_set(moment);)* } } } @@ -209,7 +209,7 @@ for_each_tuple!(impl_timestamp_set); pub trait Trait: system::Trait { /// Type used for expressing timestamp. type Moment: Parameter + Default + SimpleArithmetic - + Scale; + + Scale + Copy; /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. type OnTimestampSet: OnTimestampSet; @@ -246,7 +246,7 @@ decl_module! { Self::now().is_zero() || now >= Self::now() + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" ); - ::Now::put(now.clone()); + ::Now::put(now); ::DidUpdate::put(true); >::on_timestamp_set(now); diff --git a/srml/treasury/src/lib.rs b/srml/treasury/src/lib.rs index 12ef4fc14e2b1..1e709b74a613b 100644 --- a/srml/treasury/src/lib.rs +++ b/srml/treasury/src/lib.rs @@ -89,11 +89,6 @@ type NegativeImbalanceOf = <::Currency as Currency< + ReservableCurrency; diff --git a/test-utils/transaction-factory/src/complex_mode.rs b/test-utils/transaction-factory/src/complex_mode.rs index 85b12248d80ba..ed76a66b09083 100644 --- a/test-utils/transaction-factory/src/complex_mode.rs +++ b/test-utils/transaction-factory/src/complex_mode.rs @@ -41,29 +41,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One, Zero}; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{RuntimeAdapter, create_block}; -pub fn next( +pub fn next( factory_state: &mut RA, - client: &Arc>>, + client: &Arc>, version: u32, genesis_hash: ::Hash, 
prior_block_hash: ::Hash, - prior_block_id: BlockId, -) -> Option<::Block> + prior_block_id: BlockId, +) -> Option where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, RA: RuntimeAdapter, { let total = factory_state.start_number() + factory_state.num() * factory_state.rounds(); @@ -102,7 +103,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::(&client, transfer, inherents); + let block = create_block::(&client, transfer, inherents); info!( "Created block {} with hash {}. Transferring {} from {} to {}.", factory_state.block_no() + RA::Number::one(), diff --git a/test-utils/transaction-factory/src/lib.rs b/test-utils/transaction-factory/src/lib.rs index 16bb08a2b436d..5d63f906a73cf 100644 --- a/test-utils/transaction-factory/src/lib.rs +++ b/test-utils/transaction-factory/src/lib.rs @@ -26,22 +26,19 @@ use std::fmt::Display; use log::info; -use client::block_builder::api::BlockBuilder; -use client::runtime_api::ConstructRuntimeApi; +use client::{Client, block_builder::api::BlockBuilder, runtime_api::ConstructRuntimeApi}; use consensus_common::{ BlockOrigin, BlockImportParams, InherentData, ForkChoiceStrategy, SelectChain }; use consensus_common::block_import::BlockImport; use codec::{Decode, Encode}; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{ Block as BlockT, Header as HeaderT, ProvideRuntimeApi, SimpleArithmetic, One, Zero, }; -use substrate_service::{ - FactoryBlock, FactoryFullConfiguration, FullClient, new_client, - ServiceFactory, 
ComponentClient, FullComponents}; pub use crate::modes::Mode; pub mod modes; @@ -95,15 +92,19 @@ pub trait RuntimeAdapter { /// Manufactures transactions. The exact amount depends on /// `mode`, `num` and `rounds`. -pub fn factory( +pub fn factory( mut factory_state: RA, - mut config: FactoryFullConfiguration, + client: &Arc>, + select_chain: &Sc, ) -> cli::error::Result<()> where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, + Sc: SelectChain, RA: RuntimeAdapter, <::Block as BlockT>::Hash: From, { @@ -112,20 +113,16 @@ where return Err(cli::error::Error::Input(msg)); } - let client = new_client::(&config)?; - - let select_chain = F::build_select_chain(&mut config, client.clone())?; - - let best_header: Result<::Header, cli::error::Error> = + let best_header: Result<::Header, cli::error::Error> = select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); let mut best_hash = best_header?.hash(); - let best_block_id = BlockId::::hash(best_hash); + let best_block_id = BlockId::::hash(best_hash); let version = client.runtime_version_at(&best_block_id)?.spec_version; let genesis_hash = client.block_hash(Zero::zero())? 
.expect("Genesis block always exists; qed").into(); while let Some(block) = match factory_state.mode() { - Mode::MasterToNToM => complex_mode::next::( + Mode::MasterToNToM => complex_mode::next::( &mut factory_state, &client, version, @@ -133,7 +130,7 @@ where best_hash.into(), best_block_id, ), - _ => simple_modes::next::( + _ => simple_modes::next::( &mut factory_state, &client, version, @@ -143,7 +140,7 @@ where ), } { best_hash = block.header().hash(); - import_block::(&client, block); + import_block(&client, block); info!("Imported block at {}", factory_state.block_no()); } @@ -152,16 +149,18 @@ where } /// Create a baked block from a transfer extrinsic and timestamp inherent. -pub fn create_block( - client: &Arc>>, +pub fn create_block( + client: &Arc>, transfer: ::Extrinsic, - inherent_extrinsics: Vec<::Extrinsic>, -) -> ::Block + inherent_extrinsics: Vec<::Extrinsic>, +) -> Block where - F: ServiceFactory, - FullClient: ProvideRuntimeApi, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + RtApi: ConstructRuntimeApi> + Send + Sync, + as ProvideRuntimeApi>::Api: BlockBuilder, RA: RuntimeAdapter, { let mut block = client.new_block(Default::default()).expect("Failed to create new block"); @@ -177,10 +176,13 @@ where block.bake().expect("Failed to bake block") } -fn import_block( - client: &Arc>>, - block: ::Block -) -> () where F: ServiceFactory +fn import_block( + client: &Arc>, + block: Block +) -> () where + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, { let import = BlockImportParams { origin: BlockOrigin::File, diff --git a/test-utils/transaction-factory/src/simple_modes.rs b/test-utils/transaction-factory/src/simple_modes.rs index ec4f484fa9827..bcbb91200657f 100644 --- 
a/test-utils/transaction-factory/src/simple_modes.rs +++ b/test-utils/transaction-factory/src/simple_modes.rs @@ -36,29 +36,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One}; use sr_primitives::generic::BlockId; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{Mode, RuntimeAdapter, create_block}; -pub fn next( +pub fn next( factory_state: &mut RA, - client: &Arc>>, + client: &Arc>, version: u32, genesis_hash: ::Hash, prior_block_hash: ::Hash, - prior_block_id: BlockId, -) -> Option<::Block> + prior_block_id: BlockId, +) -> Option where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, RA: RuntimeAdapter, { if factory_state.block_no() >= factory_state.num() { @@ -93,7 +94,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::(&client, transfer, inherents); + let block = create_block::(&client, transfer, inherents); factory_state.set_block_no(factory_state.block_no() + RA::Number::one());