diff --git a/Cargo.lock b/Cargo.lock index 607a6283cb21bc..d217a061c0b1d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -103,7 +103,7 @@ name = "atty" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -119,7 +119,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -130,7 +130,7 @@ version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -288,7 +288,7 @@ version = "0.20.0" dependencies = [ "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", @@ -344,7 +344,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bzip2-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -353,7 +353,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -437,7 +437,7 @@ name = "chrono" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", @@ -450,7 +450,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -483,7 +483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -547,7 +547,7 @@ dependencies = [ "clicolors-control 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "encode_unicode 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -600,7 +600,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -808,7 +808,7 @@ dependencies = [ [[package]] name = "dir-diff" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -819,7 +819,7 @@ name = "dirs" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -839,7 +839,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -851,7 +851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "dlopen_derive 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -860,7 +860,7 @@ name = "dlopen_derive" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.15.42 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1003,7 +1003,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "errno-dragonfly 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1013,7 +1013,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1072,7 +1072,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1088,7 +1088,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1172,7 +1172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "c_linked_list 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "get_if_addrs-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1182,7 +1182,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1191,7 +1191,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1218,7 +1218,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "grpcio-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1230,7 +1230,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", "cmake 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1465,7 +1465,7 @@ name = "iovec" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1495,7 +1495,7 @@ name = "jobserver" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1510,7 +1510,7 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1522,7 +1522,7 @@ dependencies = [ [[package]] name = "jsonrpc-derive" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-crate 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1533,12 +1533,12 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1547,10 +1547,10 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1558,15 +1558,14 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1574,15 +1573,15 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "13.2.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1653,7 +1652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.62" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1673,7 +1672,7 @@ dependencies = [ "bindgen 0.47.3 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1729,7 +1728,7 @@ name = "memchr" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1742,7 +1741,7 @@ name = "memmap" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1751,7 +1750,7 @@ name = "memmap" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1769,7 +1768,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "mach_o_sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1816,7 +1815,7 @@ dependencies = [ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1841,7 +1840,7 @@ version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1867,7 +1866,7 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1889,7 +1888,7 @@ dependencies = [ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1901,7 +1900,7 @@ dependencies = [ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1919,6 +1918,16 @@ dependencies = [ "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "num-derive" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "num-derive" version = "0.3.0" @@ -1959,7 +1968,7 @@ name = "num_cpus" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2073,7 +2082,7 @@ name = "parking_lot_core" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2084,7 +2093,7 @@ name = "parking_lot_core" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2098,7 +2107,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2252,7 +2261,7 @@ dependencies = [ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2350,7 +2359,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2362,7 +2371,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2380,7 +2389,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2471,7 +2480,7 @@ name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2483,7 +2492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2614,7 +2623,7 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.9.21" +version = "0.9.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2659,7 +2668,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "web-sys 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2671,7 +2680,7 @@ name = "rocksdb" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "librocksdb-sys 5.18.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2680,7 +2689,7 @@ name = "rpassword" version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2816,7 +2825,7 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2970,7 +2979,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3007,7 +3016,7 @@ dependencies = [ "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "solana-client 0.20.0", "solana-core 0.20.0", "solana-drone 0.20.0", @@ -3020,7 +3029,7 @@ dependencies = [ "solana-runtime 0.20.0", "solana-sdk 0.20.0", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3044,7 +3053,7 @@ dependencies = [ "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "serial_test 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serial_test_derive 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "solana-client 0.20.0", @@ -3083,7 +3092,7 @@ version = "0.20.0" dependencies = [ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "solana-logger 0.20.0", @@ -3164,11 +3173,11 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "pretty-hex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "solana-budget-api 0.20.0", "solana-budget-program 0.20.0", "solana-client 0.20.0", @@ -3192,12 +3201,12 @@ version = "0.20.0" dependencies = [ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3255,20 +3264,20 @@ dependencies = [ "core_affinity 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "dir-diff 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dir-diff 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "dlopen 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "dlopen_derive 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-ws-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-ws-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3277,7 +3286,7 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3326,10 +3335,10 @@ dependencies = [ "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "solana-ed25519-dalek 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "syn 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3414,12 +3423,11 @@ name = "solana-genesis" version = "0.20.0" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "solana-core 0.20.0", "solana-genesis-programs 0.20.0", "solana-sdk 0.20.0", @@ -3481,11 +3489,11 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "solana-client 0.20.0", "solana-config-api 0.20.0", @@ -3519,7 +3527,7 @@ dependencies = [ "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "solana-core 0.20.0", "solana-logger 0.20.0", "solana-runtime 0.20.0", @@ -3601,7 +3609,7 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serial_test 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serial_test_derive 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "solana-sdk 0.20.0", @@ -3615,7 +3623,7 @@ dependencies = [ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3685,7 +3693,7 @@ version = "4.0.1-3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3713,7 +3721,7 @@ dependencies = [ "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3775,7 +3783,7 @@ dependencies = [ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "cbindgen 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "solana-ed25519-dalek 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3871,7 +3879,7 @@ dependencies = [ "console 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "indicatif 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "solana-client 0.20.0", "solana-core 0.20.0", @@ -3887,6 +3895,22 @@ dependencies = [ "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "solana-vest-api" +version = "0.20.0-pre0" +dependencies = [ + "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "solana-config-api 0.20.0", + "solana-runtime 0.20.0", + "solana-sdk 0.20.0", +] + [[package]] name = "solana-vote-api" version = "0.20.0" @@ -3918,9 +3942,9 @@ version = "0.20.0" dependencies = [ "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4345,7 +4369,7 @@ dependencies = [ "combine 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "elfkit 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash32 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4519,7 +4543,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4533,7 +4557,7 @@ version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "filetime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4553,7 +4577,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4600,7 +4624,7 @@ name = "termios" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4617,7 +4641,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4625,7 +4649,7 @@ name = "thread-id" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4675,7 +4699,7 @@ name = "time" version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4885,7 +4909,7 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5068,7 +5092,7 @@ name = "wait-timeout" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5198,7 +5222,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5276,7 +5300,7 @@ dependencies = [ [[package]] name = "ws" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5285,7 +5309,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", "mio-extras 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5315,7 +5339,7 @@ name = "xattr" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5421,7 +5445,7 @@ dependencies = [ "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" "checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -"checksum dir-diff 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1cce6e50ca36311e494793f7629014dc78cd963ba85cd05968ae06a63b867f0b" +"checksum dir-diff 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" "checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" "checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" "checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" @@ -5497,12 +5521,12 @@ dependencies = [ "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" "checksum jobserver 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "f74e73053eaf95399bf926e48fc7a2a3ce50bd0eaaa2357d391e95b2dcdd4f10" "checksum js-sys 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)" = "1efc4f2a556c58e79c5500912e221dd826bec64ff4aabd8ce71ccef6da02d7d4" -"checksum jsonrpc-core 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91d767c183a7e58618a609499d359ce3820700b3ebb4823a18c343b4a2a41a0d" -"checksum jsonrpc-derive 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4a76285ebba4515680fbfe4b62498ccb2a932384c8732eed68351b02fb7ae475" -"checksum jsonrpc-http-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "601fcc7bec888c7cbc7fd124d3d6744d72c0ebb540eca6fe2261b71f9cff6320" -"checksum jsonrpc-pubsub 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "64e0fb0664d8ce287e826940dafbb45379443c595bdd71d93655f3c8f25fd992" -"checksum jsonrpc-server-utils 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d415f51d016a4682878e19dd03e8c0b61cd4394912d7cd3dc48d4f19f061a4e" -"checksum jsonrpc-ws-server 13.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4699433c1ac006d7df178b4c29c191e5bb6d81e2dca18c5c804a094592900101" +"checksum jsonrpc-core 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf34414fdf9d843f2fd39557ffe7cbf75b53a2cf308e2b69af2ce86f23fd426d" +"checksum jsonrpc-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1b217357876c9f55d3237fafaebab1e3925d53a2c2508df05ea15c02ddbb8bc" +"checksum jsonrpc-http-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc653fb90f38cd203d756557426e87980a7329c4ac19360e7bd167d9f416f36e" +"checksum jsonrpc-pubsub 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db22ae4d04d336fea4378ad4ad87c29563d983fe98c04319c173817a4fd9891f" +"checksum jsonrpc-server-utils 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56ca64eeb4463722c49dd526e0a87dfc0cdecda9348a4b6f5720f25abc4fcc89" +"checksum jsonrpc-ws-server 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1cdf74f56227cf10ab22c64fd73b730d59b6217cd598b63883d07bd4b8de7c8" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum lalrpop 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2e80bee40b22bca46665b4ef1f3cd88ed0fb043c971407eac17a0712c02572" @@ -5511,7 +5535,7 @@ dependencies = [ "checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" -"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" +"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c" "checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" "checksum librocksdb-sys 5.18.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d19778314deaa7048f2ea7d07b8aa12e1c227acebe975a37eeab6d2f8c74e41b" "checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" @@ -5543,6 +5567,7 @@ dependencies = [ "checksum nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229" "checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" "checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +"checksum num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2" "checksum num-derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0c8b15b261814f992e33760b1fca9fe8b693d8a65299f20c9901688636cfb746" "checksum num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" "checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" @@ -5622,7 +5647,7 @@ dependencies = [ "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" "checksum rental 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "01916ebd9fc2e81978a5dc9542a2fa47f5bb2ca3402e14c7cc42d6e3c5123e1f" "checksum rental-impl 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "82260d54cf2cbe9608df161f7e7c98e81fae702aa13af9e4d5d39dc2ffb25ab6" -"checksum reqwest 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)" = "02b7e953e14c6f3102b7e8d1f1ee3abf5ecee80b427f5565c9389835cecae95c" +"checksum reqwest 0.9.22 (registry+https://github.com/rust-lang/crates.io-index)" = "2c2064233e442ce85c77231ebd67d9eca395207dec2127fe0bbedde4bd29a650" "checksum rgb 0.8.13 (registry+https://github.com/rust-lang/crates.io-index)" = "4f089652ca87f5a82a62935ec6172a534066c7b97be003cc8f702ee9a7a59c92" "checksum ring 0.16.7 (registry+https://github.com/rust-lang/crates.io-index)" = "796ae8317a07b04dffb1983bdc7045ccd02f741f0b411704f07fd35dbf99f757" "checksum rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1651697fefd273bfb4fd69466cc2a9d20de557a0213b97233b22b5e95924b5e" @@ -5644,7 +5669,7 @@ dependencies = [ "checksum serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)" = "4b133a43a1ecd55d4086bd5b4dc6c1751c68b1bfbeba7a5040442022c7e7c02e" "checksum serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "2f72eb2a68a7dc3f9a691bfda9305a1c017a6215e5a4545c258500d2099a37c2" "checksum serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" -"checksum serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)" = "38b08a9a90e5260fe01c6480ec7c811606df6d3a660415808c3c3fa8ed95b582" +"checksum serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)" = "691b17f19fc1ec9d94ec0b5864859290dff279dbd7b03f017afda54eb36c3c35" "checksum serial_test 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50bfbc39343545618d97869d77f38ed43e48dd77432717dbc7ed39d797f3ecbe" "checksum serial_test_derive 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "89dd85be2e2ad75b041c9df2892ac078fa6e0b90024028b2b9fb4125b7530f01" "checksum sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "23962131a91661d643c98940b20fcaffe62d776a823247be80a48fcb8b6fce68" @@ -5796,7 +5821,7 @@ dependencies = [ "checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" "checksum winconsole 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ef84b96d10db72dd980056666d7f1e7663ce93d82fa33b63e71c966f4cf5032" "checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -"checksum ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a6f5bb86663ff4d1639408410f50bf6050367a8525d644d49a6894cd618a631" +"checksum ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" "checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" diff --git a/Cargo.toml b/Cargo.toml index 3c45b73c1feef2..cdd3d9a1959dc7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ members = [ "programs/stake_tests", "programs/storage_api", "programs/storage_program", + "programs/vest_api", "programs/vote_api", "programs/vote_program", "replicator", diff --git a/bench-exchange/Cargo.toml b/bench-exchange/Cargo.toml index 70f3372a8374fa..1049576d42ad1a 100644 --- a/bench-exchange/Cargo.toml +++ b/bench-exchange/Cargo.toml @@ -22,7 +22,7 @@ rayon = "1.2.0" serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" # solana-runtime = { path = "../solana/runtime"} solana-core = { path = "../core", version = "0.20.0" } solana-genesis = { path = "../genesis", version = "0.20.0" } @@ -36,4 +36,4 @@ solana-netutil = { path = "../netutil", version = "0.20.0" } solana-runtime = { path = "../runtime", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.0" } untrusted = "0.7.0" -ws = "0.9.0" +ws = "0.9.1" diff --git a/bench-exchange/src/bench.rs b/bench-exchange/src/bench.rs index 80949324b9e61d..d226a4f99f7d8d 100644 --- a/bench-exchange/src/bench.rs +++ b/bench-exchange/src/bench.rs @@ -11,7 +11,7 @@ use solana_drone::drone::request_airdrop_transaction; use solana_exchange_api::exchange_instruction; use solana_exchange_api::exchange_state::*; use solana_exchange_api::id; -use solana_genesis::PrimordialAccountDetails; +use solana_genesis::Base64Account; use solana_metrics::datapoint_info; use solana_sdk::client::Client; use solana_sdk::client::SyncClient; @@ -89,7 +89,7 @@ pub fn create_client_accounts_file( keypairs.iter().for_each(|keypair| { accounts.insert( serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(), - PrimordialAccountDetails { + Base64Account { balance: fund_amount, executable: false, owner: system_program::id().to_string(), @@ -140,8 +140,7 @@ where let path = Path::new(&client_ids_and_stake_file); let file = File::open(path).unwrap(); - let accounts: HashMap = - serde_yaml::from_reader(file).unwrap(); + let accounts: HashMap = serde_yaml::from_reader(file).unwrap(); accounts .into_iter() .map(|(keypair, _)| { diff --git a/bench-exchange/src/cli.rs b/bench-exchange/src/cli.rs index 8291b88abe2782..e1124505cc68c4 100644 --- a/bench-exchange/src/cli.rs +++ b/bench-exchange/src/cli.rs @@ -1,7 +1,7 @@ use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, ArgMatches}; use solana_core::gen_keys::GenKeys; use solana_drone::drone::DRONE_PORT; -use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil}; +use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil}; use std::net::SocketAddr; use std::process::exit; use std::time::Duration; @@ -179,7 +179,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config { }); if matches.is_present("identity") { - args.identity = read_keypair(matches.value_of("identity").unwrap()) + args.identity = read_keypair_file(matches.value_of("identity").unwrap()) .expect("can't read client identity"); } else { args.identity = { diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index d30fd49a62f595..613c1eb9db6ec0 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -15,7 +15,7 @@ rayon = "1.2.0" serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" solana-core = { path = "../core", version = "0.20.0" } solana-genesis = { path = "../genesis", version = "0.20.0" } solana-client = { path = "../client", version = "0.20.0" } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 8d4bcac7064af8..b562800c61c876 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -8,12 +8,11 @@ use solana_core::gen_keys::GenKeys; use solana_drone::drone::request_airdrop_transaction; #[cfg(feature = "move")] use solana_librapay_api::{create_genesis, upload_mint_program, upload_payment_program}; -#[cfg(feature = "move")] use solana_measure::measure::Measure; -use solana_metrics::datapoint_info; +use solana_metrics::datapoint_debug; use solana_sdk::{ client::Client, - clock::{DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES}, + clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, @@ -34,10 +33,9 @@ use std::{ time::{Duration, Instant}, }; -// The point at which transactions become "too old", in seconds. The cluster keeps blockhashes for -// approximately MAX_RECENT_BLOCKHASHES/DEFAULT_TICKS_PER_SLOT seconds. The adjustment of 5sec -// seems about right to minimize BlockhashNotFound errors, based on empirical testing. -const MAX_TX_QUEUE_AGE: u64 = MAX_RECENT_BLOCKHASHES as u64 / DEFAULT_TICKS_PER_SLOT - 5; +// The point at which transactions become "too old", in seconds. +const MAX_TX_QUEUE_AGE: u64 = + MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND; #[cfg(feature = "move")] use solana_librapay_api::librapay_transaction; @@ -103,7 +101,7 @@ where } } }; - println!("Initial transaction count {}", first_tx_count); + info!("Initial transaction count {}", first_tx_count); let exit_signal = Arc::new(AtomicBool::new(false)); @@ -111,7 +109,7 @@ where // collect the max transaction rate and total tx count seen let maxes = Arc::new(RwLock::new(Vec::new())); let sample_period = 1; // in seconds - println!("Sampling TPS every {} second...", sample_period); + info!("Sampling TPS every {} second...", sample_period); let v_threads: Vec<_> = clients .iter() .map(|client| { @@ -175,6 +173,10 @@ where sleep(Duration::from_millis(100)); continue; } + info!( + "Took {} ms for new blockhash", + duration_as_ms(&blockhash_time.elapsed()) + ); blockhash_time = Instant::now(); let balance = client.get_balance(&id.pubkey()).unwrap_or(0); metrics_submit_lamport_balance(balance); @@ -205,18 +207,18 @@ where // Stop the sampling threads so it will collect the stats exit_signal.store(true, Ordering::Relaxed); - println!("Waiting for validator threads..."); + info!("Waiting for validator threads..."); for t in v_threads { if let Err(err) = t.join() { - println!(" join() failed with: {:?}", err); + info!(" join() failed with: {:?}", err); } } // join the tx send threads - println!("Waiting for transmit threads..."); + info!("Waiting for transmit threads..."); for t in s_threads { if let Err(err) = t.join() { - println!(" join() failed with: {:?}", err); + info!(" join() failed with: {:?}", err); } } @@ -235,8 +237,8 @@ where } fn metrics_submit_lamport_balance(lamport_balance: u64) { - println!("Token balance: {}", lamport_balance); - datapoint_info!( + info!("Token balance: {}", lamport_balance); + datapoint_debug!( "bench-tps-lamport_balance", ("balance", lamport_balance, i64) ); @@ -321,7 +323,7 @@ fn generate_txs( libra_args: &Option, ) { let tx_count = source.len(); - println!("Signing transactions... {} (reclaim={})", tx_count, reclaim); + info!("Signing transactions... {} (reclaim={})", tx_count, reclaim); let signing_start = Instant::now(); let transactions = if let Some(( @@ -356,14 +358,14 @@ fn generate_txs( let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); let bsps = (tx_count) as f64 / ns as f64; let nsps = ns as f64 / (tx_count) as f64; - println!( + info!( "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {}", bsps * 1_000_000_f64, nsps / 1_000_f64, duration_as_ms(&duration), blockhash, ); - datapoint_info!( + datapoint_debug!( "bench-tps-generate_txs", ("duration", duration_as_ms(&duration), i64) ); @@ -397,7 +399,7 @@ fn do_tx_transfers( } if let Some(txs0) = txs { shared_tx_thread_count.fetch_add(1, Ordering::Relaxed); - println!( + info!( "Transferring 1 unit {} times... to {}", txs0.len(), client.as_ref().tpu_addr(), @@ -423,12 +425,12 @@ fn do_tx_transfers( } shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed); total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed); - println!( + info!( "Tx send done. {} ms {} tps", duration_as_ms(&transfer_start.elapsed()), tx_len as f32 / duration_as_s(&transfer_start.elapsed()), ); - datapoint_info!( + datapoint_debug!( "bench-tps-do_tx_transfers", ("duration", duration_as_ms(&transfer_start.elapsed()), i64), ("count", tx_len, i64) @@ -446,7 +448,6 @@ fn verify_funding_transfer(client: &T, tx: &Transaction, amount: u64) return true; } } - false } @@ -465,7 +466,7 @@ pub fn fund_keys( let mut notfunded: Vec<&Keypair> = dests.iter().collect(); let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1); - println!( + info!( "funding keys {} with lamports: {:?} total: {}", dests.len(), client.get_balance(&source.pubkey()), @@ -474,7 +475,8 @@ pub fn fund_keys( while !notfunded.is_empty() { let mut new_funded: Vec<(&Keypair, u64)> = vec![]; let mut to_fund = vec![]; - println!("creating from... {}", funded.len()); + info!("creating from... {}", funded.len()); + let mut build_to_fund = Measure::start("build_to_fund"); for f in &mut funded { let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX); if max_units == 0 { @@ -496,6 +498,8 @@ pub fn fund_keys( } extra -= 1; } + build_to_fund.stop(); + debug!("build to_fund vec: {}us", build_to_fund.as_us()); // try to transfer a "few" at a time with recent blockhash // assume 4MB network buffers, and 512 byte packets @@ -504,6 +508,7 @@ pub fn fund_keys( to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| { let mut tries = 0; + let mut make_txs = Measure::start("make_txs"); // this set of transactions just initializes us for bookkeeping #[allow(clippy::clone_double_ref)] // sigh let mut to_fund_txs: Vec<_> = chunk @@ -515,6 +520,12 @@ pub fn fund_keys( (k.clone(), tx) }) .collect(); + make_txs.stop(); + debug!( + "make {} unsigned txs: {}us", + to_fund_txs.len(), + make_txs.as_us() + ); let amount = chunk[0].1[0].1; @@ -523,7 +534,7 @@ pub fn fund_keys( .iter() .fold(0, |len, (_, tx)| len + tx.message().instructions.len()); - println!( + info!( "{} {} to {} in {} txs", if tries == 0 { "transferring" @@ -538,30 +549,65 @@ pub fn fund_keys( let (blockhash, _fee_calculator) = get_recent_blockhash(client); // re-sign retained to_fund_txes with updated blockhash + let mut sign_txs = Measure::start("sign_txs"); to_fund_txs.par_iter_mut().for_each(|(k, tx)| { tx.sign(&[*k], blockhash); }); + sign_txs.stop(); + debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us()); + let mut send_txs = Measure::start("send_txs"); to_fund_txs.iter().for_each(|(_, tx)| { client.async_send_transaction(tx.clone()).expect("transfer"); }); - - // retry anything that seems to have dropped through cracks - // again since these txs are all or nothing, they're fine to - // retry - for _ in 0..10 { - to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount)); + send_txs.stop(); + debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us()); + + let mut verify_txs = Measure::start("verify_txs"); + let mut starting_txs = to_fund_txs.len(); + let mut verified_txs = 0; + let mut failed_verify = 0; + // Only loop multiple times for small (quick) transaction batches + for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) { + let mut timer = Instant::now(); + to_fund_txs.retain(|(_, tx)| { + if timer.elapsed() >= Duration::from_secs(5) { + if failed_verify > 0 { + debug!("total txs failed verify: {}", failed_verify); + } + info!( + "Verifying transfers... {} remaining", + starting_txs - verified_txs + ); + timer = Instant::now(); + } + let verified = verify_funding_transfer(client, &tx, amount); + if verified { + verified_txs += 1; + } else { + failed_verify += 1; + } + !verified + }); if to_fund_txs.is_empty() { break; } + debug!("Looping verifications"); + info!("Verifying transfers... {} remaining", to_fund_txs.len()); sleep(Duration::from_millis(100)); } + starting_txs -= to_fund_txs.len(); + verify_txs.stop(); + debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us()); + // retry anything that seems to have dropped through cracks + // again since these txs are all or nothing, they're fine to + // retry tries += 1; } - println!("transferred"); + info!("transferred"); }); - println!("funded: {} left: {}", new_funded.len(), notfunded.len()); + info!("funded: {} left: {}", new_funded.len(), notfunded.len()); funded = new_funded; } } @@ -574,11 +620,11 @@ pub fn airdrop_lamports( ) -> Result<()> { let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0); metrics_submit_lamport_balance(starting_balance); - println!("starting balance {}", starting_balance); + info!("starting balance {}", starting_balance); if starting_balance < tx_count { let airdrop_amount = tx_count - starting_balance; - println!( + info!( "Airdropping {:?} lamports from {} for {}", airdrop_amount, drone_addr, @@ -607,14 +653,14 @@ pub fn airdrop_lamports( }; let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| { - println!("airdrop error {}", e); + info!("airdrop error {}", e); starting_balance }); - println!("current balance {}...", current_balance); + info!("current balance {}...", current_balance); metrics_submit_lamport_balance(current_balance); if current_balance - starting_balance != airdrop_amount { - println!( + info!( "Airdrop failed! {} {} {}", id.pubkey(), current_balance, @@ -637,8 +683,8 @@ fn compute_and_report_stats( let mut max_tx_count = 0; let mut nodes_with_zero_tps = 0; let mut total_maxes = 0.0; - println!(" Node address | Max TPS | Total Transactions"); - println!("---------------------+---------------+--------------------"); + info!(" Node address | Max TPS | Total Transactions"); + info!("---------------------+---------------+--------------------"); for (sock, stats) in maxes.read().unwrap().iter() { let maybe_flag = match stats.txs { @@ -646,7 +692,7 @@ fn compute_and_report_stats( _ => "", }; - println!( + info!( "{:20} | {:13.2} | {} {}", sock, stats.tps, stats.txs, maybe_flag ); @@ -667,7 +713,7 @@ fn compute_and_report_stats( if total_maxes > 0.0 { let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps; let average_max = total_maxes / num_nodes_with_tps as f32; - println!( + info!( "\nAverage max TPS: {:.2}, {} nodes had 0 TPS", average_max, nodes_with_zero_tps ); @@ -679,7 +725,7 @@ fn compute_and_report_stats( } else { 0.0 }; - println!( + info!( "\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: {:.2}", max_of_maxes, sample_period, @@ -687,7 +733,7 @@ fn compute_and_report_stats( maxes.read().unwrap().len(), drop_rate, ); - println!( + info!( "\tAverage TPS: {}", max_tx_count as f32 / duration_as_s(tx_send_elapsed) ); @@ -906,7 +952,7 @@ pub fn generate_and_fund_keypairs( total *= 3; } - println!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}", + info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}", last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra, account_desired_balance, total ); @@ -1025,7 +1071,7 @@ mod tests { #[test] fn test_bench_tps_fund_keys_with_fees() { let (mut genesis_block, id) = create_genesis_block(10_000); - let fee_calculator = FeeCalculator::new(11); + let fee_calculator = FeeCalculator::new(11, 0); genesis_block.fee_calculator = fee_calculator; let bank = Bank::new(&genesis_block); let client = BankClient::new(bank); diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 8ac13be69a098f..78fc76ef396bde 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -1,11 +1,8 @@ -use std::net::SocketAddr; -use std::process::exit; -use std::time::Duration; - use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches}; use solana_drone::drone::DRONE_PORT; use solana_sdk::fee_calculator::FeeCalculator; -use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil}; +use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil}; +use std::{net::SocketAddr, process::exit, time::Duration}; const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = 64 * 1024; @@ -37,8 +34,8 @@ impl Default for Config { threads: 4, num_nodes: 1, duration: Duration::new(std::u64::MAX, 0), - tx_count: 500_000, - thread_batch_sleep_ms: 0, + tx_count: 50_000, + thread_batch_sleep_ms: 1000, sustained: false, client_ids_and_stake_file: String::new(), write_to_client_file: false, @@ -184,7 +181,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config { } if matches.is_present("identity") { - args.id = read_keypair(matches.value_of("identity").unwrap()) + args.id = read_keypair_file(matches.value_of("identity").unwrap()) .expect("can't read client identity"); } diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 175af487ee13d1..e52812ef57ea0b 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -1,15 +1,12 @@ +use log::*; use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs}; use solana_bench_tps::cli; use solana_core::gossip_service::{discover_cluster, get_multi_client}; -use solana_genesis::PrimordialAccountDetails; +use solana_genesis::Base64Account; use solana_sdk::fee_calculator::FeeCalculator; use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::system_program; -use std::collections::HashMap; -use std::fs::File; -use std::io::prelude::*; -use std::path::Path; -use std::process::exit; +use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit}; /// Number of signatures for all transactions in ~1 week at ~100K TPS pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7; @@ -37,10 +34,11 @@ fn main() { } = &cli_config; if *write_to_client_file { - println!("Generating {} keypairs", *tx_count * 2); + info!("Generating {} keypairs", *tx_count * 2); let (keypairs, _) = generate_keypairs(&id, *tx_count as u64 * 2); let num_accounts = keypairs.len() as u64; - let max_fee = FeeCalculator::new(*target_lamports_per_signature).max_lamports_per_signature; + let max_fee = + FeeCalculator::new(*target_lamports_per_signature, 0).max_lamports_per_signature; let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee) / num_accounts + num_lamports_per_account; @@ -48,7 +46,7 @@ fn main() { keypairs.iter().for_each(|keypair| { accounts.insert( serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(), - PrimordialAccountDetails { + Base64Account { balance: num_lamports_per_account, executable: false, owner: system_program::id().to_string(), @@ -57,7 +55,7 @@ fn main() { ); }); - println!("Writing {}", client_ids_and_stake_file); + info!("Writing {}", client_ids_and_stake_file); let serialized = serde_yaml::to_string(&accounts).unwrap(); let path = Path::new(&client_ids_and_stake_file); let mut file = File::create(path).unwrap(); @@ -65,7 +63,7 @@ fn main() { return; } - println!("Connecting to the cluster"); + info!("Connecting to the cluster"); let (nodes, _replicators) = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); @@ -86,9 +84,8 @@ fn main() { let path = Path::new(&client_ids_and_stake_file); let file = File::open(path).unwrap(); - println!("Reading {}", client_ids_and_stake_file); - let accounts: HashMap = - serde_yaml::from_reader(file).unwrap(); + info!("Reading {}", client_ids_and_stake_file); + let accounts: HashMap = serde_yaml::from_reader(file).unwrap(); let mut keypairs = vec![]; let mut last_balance = 0; diff --git a/book/build-svg.sh b/book/build-svg.sh index 624e6b52a24074..80f8b1e0bfd1cd 100755 --- a/book/build-svg.sh +++ b/book/build-svg.sh @@ -3,9 +3,11 @@ set -e cd "$(dirname "$0")" -make -j"$(nproc)" -B svg +make -j"$(nproc)" -B svg +#TODO figure out why book wants to change, but local and CI differ +exit 0 if [[ -n $CI ]]; then - # In CI confirm that no svgs need to be built + # In CI confirm that no svgs need to be built git diff --exit-code fi diff --git a/book/src/.gitbook/assets/data-plane (4).svg b/book/src/.gitbook/assets/data-plane (4).svg new file mode 100644 index 00000000000000..5a33b8bf6e5abd --- /dev/null +++ b/book/src/.gitbook/assets/data-plane (4).svg @@ -0,0 +1,192 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +3 + + + + +Neighborhood + + + + +Neighborhood + + + + +1 + + + + +Neighborhood + + + + +4 + + + + +0 + + + + +Neighborhood + + + + +Neighborhood + + + + +5 + + + + +2 + + + + +Neighborhood + + + + +6 + + + diff --git a/book/src/.gitbook/assets/data-plane (5).svg b/book/src/.gitbook/assets/data-plane (5).svg new file mode 100644 index 00000000000000..5fa566eebda6f1 --- /dev/null +++ b/book/src/.gitbook/assets/data-plane (5).svg @@ -0,0 +1,210 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +3 + + + + +Neighborhood + + + + +Neighborhood + + + + +1 + + + + +Neighborhood + + + + +4 + + + + +0 + + + + +Neighborhood + + + + +Neighborhood + + + + +5 + + + + +2 + + + + +Neighborhood + + + + +6 + + + diff --git a/book/src/.gitbook/assets/data-plane-3.svg b/book/src/.gitbook/assets/data-plane-3.svg new file mode 100644 index 00000000000000..5a33b8bf6e5abd --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-3.svg @@ -0,0 +1,192 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +3 + + + + +Neighborhood + + + + +Neighborhood + + + + +1 + + + + +Neighborhood + + + + +4 + + + + +0 + + + + +Neighborhood + + + + +Neighborhood + + + + +5 + + + + +2 + + + + +Neighborhood + + + + +6 + + + diff --git a/book/src/.gitbook/assets/data-plane-fanout (2).svg b/book/src/.gitbook/assets/data-plane-fanout (2).svg index ad73f77ef013bc..6fa6b16010495c 100644 --- a/book/src/.gitbook/assets/data-plane-fanout (2).svg +++ b/book/src/.gitbook/assets/data-plane-fanout (2).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane-fanout (4).svg b/book/src/.gitbook/assets/data-plane-fanout (4).svg new file mode 100644 index 00000000000000..ad73f77ef013bc --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-fanout (4).svg @@ -0,0 +1,183 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +Validator + + + + +1 + + + + +1 + + + + +Neighborhood + + + + +0 + + + + +Neighborhood + + + + +Validator + + + + +2 + + + + +2 + + + diff --git a/book/src/.gitbook/assets/data-plane-fanout (5).svg b/book/src/.gitbook/assets/data-plane-fanout (5).svg new file mode 100644 index 00000000000000..ad73f77ef013bc --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-fanout (5).svg @@ -0,0 +1,183 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +Validator + + + + +1 + + + + +1 + + + + +Neighborhood + + + + +0 + + + + +Neighborhood + + + + +Validator + + + + +2 + + + + +2 + + + diff --git a/book/src/.gitbook/assets/data-plane-fanout-3.svg b/book/src/.gitbook/assets/data-plane-fanout-3.svg new file mode 100644 index 00000000000000..ad73f77ef013bc --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-fanout-3.svg @@ -0,0 +1,183 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighborhood + + + + +Validator + + + + +1 + + + + +1 + + + + +Neighborhood + + + + +0 + + + + +Neighborhood + + + + +Validator + + + + +2 + + + + +2 + + + diff --git a/book/src/.gitbook/assets/data-plane-fanout.svg b/book/src/.gitbook/assets/data-plane-fanout.svg index 6fa6b16010495c..ad73f77ef013bc 100644 --- a/book/src/.gitbook/assets/data-plane-fanout.svg +++ b/book/src/.gitbook/assets/data-plane-fanout.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane-neighborhood (1).svg b/book/src/.gitbook/assets/data-plane-neighborhood (1).svg index 1a7f080a31102d..f7a8b8182df7ba 100644 --- a/book/src/.gitbook/assets/data-plane-neighborhood (1).svg +++ b/book/src/.gitbook/assets/data-plane-neighborhood (1).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane-neighborhood (4).svg b/book/src/.gitbook/assets/data-plane-neighborhood (4).svg new file mode 100644 index 00000000000000..1a7f080a31102d --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-neighborhood (4).svg @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighbor + + + + +Neighbor + + + + +1 + + + + +1 + + + + +Neighbor + + + + +Neighbor + + + + +Neighborhood + + + + +Neighborhood + + + + +2 + + + + +2 + + + + +Above + + + + +Below + + + + +Neighbor + + + + +Neighbor + + + + +3 + + + + +3 + + + + +Neighbor + + + + +Neighbor + + + + +4 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/data-plane-neighborhood (5).svg b/book/src/.gitbook/assets/data-plane-neighborhood (5).svg new file mode 100644 index 00000000000000..1a7f080a31102d --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-neighborhood (5).svg @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighbor + + + + +Neighbor + + + + +1 + + + + +1 + + + + +Neighbor + + + + +Neighbor + + + + +Neighborhood + + + + +Neighborhood + + + + +2 + + + + +2 + + + + +Above + + + + +Below + + + + +Neighbor + + + + +Neighbor + + + + +3 + + + + +3 + + + + +Neighbor + + + + +Neighbor + + + + +4 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/data-plane-neighborhood-3.svg b/book/src/.gitbook/assets/data-plane-neighborhood-3.svg new file mode 100644 index 00000000000000..1a7f080a31102d --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-neighborhood-3.svg @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Neighbor + + + + +Neighbor + + + + +1 + + + + +1 + + + + +Neighbor + + + + +Neighbor + + + + +Neighborhood + + + + +Neighborhood + + + + +2 + + + + +2 + + + + +Above + + + + +Below + + + + +Neighbor + + + + +Neighbor + + + + +3 + + + + +3 + + + + +Neighbor + + + + +Neighbor + + + + +4 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/data-plane-neighborhood.svg b/book/src/.gitbook/assets/data-plane-neighborhood.svg index f7a8b8182df7ba..1a7f080a31102d 100644 --- a/book/src/.gitbook/assets/data-plane-neighborhood.svg +++ b/book/src/.gitbook/assets/data-plane-neighborhood.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane-seeding (3).svg b/book/src/.gitbook/assets/data-plane-seeding (3).svg index 765b53c93f589e..7286bd291a64f4 100644 --- a/book/src/.gitbook/assets/data-plane-seeding (3).svg +++ b/book/src/.gitbook/assets/data-plane-seeding (3).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane-seeding (4).svg b/book/src/.gitbook/assets/data-plane-seeding (4).svg new file mode 100644 index 00000000000000..765b53c93f589e --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-seeding (4).svg @@ -0,0 +1,138 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Validator + + + + +1 + + + + +Neighborhood + + + + +Leader + + + + +0 + + + + +Validator + + + + +2 + + + diff --git a/book/src/.gitbook/assets/data-plane-seeding (5).svg b/book/src/.gitbook/assets/data-plane-seeding (5).svg new file mode 100644 index 00000000000000..765b53c93f589e --- /dev/null +++ b/book/src/.gitbook/assets/data-plane-seeding (5).svg @@ -0,0 +1,138 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Validator + + + + +1 + + + + +Neighborhood + + + + +Leader + + + + +0 + + + + +Validator + + + + +2 + + + diff --git a/book/src/.gitbook/assets/data-plane-seeding.svg b/book/src/.gitbook/assets/data-plane-seeding.svg index 7286bd291a64f4..765b53c93f589e 100644 --- a/book/src/.gitbook/assets/data-plane-seeding.svg +++ b/book/src/.gitbook/assets/data-plane-seeding.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/data-plane.svg b/book/src/.gitbook/assets/data-plane.svg index 5fa566eebda6f1..5a33b8bf6e5abd 100644 --- a/book/src/.gitbook/assets/data-plane.svg +++ b/book/src/.gitbook/assets/data-plane.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/fork-generation (4).svg b/book/src/.gitbook/assets/fork-generation (4).svg new file mode 100644 index 00000000000000..096f643c15ad9f --- /dev/null +++ b/book/src/.gitbook/assets/fork-generation (4).svg @@ -0,0 +1,348 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +time + + + + +L1 + + + + +L2 + + + + +L3 + + + + +L4 + + + + +L5 + + + + +x + + + + +xx + + + + +E3 + + + + +xx + + + + +E2 + + + + +E4 + + + + +xx + + + + +x + + + + +E5 + + + + +E1 + + + + +xx + + + + +E3' + + + + +xx + + + + +x + + + + +xx + + + + +xx + + + + +validator + + + + +vote(E1) + + + + +vote(E2) + + + + +slash(E3) + + + + +vote(E4) + + + + +hang + + + + +on + + + + +to + + + + +action + + + + +E4 + + + + +and + + + + +E5 + + + + +for + + + + +more... + + + diff --git a/book/src/.gitbook/assets/fork-generation (5).svg b/book/src/.gitbook/assets/fork-generation (5).svg new file mode 100644 index 00000000000000..3d13d7549b728a --- /dev/null +++ b/book/src/.gitbook/assets/fork-generation (5).svg @@ -0,0 +1,330 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +time + + + + +L1 + + + + +L2 + + + + +L3 + + + + +L4 + + + + +L5 + + + + +x + + + + +xx + + + + +E3 + + + + +xx + + + + +E2 + + + + +E4 + + + + +xx + + + + +x + + + + +E5 + + + + +E1 + + + + +xx + + + + +E3' + + + + +xx + + + + +x + + + + +xx + + + + +xx + + + + +validator + + + + +vote(E1) + + + + +vote(E2) + + + + +slash(E3) + + + + +vote(E4) + + + + +hang + + + + +on + + + + +to + + + + +action + + + + +E4 + + + + +and + + + + +E5 + + + + +for + + + + +more... + + + diff --git a/book/src/.gitbook/assets/fork-generation-3.svg b/book/src/.gitbook/assets/fork-generation-3.svg new file mode 100644 index 00000000000000..3d13d7549b728a --- /dev/null +++ b/book/src/.gitbook/assets/fork-generation-3.svg @@ -0,0 +1,330 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +time + + + + +L1 + + + + +L2 + + + + +L3 + + + + +L4 + + + + +L5 + + + + +x + + + + +xx + + + + +E3 + + + + +xx + + + + +E2 + + + + +E4 + + + + +xx + + + + +x + + + + +E5 + + + + +E1 + + + + +xx + + + + +E3' + + + + +xx + + + + +x + + + + +xx + + + + +xx + + + + +validator + + + + +vote(E1) + + + + +vote(E2) + + + + +slash(E3) + + + + +vote(E4) + + + + +hang + + + + +on + + + + +to + + + + +action + + + + +E4 + + + + +and + + + + +E5 + + + + +for + + + + +more... + + + diff --git a/book/src/.gitbook/assets/fork-generation.svg b/book/src/.gitbook/assets/fork-generation.svg index 096f643c15ad9f..3d13d7549b728a 100644 --- a/book/src/.gitbook/assets/fork-generation.svg +++ b/book/src/.gitbook/assets/fork-generation.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/forks (2).svg b/book/src/.gitbook/assets/forks (2).svg index 725a73f5d3dea7..e57f128fcab872 100644 --- a/book/src/.gitbook/assets/forks (2).svg +++ b/book/src/.gitbook/assets/forks (2).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/forks (4).svg b/book/src/.gitbook/assets/forks (4).svg new file mode 100644 index 00000000000000..725a73f5d3dea7 --- /dev/null +++ b/book/src/.gitbook/assets/forks (4).svg @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +5 + + + + +1 + + + + +2 + + + + +4 + + + + +3 + + + + +6 + + + + +7 + + + diff --git a/book/src/.gitbook/assets/forks (5).svg b/book/src/.gitbook/assets/forks (5).svg new file mode 100644 index 00000000000000..725a73f5d3dea7 --- /dev/null +++ b/book/src/.gitbook/assets/forks (5).svg @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +5 + + + + +1 + + + + +2 + + + + +4 + + + + +3 + + + + +6 + + + + +7 + + + diff --git a/book/src/.gitbook/assets/forks-pruned (4).svg b/book/src/.gitbook/assets/forks-pruned (4).svg new file mode 100644 index 00000000000000..5a8f41f21c9629 --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned (4).svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +5 + + + + +1 + + + + +2 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/forks-pruned (5).svg b/book/src/.gitbook/assets/forks-pruned (5).svg new file mode 100644 index 00000000000000..5a8f41f21c9629 --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned (5).svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +5 + + + + +1 + + + + +2 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/forks-pruned-3.svg b/book/src/.gitbook/assets/forks-pruned-3.svg new file mode 100644 index 00000000000000..5a8f41f21c9629 --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned-3.svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +5 + + + + +1 + + + + +2 + + + + +4 + + + diff --git a/book/src/.gitbook/assets/forks-pruned.svg b/book/src/.gitbook/assets/forks-pruned.svg index 59819eb241eab1..5a8f41f21c9629 100644 --- a/book/src/.gitbook/assets/forks-pruned.svg +++ b/book/src/.gitbook/assets/forks-pruned.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/forks-pruned2 (4).svg b/book/src/.gitbook/assets/forks-pruned2 (4).svg new file mode 100644 index 00000000000000..a550b817289bcc --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned2 (4).svg @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +1 + + + + +3 + + + + +6 + + + + +7 + + + diff --git a/book/src/.gitbook/assets/forks-pruned2 (5).svg b/book/src/.gitbook/assets/forks-pruned2 (5).svg new file mode 100644 index 00000000000000..f57f691d73cfda --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned2 (5).svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +1 + + + + +3 + + + + +6 + + + + +7 + + + diff --git a/book/src/.gitbook/assets/forks-pruned2-1.svg b/book/src/.gitbook/assets/forks-pruned2-1.svg new file mode 100644 index 00000000000000..f57f691d73cfda --- /dev/null +++ b/book/src/.gitbook/assets/forks-pruned2-1.svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +1 + + + + +3 + + + + +6 + + + + +7 + + + diff --git a/book/src/.gitbook/assets/forks-pruned2.svg b/book/src/.gitbook/assets/forks-pruned2.svg index a550b817289bcc..f57f691d73cfda 100644 --- a/book/src/.gitbook/assets/forks-pruned2.svg +++ b/book/src/.gitbook/assets/forks-pruned2.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/forks.svg b/book/src/.gitbook/assets/forks.svg index e57f128fcab872..725a73f5d3dea7 100644 --- a/book/src/.gitbook/assets/forks.svg +++ b/book/src/.gitbook/assets/forks.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/passive-staking-callflow (4).svg b/book/src/.gitbook/assets/passive-staking-callflow (4).svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow (4).svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/passive-staking-callflow (5).svg b/book/src/.gitbook/assets/passive-staking-callflow (5).svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow (5).svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/passive-staking-callflow (6).svg b/book/src/.gitbook/assets/passive-staking-callflow (6).svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow (6).svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/passive-staking-callflow (7).svg b/book/src/.gitbook/assets/passive-staking-callflow (7).svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow (7).svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/passive-staking-callflow-3 (1).svg b/book/src/.gitbook/assets/passive-staking-callflow-3 (1).svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow-3 (1).svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/passive-staking-callflow-3.svg b/book/src/.gitbook/assets/passive-staking-callflow-3.svg new file mode 100644 index 00000000000000..378686284a8e08 --- /dev/null +++ b/book/src/.gitbook/assets/passive-staking-callflow-3.svg @@ -0,0 +1,238 @@ + + + + + +VoteSigner + + + + +Validator + + + + +Cluster + + + + +StakerX + + + + +StakerY + + + + + + + + + + + + + + + + + + +boot.. + + + + + + + + + + + + +register + + + + + + + +(optional) + + + + + + + + + + +VoteState::Initialize(VoteSigner) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + +StakeState::Delegate(Validator) + + + + + + + + + + + + + + + + + + + + + + +validate + + + + + + + + + + + + + + +sign(vote) + + + + + + + + + + +signed vote + + + + + + + + + + +gossip(vote) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +max + + + +lockout + + + + + + + + + + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + + + + +StakeState::RedeemCredits() + + + + + + + diff --git a/book/src/.gitbook/assets/runtime (4).svg b/book/src/.gitbook/assets/runtime (4).svg new file mode 100644 index 00000000000000..c2be7907034a98 --- /dev/null +++ b/book/src/.gitbook/assets/runtime (4).svg @@ -0,0 +1,364 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +PoH + + + + +verify + + + + +TVU + + + + +load + + + + +accounts + + + + +sigverify + + + + +execute + + + + +lock + + + + +PoH + + + + +accounts + + + + +TPU + + + + +record + + + + +validate + + + + +commit + + + + +accounts + + + + +fee + + + + +allocate + + + + +unlock + + + + +new + + + + +accounts + + + + +accounts + + + diff --git a/book/src/.gitbook/assets/runtime (5).svg b/book/src/.gitbook/assets/runtime (5).svg new file mode 100644 index 00000000000000..0a9b8289b2bae8 --- /dev/null +++ b/book/src/.gitbook/assets/runtime (5).svg @@ -0,0 +1,346 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +PoH + + + + +verify + + + + +TVU + + + + +load + + + + +accounts + + + + +sigverify + + + + +execute + + + + +lock + + + + +PoH + + + + +accounts + + + + +TPU + + + + +record + + + + +validate + + + + +commit + + + + +accounts + + + + +fee + + + + +allocate + + + + +unlock + + + + +new + + + + +accounts + + + + +accounts + + + diff --git a/book/src/.gitbook/assets/runtime.svg b/book/src/.gitbook/assets/runtime.svg index c2be7907034a98..0a9b8289b2bae8 100644 --- a/book/src/.gitbook/assets/runtime.svg +++ b/book/src/.gitbook/assets/runtime.svg @@ -1,108 +1,90 @@ - + - + - + - + - + - + - + - - + + - + - + - - + + - + - + @@ -111,16 +93,16 @@ tspan.head{ - - + + - + - + @@ -129,16 +111,16 @@ tspan.head{ - - + + - + - + @@ -152,29 +134,29 @@ tspan.head{ - - + + - + - + - - + + - + - + @@ -188,29 +170,29 @@ tspan.head{ - - + + - + - + - - + + - + - + @@ -224,29 +206,29 @@ tspan.head{ - - + + - + - + - - + + - + - + diff --git a/book/src/.gitbook/assets/sdk-tools (2).svg b/book/src/.gitbook/assets/sdk-tools (2).svg index 629a3feaa35012..8903c46700f6e7 100644 --- a/book/src/.gitbook/assets/sdk-tools (2).svg +++ b/book/src/.gitbook/assets/sdk-tools (2).svg @@ -1,74 +1,92 @@ - + - + - + - + - + - + - + - - + + - + - + @@ -76,16 +94,16 @@ - - + + - + - + @@ -95,55 +113,55 @@ - + - + - + - + - - + + - - + + - + - + - - + + - + - + @@ -157,29 +175,29 @@ - - + + - - + + - + - + - + - + diff --git a/book/src/.gitbook/assets/sdk-tools (4).svg b/book/src/.gitbook/assets/sdk-tools (4).svg new file mode 100644 index 00000000000000..629a3feaa35012 --- /dev/null +++ b/book/src/.gitbook/assets/sdk-tools (4).svg @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Verifier + + + + +Loader + + + + +Solana + + + + +LoadAccounts + + + + +Runtime + + + + +Interpreter + + + + +Accounts + + + diff --git a/book/src/.gitbook/assets/sdk-tools (5).svg b/book/src/.gitbook/assets/sdk-tools (5).svg new file mode 100644 index 00000000000000..629a3feaa35012 --- /dev/null +++ b/book/src/.gitbook/assets/sdk-tools (5).svg @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Verifier + + + + +Loader + + + + +Solana + + + + +LoadAccounts + + + + +Runtime + + + + +Interpreter + + + + +Accounts + + + diff --git a/book/src/.gitbook/assets/sdk-tools.svg b/book/src/.gitbook/assets/sdk-tools.svg index 8903c46700f6e7..629a3feaa35012 100644 --- a/book/src/.gitbook/assets/sdk-tools.svg +++ b/book/src/.gitbook/assets/sdk-tools.svg @@ -1,92 +1,74 @@ - + - + - + - + - + - + - + - - + + - + - + @@ -94,16 +76,16 @@ tspan.head{ - - + + - + - + @@ -113,55 +95,55 @@ tspan.head{ - + - + - + - + - - + + - - + + - + - + - - + + - + - + @@ -175,29 +157,29 @@ tspan.head{ - - + + - - + + - + - + - + - + diff --git a/book/src/.gitbook/assets/spv-bank-merkle (3).svg b/book/src/.gitbook/assets/spv-bank-merkle (3).svg index a07908d17e48cd..3962b1080700d0 100644 --- a/book/src/.gitbook/assets/spv-bank-merkle (3).svg +++ b/book/src/.gitbook/assets/spv-bank-merkle (3).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/spv-bank-merkle (4).svg b/book/src/.gitbook/assets/spv-bank-merkle (4).svg new file mode 100644 index 00000000000000..a07908d17e48cd --- /dev/null +++ b/book/src/.gitbook/assets/spv-bank-merkle (4).svg @@ -0,0 +1,163 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Hash(Account1) + + + + +Bank-Diff-Merkle + + + + +Hash + + + + +Hash(Account2) + + + + +Previous + + + + +Bank-Merkle + + + + +Bank-Diff-Merkle + + + + +Block-Merkle + + + diff --git a/book/src/.gitbook/assets/spv-bank-merkle (5).svg b/book/src/.gitbook/assets/spv-bank-merkle (5).svg new file mode 100644 index 00000000000000..a07908d17e48cd --- /dev/null +++ b/book/src/.gitbook/assets/spv-bank-merkle (5).svg @@ -0,0 +1,163 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Hash(Account1) + + + + +Bank-Diff-Merkle + + + + +Hash + + + + +Hash(Account2) + + + + +Previous + + + + +Bank-Merkle + + + + +Bank-Diff-Merkle + + + + +Block-Merkle + + + diff --git a/book/src/.gitbook/assets/spv-bank-merkle-3.svg b/book/src/.gitbook/assets/spv-bank-merkle-3.svg new file mode 100644 index 00000000000000..a07908d17e48cd --- /dev/null +++ b/book/src/.gitbook/assets/spv-bank-merkle-3.svg @@ -0,0 +1,163 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Hash(Account1) + + + + +Bank-Diff-Merkle + + + + +Hash + + + + +Hash(Account2) + + + + +Previous + + + + +Bank-Merkle + + + + +Bank-Diff-Merkle + + + + +Block-Merkle + + + diff --git a/book/src/.gitbook/assets/spv-bank-merkle.svg b/book/src/.gitbook/assets/spv-bank-merkle.svg index 3962b1080700d0..a07908d17e48cd 100644 --- a/book/src/.gitbook/assets/spv-bank-merkle.svg +++ b/book/src/.gitbook/assets/spv-bank-merkle.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/spv-block-merkle (2).svg b/book/src/.gitbook/assets/spv-block-merkle (2).svg index 18ea80cadd37fa..9d5805ca28ebcb 100644 --- a/book/src/.gitbook/assets/spv-block-merkle (2).svg +++ b/book/src/.gitbook/assets/spv-block-merkle (2).svg @@ -1,62 +1,80 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/spv-block-merkle (4).svg b/book/src/.gitbook/assets/spv-block-merkle (4).svg new file mode 100644 index 00000000000000..18ea80cadd37fa --- /dev/null +++ b/book/src/.gitbook/assets/spv-block-merkle (4).svg @@ -0,0 +1,203 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Hash(T1, + + + + +status) + + + + +Hash + + + + +Entry-Merkle + + + + +Hash(T2, + + + + +status) + + + + +Block-Merkle + + + + +Hash(T3, + + + + +Entry-Merkle + + + + +status) + + + + +Hash + + + + +0 + + + diff --git a/book/src/.gitbook/assets/spv-block-merkle (5).svg b/book/src/.gitbook/assets/spv-block-merkle (5).svg new file mode 100644 index 00000000000000..18ea80cadd37fa --- /dev/null +++ b/book/src/.gitbook/assets/spv-block-merkle (5).svg @@ -0,0 +1,203 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Hash(T1, + + + + +status) + + + + +Hash + + + + +Entry-Merkle + + + + +Hash(T2, + + + + +status) + + + + +Block-Merkle + + + + +Hash(T3, + + + + +Entry-Merkle + + + + +status) + + + + +Hash + + + + +0 + + + diff --git a/book/src/.gitbook/assets/spv-block-merkle.svg b/book/src/.gitbook/assets/spv-block-merkle.svg index 9d5805ca28ebcb..18ea80cadd37fa 100644 --- a/book/src/.gitbook/assets/spv-block-merkle.svg +++ b/book/src/.gitbook/assets/spv-block-merkle.svg @@ -1,80 +1,62 @@ - + - + - + - + - + - + - + diff --git a/book/src/.gitbook/assets/tpu (3).svg b/book/src/.gitbook/assets/tpu (3).svg new file mode 100644 index 00000000000000..1de96c7927d65b --- /dev/null +++ b/book/src/.gitbook/assets/tpu (3).svg @@ -0,0 +1,312 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Clients + + + + +TPU + + + + +Fetch + + + + +Stage + + + + +SigVerify + + + + +Stage + + + + +PoH + + + + +Banking + + + + +Stage + + + + +Service + + + + +Bank + + + + +Broadcast + + + + +Stage + + + + +Downstream + + + + +Validators + + + diff --git a/book/src/.gitbook/assets/tpu (4).svg b/book/src/.gitbook/assets/tpu (4).svg new file mode 100644 index 00000000000000..1de96c7927d65b --- /dev/null +++ b/book/src/.gitbook/assets/tpu (4).svg @@ -0,0 +1,312 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Clients + + + + +TPU + + + + +Fetch + + + + +Stage + + + + +SigVerify + + + + +Stage + + + + +PoH + + + + +Banking + + + + +Stage + + + + +Service + + + + +Bank + + + + +Broadcast + + + + +Stage + + + + +Downstream + + + + +Validators + + + diff --git a/book/src/.gitbook/assets/tpu.svg b/book/src/.gitbook/assets/tpu.svg index 633ea5eb9579c1..1de96c7927d65b 100644 --- a/book/src/.gitbook/assets/tpu.svg +++ b/book/src/.gitbook/assets/tpu.svg @@ -1,92 +1,74 @@ - + - + - + - + - + - + - + - - + + - + - + @@ -96,35 +78,35 @@ tspan.head{ - + - + - + - + - - + + - + - + @@ -134,16 +116,16 @@ tspan.head{ - - + + - + - + @@ -153,51 +135,51 @@ tspan.head{ - - + + - + - + - - + + - + - + - - + + - + - + @@ -213,16 +195,16 @@ tspan.head{ - - + + - + - + @@ -238,16 +220,16 @@ tspan.head{ - - + + - + - + diff --git a/book/src/.gitbook/assets/tvu.svg b/book/src/.gitbook/assets/tvu.svg index 0565e3f35177d2..de4c59c97ec2a9 100644 --- a/book/src/.gitbook/assets/tvu.svg +++ b/book/src/.gitbook/assets/tvu.svg @@ -1,92 +1,74 @@ - + - + - + - + - + - + - + - - + + - + - + @@ -95,52 +77,52 @@ tspan.head{ - + - + - + - + - - + + - + - + - - + + - + - + @@ -149,18 +131,18 @@ tspan.head{ - - + + - + - + @@ -172,46 +154,46 @@ tspan.head{ - - + + - - + + - + - + - + - + - - + + - + - + @@ -229,16 +211,16 @@ tspan.head{ - - + + - + - + diff --git a/book/src/.gitbook/assets/validator (1).svg b/book/src/.gitbook/assets/validator (1).svg index 11be7b6a71a216..aa8bb05d4b23a2 100644 --- a/book/src/.gitbook/assets/validator (1).svg +++ b/book/src/.gitbook/assets/validator (1).svg @@ -1,77 +1,95 @@ - + - + - + - + - + - + - + - - + + - + - - + + @@ -88,7 +106,7 @@ - + @@ -98,58 +116,58 @@ - + - + - + - - + + - + - + - - + + - - + + - + - + - + - + @@ -170,64 +188,64 @@ - - + + - - + + - + - + - + - + - - + + - + - + - - + + - + - + @@ -243,18 +261,18 @@ - - + + - + - + @@ -279,8 +297,8 @@ - - + + @@ -296,43 +314,43 @@ - + - + - + - + - - + + - + - + - + - + diff --git a/book/src/.gitbook/assets/validator (3).svg b/book/src/.gitbook/assets/validator (3).svg new file mode 100644 index 00000000000000..11be7b6a71a216 --- /dev/null +++ b/book/src/.gitbook/assets/validator (3).svg @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Validator + + + + +Bank + + + + +Forks + + + + +JSON + + + + +TPU + + + + +RPC + + + + +Service + + + + +Gossip + + + + +Replay + + + + +Stage + + + + +Broadcast + + + + +Stage + + + + +Blocktree + + + + +Service + + + + +BlobFetch + + + + +Stage + + + + +Validators + + + + +Upstream + + + + +Validators + + + + +Downstream + + + + +Validators + + + diff --git a/book/src/.gitbook/assets/validator (4).svg b/book/src/.gitbook/assets/validator (4).svg new file mode 100644 index 00000000000000..11be7b6a71a216 --- /dev/null +++ b/book/src/.gitbook/assets/validator (4).svg @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Validator + + + + +Bank + + + + +Forks + + + + +JSON + + + + +TPU + + + + +RPC + + + + +Service + + + + +Gossip + + + + +Replay + + + + +Stage + + + + +Broadcast + + + + +Stage + + + + +Blocktree + + + + +Service + + + + +BlobFetch + + + + +Stage + + + + +Validators + + + + +Upstream + + + + +Validators + + + + +Downstream + + + + +Validators + + + diff --git a/book/src/.gitbook/assets/validator-proposal (4).svg b/book/src/.gitbook/assets/validator-proposal (4).svg new file mode 100644 index 00000000000000..bf8410aba89f05 --- /dev/null +++ b/book/src/.gitbook/assets/validator-proposal (4).svg @@ -0,0 +1,496 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Fetch + + + + +Stage + + + + +Validator + + + + +TPU + + + + +Fetch + + + + +Stage + + + + +TPU + + + + +SigVerify + + + + +Stage + + + + +Upstream + + + + +Validators + + + + +Repair + + + + +Stage + + + + +Blockstore + + + + +Multicast + + + + +Stage + + + + +Downstream + + + + +Validators + + + + +PoH + + + + +Service + + + + +Banking + + + + +Stage + + + + +Banktree + + + + +Blockstore + + + diff --git a/book/src/.gitbook/assets/validator-proposal (5).svg b/book/src/.gitbook/assets/validator-proposal (5).svg new file mode 100644 index 00000000000000..bcd1c854cfdb7e --- /dev/null +++ b/book/src/.gitbook/assets/validator-proposal (5).svg @@ -0,0 +1,514 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Fetch + + + + +Stage + + + + +Validator + + + + +TPU + + + + +Fetch + + + + +Stage + + + + +TPU + + + + +SigVerify + + + + +Stage + + + + +Upstream + + + + +Validators + + + + +Repair + + + + +Stage + + + + +Blockstore + + + + +Multicast + + + + +Stage + + + + +Downstream + + + + +Validators + + + + +PoH + + + + +Service + + + + +Banking + + + + +Stage + + + + +Banktree + + + + +Blockstore + + + diff --git a/book/src/.gitbook/assets/validator-proposal-1.svg b/book/src/.gitbook/assets/validator-proposal-1.svg new file mode 100644 index 00000000000000..bf8410aba89f05 --- /dev/null +++ b/book/src/.gitbook/assets/validator-proposal-1.svg @@ -0,0 +1,496 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Client + + + + +Fetch + + + + +Stage + + + + +Validator + + + + +TPU + + + + +Fetch + + + + +Stage + + + + +TPU + + + + +SigVerify + + + + +Stage + + + + +Upstream + + + + +Validators + + + + +Repair + + + + +Stage + + + + +Blockstore + + + + +Multicast + + + + +Stage + + + + +Downstream + + + + +Validators + + + + +PoH + + + + +Service + + + + +Banking + + + + +Stage + + + + +Banktree + + + + +Blockstore + + + diff --git a/book/src/.gitbook/assets/validator-proposal.svg b/book/src/.gitbook/assets/validator-proposal.svg index bcd1c854cfdb7e..bf8410aba89f05 100644 --- a/book/src/.gitbook/assets/validator-proposal.svg +++ b/book/src/.gitbook/assets/validator-proposal.svg @@ -1,105 +1,87 @@ - + - + - + - + - + - + - + - - + + - + - + - - + + - + - + @@ -113,76 +95,76 @@ tspan.head{ - + - + - + - + - + - + - + - + - - + + - - + + - - + + - + - + - + - + - + - + @@ -201,121 +183,121 @@ tspan.head{ - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -372,16 +354,16 @@ tspan.head{ - - + + - + - + diff --git a/book/src/.gitbook/assets/validator.svg b/book/src/.gitbook/assets/validator.svg index aa8bb05d4b23a2..11be7b6a71a216 100644 --- a/book/src/.gitbook/assets/validator.svg +++ b/book/src/.gitbook/assets/validator.svg @@ -1,95 +1,77 @@ - + - + - + - + - + - + - + - - + + - + - - + + @@ -106,7 +88,7 @@ tspan.head{ - + @@ -116,58 +98,58 @@ tspan.head{ - + - + - + - - + + - + - + - - + + - - + + - + - + - + - + @@ -188,64 +170,64 @@ tspan.head{ - - + + - - + + - + - + - + - + - - + + - + - + - - + + - + - + @@ -261,18 +243,18 @@ tspan.head{ - - + + - + - + @@ -297,8 +279,8 @@ tspan.head{ - - + + @@ -314,43 +296,43 @@ tspan.head{ - + - + - + - + - - + + - + - + - + - + diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 27afeaa645499c..0ab125624b5e7b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -54,6 +54,7 @@ * [Rent](proposals/rent.md) * [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md) * [Snapshot Verification](proposals/snapshot-verification.md) + * [Bankless Leader](proposals/bankless-leader.md) * [Implemented Design Proposals](implemented-proposals/README.md) * [Blocktree](implemented-proposals/blocktree.md) * [Cluster Software Installation and Updates](implemented-proposals/installer.md) diff --git a/book/src/api-reference/blockstreamer.md b/book/src/api-reference/blockstreamer.md index 146073efef2ab6..81722e18e9c10c 100644 --- a/book/src/api-reference/blockstreamer.md +++ b/book/src/api-reference/blockstreamer.md @@ -1,6 +1,6 @@ # Blockstreamer -Solana supports a node type called an _blockstreamer_. This fullnode variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication. +Solana supports a node type called an _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication. A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node. diff --git a/book/src/api-reference/cli.md b/book/src/api-reference/cli.md index d1509c2bae323a..6a5abbe9c4acd2 100644 --- a/book/src/api-reference/cli.md +++ b/book/src/api-reference/cli.md @@ -188,48 +188,50 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json SUBCOMMANDS: - address Get your public key - airdrop Request lamports - balance Get your balance - cancel Cancel a transfer - claim-storage-reward Redeem storage reward credits - cluster-version Get the version of the cluster entrypoint - confirm Confirm transaction by signature - create-replicator-storage-account Create a replicator storage account - create-stake-account Create a stake account - create-storage-mining-pool-account Create mining pool account - create-validator-storage-account Create a validator storage account - create-vote-account Create a vote account - deactivate-stake Deactivate the delegated stake from the stake account - delegate-stake Delegate stake to a vote account - deploy Deploy a program - fees Display current cluster fees - get Get wallet config settings - get-slot Get current slot - get-transaction-count Get current transaction count - help Prints this message or the help of the given subcommand(s) - pay Send a payment - ping Submit transactions sequentially - redeem-vote-credits Redeem credits in the stake account - send-signature Send a signature to authorize a transfer - send-timestamp Send a timestamp to unlock a transfer - set Set a wallet config setting - show-account Show the contents of an account - show-stake-account Show the contents of a stake account - show-storage-account Show the contents of a storage account - show-vote-account Show the contents of a vote account - stake-authorize-staker Authorize a new stake signing keypair for the given stake account - stake-authorize-withdrawer Authorize a new withdraw signing keypair for the given stake account - uptime Show the uptime of a validator, based on epoch voting history - validator-info Publish/get Validator info on Solana - vote-authorize-voter Authorize a new vote signing keypair for the given vote account - vote-authorize-withdrawer Authorize a new withdraw signing keypair for the given vote account - withdraw-stake Withdraw the unstaked lamports from the stake account + address Get your public key + airdrop Request lamports + balance Get your balance + cancel Cancel a transfer + claim-storage-reward Redeem storage reward credits + cluster-version Get the version of the cluster entrypoint + confirm Confirm transaction by signature + create-replicator-storage-account Create a replicator storage account + create-stake-account Create a stake account + create-validator-storage-account Create a validator storage account + create-vote-account Create a vote account + deactivate-stake Deactivate the delegated stake from the stake account + delegate-stake Delegate stake to a vote account + deploy Deploy a program + fees Display current cluster fees + get Get cli config settings + get-epoch-info Get information about the current epoch + get-genesis-blockhash Get the genesis blockhash + get-slot Get current slot + get-transaction-count Get current transaction count + help Prints this message or the help of the given subcommand(s) + pay Send a payment + ping Submit transactions sequentially + redeem-vote-credits Redeem credits in the stake account + send-signature Send a signature to authorize a transfer + send-timestamp Send a timestamp to unlock a transfer + set Set a cli config setting + show-account Show the contents of an account + show-stake-account Show the contents of a stake account + show-storage-account Show the contents of a storage account + show-validators Show information about the current validators + show-vote-account Show the contents of a vote account + stake-authorize-staker Authorize a new stake signing keypair for the given stake account + stake-authorize-withdrawer Authorize a new withdraw signing keypair for the given stake account + uptime Show the uptime of a validator, based on epoch voting history + validator-info Publish/get Validator info on Solana + vote-authorize-voter Authorize a new vote signing keypair for the given vote account + vote-authorize-withdrawer Authorize a new withdraw signing keypair for the given vote account + withdraw-stake Withdraw the unstaked lamports from the stake account ``` #### solana-address @@ -245,7 +247,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -263,7 +265,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] --drone-host Drone host to use [default: the --url host] --drone-port Drone port to use [default: 9900] -u, --url JSON RPC URL for the solana cluster @@ -288,7 +290,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -309,7 +311,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -330,7 +332,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -352,7 +354,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -370,7 +372,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -391,7 +393,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -413,10 +415,10 @@ FLAGS: -V, --version Prints version information OPTIONS: - --authorized-staker Public key of authorized staker (defaults to wallet) - --authorized-withdrawer Public key of the authorized withdrawer (defaults to wallet) + --authorized-staker Public key of authorized staker (defaults to cli config pubkey) + --authorized-withdrawer Public key of the authorized withdrawer (defaults to cli config pubkey) -C, --config Configuration file to use [default: - ~/.config/solana/wallet/config.yml] + ~/.config/solana/cli/config.yml] --custodian Identity of the custodian (can withdraw before lockup expires) -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -428,29 +430,6 @@ ARGS: Specify unit to use for request [possible values: SOL, lamports] ``` -#### solana-create-storage-mining-pool-account -```text -solana-create-storage-mining-pool-account -Create mining pool account - -USAGE: - solana create-storage-mining-pool-account [OPTIONS] [UNIT] - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] - -u, --url JSON RPC URL for the solana cluster - -k, --keypair /path/to/id.json - -ARGS: - Storage mining pool account address to fund - The amount to assign to the storage mining pool account (default unit SOL) - Specify unit to use for request [possible values: SOL, lamports] -``` - #### solana-create-validator-storage-account ```text solana-create-validator-storage-account @@ -464,7 +443,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -479,7 +458,7 @@ solana-create-vote-account Create a vote account USAGE: - solana create-vote-account [OPTIONS] [UNIT] + solana create-vote-account [OPTIONS] FLAGS: -h, --help Prints help information @@ -487,18 +466,16 @@ FLAGS: OPTIONS: --authorized-voter Public key of the authorized voter (defaults to vote account) - --authorized-withdrawer Public key of the authorized withdrawer (defaults to wallet) + --authorized-withdrawer Public key of the authorized withdrawer (defaults to cli config pubkey) --commission The commission taken on reward redemption (0-255), default: 0 -C, --config Configuration file to use [default: - ~/.config/solana/wallet/config.yml] + ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ARGS: Vote account address to fund Validator that will vote with this account - The amount of send to the vote account (default unit SOL) - Specify unit to use for request [possible values: SOL, lamports] ``` #### solana-deactivate-stake @@ -507,20 +484,19 @@ solana-deactivate-stake Deactivate the delegated stake from the stake account USAGE: - solana deactivate-stake [OPTIONS] + solana deactivate-stake [OPTIONS] FLAGS: -h, --help Prints help information -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ARGS: Stake account to be deactivated. - The vote account to which the stake is currently delegated ``` #### solana-delegate-stake @@ -536,7 +512,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -558,7 +534,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -579,7 +555,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -587,7 +563,7 @@ OPTIONS: #### solana-get ```text solana-get -Get wallet config settings +Get cli config settings USAGE: solana get [OPTIONS] [CONFIG_FIELD] @@ -597,7 +573,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -605,6 +581,42 @@ ARGS: Return a specific config setting [possible values: url, keypair] ``` +#### solana-get-epoch-info +```text +solana-get-epoch-info +Get information about the current epoch + +USAGE: + solana get-epoch-info [OPTIONS] + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +OPTIONS: + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] + -u, --url JSON RPC URL for the solana cluster + -k, --keypair /path/to/id.json +``` + +#### solana-get-genesis-blockhash +```text +solana-get-genesis-blockhash +Get the genesis blockhash + +USAGE: + solana get-genesis-blockhash [OPTIONS] + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +OPTIONS: + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] + -u, --url JSON RPC URL for the solana cluster + -k, --keypair /path/to/id.json +``` + #### solana-get-slot ```text solana-get-slot @@ -618,7 +630,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -636,7 +648,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -668,7 +680,7 @@ FLAGS: OPTIONS: -C, --config Configuration file to use [default: - ~/.config/solana/wallet/config.yml] + ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json --after A timestamp after which transaction will execute @@ -694,7 +706,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -c, --count Stop after submitting count transactions -i, --interval Wait interval seconds between submitting the next transaction [default: 2] -u, --url JSON RPC URL for the solana cluster @@ -715,7 +727,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -737,7 +749,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -759,7 +771,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] --date Optional arbitrary timestamp to apply -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -772,7 +784,7 @@ ARGS: #### solana-set ```text solana-set -Set a wallet config setting +Set a cli config setting USAGE: solana set [OPTIONS] <--url |--keypair > @@ -782,7 +794,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json ``` @@ -801,7 +813,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json -o, --output Write the account data to this file @@ -824,7 +836,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -845,7 +857,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -853,6 +865,25 @@ ARGS: Storage account pubkey ``` +#### solana-show-validators +```text +solana-show-validators +Show information about the current validators + +USAGE: + solana show-validators [FLAGS] [OPTIONS] + +FLAGS: + -h, --help Prints help information + --lamports Display balance in lamports instead of SOL + -V, --version Prints version information + +OPTIONS: + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] + -u, --url JSON RPC URL for the solana cluster + -k, --keypair /path/to/id.json +``` + #### solana-show-vote-account ```text solana-show-vote-account @@ -867,7 +898,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -888,7 +919,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -910,7 +941,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -933,7 +964,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json --span Number of recent epochs to examine @@ -955,7 +986,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -978,7 +1009,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -1000,7 +1031,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json @@ -1022,7 +1053,7 @@ FLAGS: -V, --version Prints version information OPTIONS: - -C, --config Configuration file to use [default: ~/.config/solana/wallet/config.yml] + -C, --config Configuration file to use [default: ~/.config/solana/cli/config.yml] -u, --url JSON RPC URL for the solana cluster -k, --keypair /path/to/id.json diff --git a/book/src/api-reference/jsonrpc-api.md b/book/src/api-reference/jsonrpc-api.md index 932c6c6d4c61b5..447ac76bcf4e28 100644 --- a/book/src/api-reference/jsonrpc-api.md +++ b/book/src/api-reference/jsonrpc-api.md @@ -17,6 +17,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana- * [confirmTransaction](jsonrpc-api.md#confirmtransaction) * [getAccountInfo](jsonrpc-api.md#getaccountinfo) * [getBalance](jsonrpc-api.md#getbalance) +* [getBlockConfidence](jsonrpc-api.md#getblockconfidence) * [getClusterNodes](jsonrpc-api.md#getclusternodes) * [getEpochInfo](jsonrpc-api.md#getepochinfo) * [getGenesisBlockhash](jsonrpc-api.md#getgenesisblockhash) @@ -149,6 +150,34 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, " {"jsonrpc":"2.0","result":0,"id":1} ``` +### getBlockConfidence + +Returns confidence for particular block + +#### Parameters: + +* `u64` - block, identified by Slot + +#### Results: + +The result field will be an array with two fields: + +* Confidence + * `null` - Unknown block + * `object` - BankConfidence + * `array` - confidence, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` +* 'integer' - total active stake, in lamports, of the current epoch + +#### Example: + +```bash +// Request +curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockConfidence","params":[5]}' http://localhost:8899 + +// Result +{"jsonrpc":"2.0","result":[{"confidence":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1} +``` + ### getClusterNodes Returns information about all the nodes participating in the cluster @@ -808,4 +837,3 @@ Unsubscribe from signature confirmation notification // Result {"jsonrpc": "2.0","result": true,"id": 1} ``` - diff --git a/book/src/block-confirmation.md b/book/src/block-confirmation.md index dd29c9e3062ab6..e48d3f91e1bdd4 100644 --- a/book/src/block-confirmation.md +++ b/book/src/block-confirmation.md @@ -17,7 +17,7 @@ height of the block it is voting on. The account stores the 32 highest heights. * Only the validator knows how to find its own votes directly. Other components, such as the one that calculates confirmation time, needs to - be baked into the fullnode code. The fullnode code queries the bank for all + be baked into the validator code. The validator code queries the bank for all accounts owned by the vote program. * Voting ballots do not contain a PoH hash. The validator is only voting that diff --git a/book/src/cluster/README.md b/book/src/cluster/README.md index 7db2b7cfc50327..0d8ac9b1c5970a 100644 --- a/book/src/cluster/README.md +++ b/book/src/cluster/README.md @@ -1,10 +1,10 @@ # A Solana Cluster -A Solana cluster is a set of fullnodes working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. Transactions sent to the wrong one are quietly rejected. In this chapter, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. +A Solana cluster is a set of validators working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. Transactions sent to the wrong one are quietly rejected. In this chapter, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. ## Creating a Cluster -Before starting any fullnodes, one first needs to create a _genesis block_. The block contains entries referencing two public keys, a _mint_ and a _bootstrap leader_. The fullnode holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis block. The second fullnode then contacts the bootstrap leader to register as a _validator_ or _replicator_. Additional fullnodes then register with any registered member of the cluster. +Before starting any validators, one first needs to create a _genesis block_. The block contains entries referencing two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis block. The second validator then contacts the bootstrap leader to register as a _validator_ or _replicator_. Additional validators then register with any registered member of the cluster. A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until replicator nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy. @@ -14,7 +14,7 @@ Validators and replicators enter the cluster via registration messages sent to i ## Sending Transactions to a Cluster -Clients send transactions to any fullnode's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes and replicated by replicator nodes, effectively appending them to the ledger. +Clients send transactions to any validator's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes and replicated by replicator nodes, effectively appending them to the ledger. ## Confirming Transactions diff --git a/book/src/cluster/fork-generation.md b/book/src/cluster/fork-generation.md index d1397d9de2d4aa..4f3a37678d8e5b 100644 --- a/book/src/cluster/fork-generation.md +++ b/book/src/cluster/fork-generation.md @@ -58,7 +58,7 @@ Validators vote based on a greedy choice to maximize their reward described in [ The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader slots, and `E`s represent entries from that leader during that leader's slot. The `x`s represent ticks only, and time flows downwards in the diagram. -![Fork generation](../.gitbook/assets/fork-generation%20%283%29.svg) +![Fork generation](../.gitbook/assets/fork-generation-3.svg) Note that an `E` appearing on 2 forks at the same slot is a slashable condition, so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for that slot. Once a validator commits to a forks, other forks can be discarded below that tick count. For any slot, validators need only consider a single "has entries" chain or a "ticks only" chain to be proposed by a leader. But multiple virtual entries may overlap as they link back to the a previous slot. diff --git a/book/src/cluster/leader-rotation.md b/book/src/cluster/leader-rotation.md index 8a7f783b3b6af4..91f1d17a1394d2 100644 --- a/book/src/cluster/leader-rotation.md +++ b/book/src/cluster/leader-rotation.md @@ -1,6 +1,6 @@ # Leader Rotation -At any given moment, a cluster expects only one fullnode to produce ledger entries. By having only one leader at a time, all validators are able to replay identical copies of the ledger. The drawback of only one leader at a time, however, is that a malicious leader is capable of censoring votes and transactions. Since censoring cannot be distinguished from the network dropping packets, the cluster cannot simply elect a single node to hold the leader role indefinitely. Instead, the cluster minimizes the influence of a malicious leader by rotating which node takes the lead. +At any given moment, a cluster expects only one validator to produce ledger entries. By having only one leader at a time, all validators are able to replay identical copies of the ledger. The drawback of only one leader at a time, however, is that a malicious leader is capable of censoring votes and transactions. Since censoring cannot be distinguished from the network dropping packets, the cluster cannot simply elect a single node to hold the leader role indefinitely. Instead, the cluster minimizes the influence of a malicious leader by rotating which node takes the lead. Each validator selects the expected leader using the same algorithm, described below. When the validator receives a new signed ledger entry, it can be certain that entry was produced by the expected leader. The order of slots which each leader is assigned a slot is called a _leader schedule_. diff --git a/book/src/cluster/managing-forks.md b/book/src/cluster/managing-forks.md index 6a26b4b0cd4e38..255a542f36e7ac 100644 --- a/book/src/cluster/managing-forks.md +++ b/book/src/cluster/managing-forks.md @@ -1,14 +1,14 @@ # Managing Forks -The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the fullnode interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a fullnode to weigh those forks, such that it may eventually select a fork. +The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the validator interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork. -A fullnode selects a fork by submiting a vote to a slot leader on that fork. The vote commits the fullnode for a duration of time called a _lockout period_. The fullnode is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the fullnode has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs a operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a fullnode votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the fullnode will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint. +A validator selects a fork by submiting a vote to a slot leader on that fork. The vote commits the validator for a duration of time called a _lockout period_. The validator is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the validator has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs a operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a validator votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the validator will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint. ## Active Forks An active fork is as a sequence of checkpoints that has a length at least one longer than the rollback depth. The shortest fork will have a length exactly one longer than the rollback depth. For example: -![Forks](../.gitbook/assets/forks.svg) +![Forks](../.gitbook/assets/forks%20%282%29.svg) The following sequences are _active forks_: @@ -19,17 +19,17 @@ The following sequences are _active forks_: ## Pruning and Squashing -A fullnode may vote on any checkpoint in the tree. In the diagram above, that's every node except the leaves of the tree. After voting, the fullnode prunes nodes that fork from a distance farther than the rollback depth and then takes the opportunity to minimize its memory usage by squashing any nodes it can into the root. +A validator may vote on any checkpoint in the tree. In the diagram above, that's every node except the leaves of the tree. After voting, the validator prunes nodes that fork from a distance farther than the rollback depth and then takes the opportunity to minimize its memory usage by squashing any nodes it can into the root. Starting from the example above, wth a rollback depth of 2, consider a vote on 5 versus a vote on 6. First, a vote on 5: -![Forks after pruning](../.gitbook/assets/forks-pruned%20%283%29.svg) +![Forks after pruning](../.gitbook/assets/forks-pruned-3.svg) The new root is 2, and any active forks that are not descendants from 2 are pruned. Alternatively, a vote on 6: -![Forks](../.gitbook/assets/forks-pruned2%20%281%29.svg) +![Forks](../.gitbook/assets/forks-pruned2-1.svg) The tree remains with a root of 1, since the active fork starting at 6 is only 2 checkpoints from the root. diff --git a/book/src/cluster/performance-metrics.md b/book/src/cluster/performance-metrics.md index 0e74ca37473cb3..83ec1d3a0fb832 100644 --- a/book/src/cluster/performance-metrics.md +++ b/book/src/cluster/performance-metrics.md @@ -21,3 +21,4 @@ The validator software is deployed to GCP n1-standard-16 instances with 1TB pd-s solana-bench-tps is started after the network converges from a client machine with n1-standard-16 CPU-only instance with the following arguments: `--tx\_count=50000 --thread-batch-sleep 1000` TPS and confirmation metrics are captured from the dashboard numbers over a 5 minute average of when the bench-tps transfer stage begins. + diff --git a/book/src/cluster/stake-delegation-and-rewards.md b/book/src/cluster/stake-delegation-and-rewards.md index d07b20549db91e..fb2e0f03cca4ea 100644 --- a/book/src/cluster/stake-delegation-and-rewards.md +++ b/book/src/cluster/stake-delegation-and-rewards.md @@ -29,11 +29,13 @@ VoteState is the current state of all the votes the validator has submitted to t * Account::lamports - The accumulated lamports from the commission. These do not count as stakes. * `authorized_voter` - Only this identity is authorized to submit votes. This field can only modified by this identity. * `node_pubkey` - The Solana node that votes in this account. -* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's - address and the authorized vote signer - +* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's -### VoteInstruction::Initialize(VoteInit) + ```text + address and the authorized vote signer + ``` + +### VoteInstruction::Initialize\(VoteInit\) * `account[0]` - RW - The VoteState @@ -43,8 +45,7 @@ VoteState is the current state of all the votes the validator has submitted to t ### VoteInstruction::Authorize\(Pubkey, VoteAuthorize\) - Updates the account with a new authorized voter or withdrawer, according to the VoteAuthorize parameter - (`Voter` or `Withdrawer`). The transaction must be by signed by the Vote account's current `authorized_voter` or `authorized_withdrawer`. +Updates the account with a new authorized voter or withdrawer, according to the VoteAuthorize parameter \(`Voter` or `Withdrawer`\). The transaction must be by signed by the Vote account's current `authorized_voter` or `authorized_withdrawer`. * `account[0]` - RW - The VoteState @@ -57,13 +58,11 @@ VoteState is the current state of all the votes the validator has submitted to t `VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules see [Tower BFT](../implemented-proposals/tower-bft.md) * `account[1]` - RO - `sysvar::slot_hashes` A list of some N most recent slots and their hashes for the vote to be verified against. - * `account[2]` - RO - `sysvar::clock` The current network time, expressed in slots, epochs. ### StakeState -A StakeState takes one of four forms, StakeState::Uninitialized, StakeState::Initialized, StakeState::Stake, and StakeState::RewardsPool. Only the first three forms are used in staking, but only StakeState::Stake is interesting. -All RewardsPools are created at genesis. +A StakeState takes one of four forms, StakeState::Uninitialized, StakeState::Initialized, StakeState::Stake, and StakeState::RewardsPool. Only the first three forms are used in staking, but only StakeState::Stake is interesting. All RewardsPools are created at genesis. ### StakeState::Stake @@ -75,22 +74,27 @@ StakeState::Stake is the current delegation preference of the **staker** and con * `credits_observed` - The total credits claimed over the lifetime of the program. * `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up. * `deactivated` - the epoch at which this stake was de-activated, some cool down epochs are required before the account - is fully deactivated, and the stake available for withdrawal + + ```text + is fully deactivated, and the stake available for withdrawal + ``` + * `authorized_staker` - the pubkey of the entity that must sign delegation, activation, and deactivation transactions -* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's - address, and the authorized staker +* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's + + ```text + address, and the authorized staker + ``` ### StakeState::RewardsPool -To avoid a single network wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined -keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value. +To avoid a single network wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value. The Stakes and the RewardsPool are accounts that are owned by the same `Stake` program. ### StakeInstruction::DelegateStake -The Stake account is moved from Ininitialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. If the stake account is already StakeState::Stake (i.e. already activated), the -stake is re-delegated The transaction must be signed by the stake's `authorized_staker`. +The Stake account is moved from Ininitialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated The transaction must be signed by the stake's `authorized_staker`. * `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX * `account[1]` - R - The VoteState instance. @@ -99,9 +103,7 @@ stake is re-delegated The transaction must be signed by the stake's `authorized ### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\) -Updates the account with a new authorized staker or withdrawer, according to - the StakeAuthorize parameter (`Staker` or `Withdrawer`). The transaction must be by signed by the - Stakee account's current `authorized_staker` or `authorized_withdrawer`. +Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be by signed by the Stakee account's current `authorized_staker` or `authorized_withdrawer`. * `account[0]` - RW - The StakeState @@ -119,8 +121,7 @@ The Vote account and the Stake account pair maintain a lifetime counter of total * `account[3]` - R - sysvar::rewards account from the Bank that carries point value. * `account[4]` - R - sysvar::stake\_history account from the Bank that carries stake warmup/cooldown history -Reward is paid out for the difference between `VoteState::credits` to `StakeState::Stake::credits_observed`, multiplied by `sysvar::rewards::Rewards::validator_point_value`. `StakeState::Stake::credits_observed` is updated to`VoteState::credits`. The commission is deposited into the Vote account token balance, and the reward is deposited to the Stake account token balance and -the stake account's `stake` is increased by the same amount (re-invested). +Reward is paid out for the difference between `VoteState::credits` to `StakeState::Stake::credits_observed`, multiplied by `sysvar::rewards::Rewards::validator_point_value`. `StakeState::Stake::credits_observed` is updated to`VoteState::credits`. The commission is deposited into the Vote account token balance, and the reward is deposited to the Stake account token balance and the stake account's `stake` is increased by the same amount \(re-invested\). ```text let credits_to_claim = vote_state.credits - stake_state.credits_observed; @@ -135,15 +136,13 @@ A staker may wish to withdraw from the network. To do so he must first deactivat The transaction must be signed by the stake's `authorized_staker`. * `account[0]` - RW - The StakeState::Stake instance that is deactivating. -* `account[1]` - R - The VoteState instance to which this stake is delegated, required in case of slashing -* `account[2]` - R - sysvar::clock account from the Bank that carries current epoch +* `account[1]` - R - sysvar::clock account from the Bank that carries current epoch StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by that epoch, and Account::lamports will be available for withdrawal. ### StakeInstruction::Withdraw\(u64\) -Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn. -The transaction must be signed by the stake's `authorized_withdrawer`. +Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn. The transaction must be signed by the stake's `authorized_withdrawer`. * `account[0]` - RW - The StakeState::Stake from which to withdraw. * `account[1]` - RW - Account that should be credited with the withdrawn lamports. @@ -159,7 +158,7 @@ The transaction must be signed by the stake's `authorized_withdrawer`. ## Example Callflow -![Passive Staking Callflow](../.gitbook/assets/passive-staking-callflow%20%283%29.svg) +![Passive Staking Callflow](../.gitbook/assets/passive-staking-callflow-3.svg) ## Staking Rewards @@ -225,9 +224,9 @@ Were 2 stakes \(X and Y\) to activate at epoch N, they would be awarded a portio ### Withdrawal -Only lamports in excess of effective+activating stake may be withdrawn at any time. This means that during warmup, effectively no stake can be withdrawn. During cooldown, any tokens in excess of effective stake may be withdrawn \(activating == 0\). Because earned rewards are automatically added to stake, withdrawal is generally only possible after deactivation. +Only lamports in excess of effective+activating stake may be withdrawn at any time. This means that during warmup, effectively no stake can be withdrawn. During cooldown, any tokens in excess of effective stake may be withdrawn \(activating == 0\). Because earned rewards are automatically added to stake, withdrawal is generally only possible after deactivation. ### Lock-up -Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as a slot height, i.e. the minimum slot height that must be reached by the network before the stake account balance is available for withdrawal, except to a specified custodian. This information is gathered when the stake account is created. +Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as a slot height, i.e. the minimum slot height that must be reached by the network before the stake account balance is available for withdrawal, except to a specified custodian. This information is gathered when the stake account is created. diff --git a/book/src/cluster/synchronization.md b/book/src/cluster/synchronization.md index 75a0a46d276e8e..6394de3fe68d66 100644 --- a/book/src/cluster/synchronization.md +++ b/book/src/cluster/synchronization.md @@ -1,10 +1,10 @@ # Synchronization -Fast, reliable synchronization is the biggest reason Solana is able to achieve such high throughput. Traditional blockchains synchronize on large chunks of transactions called blocks. By synchronizing on blocks, a transaction cannot be processed until a duration called "block time" has passed. In Proof of Work consensus, these block times need to be very large \(~10 minutes\) to minimize the odds of multiple fullnodes producing a new valid block at the same time. There's no such constraint in Proof of Stake consensus, but without reliable timestamps, a fullnode cannot determine the order of incoming blocks. The popular workaround is to tag each block with a [wallclock timestamp](https://en.bitcoin.it/wiki/Block_timestamp). Because of clock drift and variance in network latencies, the timestamp is only accurate within an hour or two. To workaround the workaround, these systems lengthen block times to provide reasonable certainty that the median timestamp on each block is always increasing. +Fast, reliable synchronization is the biggest reason Solana is able to achieve such high throughput. Traditional blockchains synchronize on large chunks of transactions called blocks. By synchronizing on blocks, a transaction cannot be processed until a duration called "block time" has passed. In Proof of Work consensus, these block times need to be very large \(~10 minutes\) to minimize the odds of multiple validators producing a new valid block at the same time. There's no such constraint in Proof of Stake consensus, but without reliable timestamps, a validator cannot determine the order of incoming blocks. The popular workaround is to tag each block with a [wallclock timestamp](https://en.bitcoin.it/wiki/Block_timestamp). Because of clock drift and variance in network latencies, the timestamp is only accurate within an hour or two. To workaround the workaround, these systems lengthen block times to provide reasonable certainty that the median timestamp on each block is always increasing. Solana takes a very different approach, which it calls _Proof of History_ or _PoH_. Leader nodes "timestamp" blocks with cryptographic proofs that some duration of time has passed since the last proof. All data hashed into the proof most certainly have occurred before the proof was generated. The node then shares the new block with validator nodes, which are able to verify those proofs. The blocks can arrive at validators in any order or even could be replayed years later. With such reliable synchronization guarantees, Solana is able to break blocks into smaller batches of transactions called _entries_. Entries are streamed to validators in realtime, before any notion of block consensus. -Solana technically never sends a _block_, but uses the term to describe the sequence of entries that fullnodes vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms. +Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms. What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimisic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash. diff --git a/book/src/cluster/turbine-block-propagation.md b/book/src/cluster/turbine-block-propagation.md index 045f5d28af52ce..c792cdad345c7d 100644 --- a/book/src/cluster/turbine-block-propagation.md +++ b/book/src/cluster/turbine-block-propagation.md @@ -20,15 +20,15 @@ This way each node only has to communicate with a maximum of `2 * DATA_PLANE_FAN The following diagram shows how the Leader sends shreds with a Fanout of 2 to Neighborhood 0 in Layer 0 and how the nodes in Neighborhood 0 share their data with each other. -![Leader sends shreds to Neighborhood 0 in Layer 0](../.gitbook/assets/data-plane-seeding.svg) +![Leader sends shreds to Neighborhood 0 in Layer 0](../.gitbook/assets/data-plane-seeding%20%283%29.svg) The following diagram shows how Neighborhood 0 fans out to Neighborhoods 1 and 2. -![Neighborhood 0 Fanout to Neighborhood 1 and 2](../.gitbook/assets/data-plane-fanout%20%283%29.svg) +![Neighborhood 0 Fanout to Neighborhood 1 and 2](../.gitbook/assets/data-plane-fanout-3.svg) Finally, the following diagram shows a two layer cluster with a Fanout of 2. -![Two layer cluster with a Fanout of 2](../.gitbook/assets/data-plane%20%283%29.svg) +![Two layer cluster with a Fanout of 2](../.gitbook/assets/data-plane-3.svg) ### Configuration Values @@ -40,5 +40,5 @@ Currently, configuration is set when the cluster is launched. In the future, the The following diagram shows how two neighborhoods in different layers interact. To cripple a neighborhood, enough nodes \(erasure codes +1\) from the neighborhood above need to fail. Since each neighborhood receives shreds from multiple nodes in a neighborhood in the upper layer, we'd need a big network failure in the upper layers to end up with incomplete data. -![Inner workings of a neighborhood](../.gitbook/assets/data-plane-neighborhood%20%283%29.svg) +![Inner workings of a neighborhood](../.gitbook/assets/data-plane-neighborhood-3.svg) diff --git a/book/src/cluster/vote-signing.md b/book/src/cluster/vote-signing.md index 6cea8d36b47794..7d1a64a5cf7898 100644 --- a/book/src/cluster/vote-signing.md +++ b/book/src/cluster/vote-signing.md @@ -1,6 +1,6 @@ # Secure Vote Signing -A validator fullnode receives entries from the current leader and submits votes confirming those entries are valid. This vote submission presents a security challenge, because forged votes that violate consensus rules could be used to slash the validator's stake. +A validator receives entries from the current leader and submits votes confirming those entries are valid. This vote submission presents a security challenge, because forged votes that violate consensus rules could be used to slash the validator's stake. The validator votes on its chosen fork by submitting a transaction that uses an asymmetric key to sign the result of its validation work. Other entities can verify this signature using the validator's public key. If the validator's key is used to sign incorrect data \(e.g. votes on multiple forks of the ledger\), the node's stake or its resources could be compromised. diff --git a/book/src/getting-started/README.md b/book/src/getting-started/README.md index 59f0ec25ac590e..b5a5f79b85e4f2 100644 --- a/book/src/getting-started/README.md +++ b/book/src/getting-started/README.md @@ -41,7 +41,7 @@ $ ./multinode-demo/setup.sh ### Drone -In order for the fullnodes and clients to work, we'll need to spin up a drone to give out some test tokens. The drone delivers Milton Friedman-style "air drops" \(free tokens to requesting clients\) to be used in test transactions. +In order for the validators and clients to work, we'll need to spin up a drone to give out some test tokens. The drone delivers Milton Friedman-style "air drops" \(free tokens to requesting clients\) to be used in test transactions. Start the drone with: diff --git a/book/src/implemented-proposals/leader-validator-transition.md b/book/src/implemented-proposals/leader-validator-transition.md index 92e8ef59408535..c76291c6527a26 100644 --- a/book/src/implemented-proposals/leader-validator-transition.md +++ b/book/src/implemented-proposals/leader-validator-transition.md @@ -1,6 +1,6 @@ # Leader-to-Validator Transition -A fullnode typically operates as a validator. If, however, a staker delegates its stake to a fullnode, it will occasionally be selected as a _slot leader_. As a slot leader, the fullnode is responsible for producing blocks during an assigned _slot_. A slot has a duration of some number of preconfigured _ticks_. The duration of those ticks are estimated with a _PoH Recorder_ described later in this document. +A validator typically spends its time validating blocks. If, however, a staker delegates its stake to a validator, it will occasionally be selected as a _slot leader_. As a slot leader, the validator is responsible for producing blocks during an assigned _slot_. A slot has a duration of some number of preconfigured _ticks_. The duration of those ticks are estimated with a _PoH Recorder_ described later in this document. ## BankFork diff --git a/book/src/implemented-proposals/persistent-account-storage.md b/book/src/implemented-proposals/persistent-account-storage.md index c09ba7c016f1fb..0a9be436f63b58 100644 --- a/book/src/implemented-proposals/persistent-account-storage.md +++ b/book/src/implemented-proposals/persistent-account-storage.md @@ -2,7 +2,7 @@ ## Persistent Account Storage -The set of Accounts represent the current computed state of all the transactions that have been processed by a fullnode. Each fullnode needs to maintain this entire set. Each block that is proposed by the network represents a change to this set, and since each block is a potential rollback point the changes need to be reversible. +The set of Accounts represent the current computed state of all the transactions that have been processed by a validator. Each validator needs to maintain this entire set. Each block that is proposed by the network represents a change to this set, and since each block is a potential rollback point the changes need to be reversible. Persistent storage like NVMEs are 20 to 40 times cheaper than DDR. The problem with persistent storage is that write and read performance is much slower than DDR and care must be taken in how data is read or written to. Both reads and writes can be split between multiple storage drives and accessed in parallel. This design proposes a data structure that allows for concurrent reads and concurrent writes of storage. Writes are optimized by using an AppendVec data structure, which allows a single writer to append while allowing access to many concurrent readers. The accounts index maintains a pointer to a spot where the account was appended to every fork, thus removing the need for explicit checkpointing of state. diff --git a/book/src/introduction.md b/book/src/introduction.md index bff1efd8da2c35..732d9d77a8e905 100644 --- a/book/src/introduction.md +++ b/book/src/introduction.md @@ -32,7 +32,7 @@ In June of 2018, the team scaled up the technology to run on cloud-based network A cluster is a set of computers that work together and can be viewed from the outside as a single system. A Solana cluster is a set of independently owned computers working together \(and sometimes against each other\) to verify the output of untrusted, user-submitted programs. A Solana cluster can be utilized any time a user wants to preserve an immutable record of events in time or programmatic interpretations of those events. One use is to track which of the computers did meaningful work to keep the cluster running. Another use might be to track the possession of real-world assets. In each case, the cluster produces a record of events called the ledger. It will be preserved for the lifetime of the cluster. As long as someone somewhere in the world maintains a copy of the ledger, the output of its programs \(which may contain a record of who possesses what\) will forever be reproducible, independent of the organization that launched it. -## What are Sols? +## What are SOLs? -A sol is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional sols and a sol may be split as many as 34 times. The fractional sol is called a _lamport_. It is named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of approximately 0.0000000000582 sol \(2^-34\). +A SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs and a SOL may be split as many as 34 times. The fractional sol is called a _lamport_. It is named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of approximately 0.0000000000582 sol \(2^-34\). diff --git a/book/src/programs/README.md b/book/src/programs/README.md index c3e51b29554d6b..5b20ee6efb2c97 100644 --- a/book/src/programs/README.md +++ b/book/src/programs/README.md @@ -12,7 +12,7 @@ A program may be written in any programming language that can target the Berkley ## Storing State between Transactions -If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how. Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay "rent" to stay there. Each fullnode periodically scan all accounts and collects rent. Any account that drops to zero lamports is purged. +If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how. Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay "rent" to stay there. Each validator periodically scan all accounts and collects rent. Any account that drops to zero lamports is purged. If an account is marked "executable", it will only be used by a _loader_ to run programs. For example, a BPF-compiled program is marked executable and loaded by the BPF loader. No program is allowed to modify the contents of an executable account. diff --git a/book/src/programs/drones.md b/book/src/programs/drones.md index 9ed5e97eb1ddcc..40a1fee3af1920 100644 --- a/book/src/programs/drones.md +++ b/book/src/programs/drones.md @@ -22,10 +22,10 @@ Creator of a new on-chain token \(ERC-20 interface\), may wish to do a worldwide The drone may prefer its airdrops only target a particular Solana cluster. To do that, it listens to the cluster for new entry IDs and ensure any requests reference a recent one. -Note: to listen for new entry IDs assumes the drone is either a fullnode or a _light_ client. At the time of this writing, light clients have not been implemented and no proposal describes them. This document assumes one of the following approaches be taken: +Note: to listen for new entry IDs assumes the drone is either a validator or a _light_ client. At the time of this writing, light clients have not been implemented and no proposal describes them. This document assumes one of the following approaches be taken: 1. Define and implement a light client -2. Embed a fullnode +2. Embed a validator 3. Query the jsonrpc API for the latest last id at a rate slightly faster than ticks are produced. diff --git a/book/src/proposals/bankless-leader.md b/book/src/proposals/bankless-leader.md new file mode 100644 index 00000000000000..53334a0fb7478b --- /dev/null +++ b/book/src/proposals/bankless-leader.md @@ -0,0 +1,56 @@ +# Bankless Leader + +A bankless leader does the minimum amount of work to produce a valid block. The leader is tasked with ingress transactions, sorting and filtering valid transactions, arranging them into entries, shredding the entries and broadcasting the shreds. While a validator only needs to reassemble the block and replay execution of well formed entries. The leader does 3x more memory operations before any bank execution than the validator per processed transaction. + +## Rationale + +Normal bank operation for a spend needs to do 2 loads and 2 stores. With this design leader just does 1 load. so 4x less account\_db work before generating the block. The store operations are likely to be more expensive than reads. + +When replay stage starts processing the same transactions, it can assume that PoH is valid, and that all the entries are safe for parallel execution. The fee accounts that have been loaded to produce the block are likely to still be in memory, so the additional load should be warm and the cost is likely to be amortized. + +## Fee Account + +The [fee account](https://github.com/solana-labs/solana/tree/b5f7a4bff9953415b1f3d385bd59bc65c1ec11a4/book/src/proposals/terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee. + +## Balance Cache + +For the duration of the leaders consecutive blocks, the leader maintains a temporary balance cache for all the processed fee accounts. The cache is a map of pubkeys to lamports. + +At the start of the first block the balance cache is empty. At the end of the last block the cache is destroyed. + +The balance cache lookups must reference the same base fork for the entire duration of the cache. At the block boundary, the cache can be reset along with the base fork after replay stage finishes verifying the previous block. + +## Balance Check + +Prior to the balance check, the leader validates all the signatures in the transaction. + +1. Verify the accounts are not in use and BlockHash is valid. +2. Check if the fee account is present in the cache, or load the account from accounts\_db and store the lamport balance in the cache. +3. If the balance is less than the fee, drop the transaction. +4. Subtract the fee from the balance. +5. For all the keys in the transaction that are Credit-Debit and are referenced by an instruction, reduce their balance to 0 in the cache. The account fee is declared as Credit-Debit, but as long as it is not used in any instruction its balance will not be reduced to 0. + +## Leader Replay + +Leaders will need to replay their blocks as part of the standard replay stage operation. + +## Leader Replay With Consecutive Blocks + +A leader can be scheduled to produce multiple blocks in a row. In that scenario the leader is likely to be producing the next block while the replay stage for the first block is playing. + +When the leader finishes the replay stage it can reset the balance cache by clearing it, and set a new fork as the base for the cache which can become active on the next block. + +## Reseting the Balance Cache + +1. At the start of the block, if the balance cache is uninitialized, set the base fork for the balance cache to be the parent of the block and create an empty cache. +2. if the cache is initialized, check if block's parents has a new frozen bank that is newer than the current base fork for the balance cache. +3. if a parent newer than the cache's base fork exist, reset the cache to the parent. + +## Impact on Clients + +The same fee account can be reused many times in the same block until it is used once as Credit-Debit by an instruction. + +Clients that transmit a large number of transactions per second should use a dedicated fee account that is not used as Credit-Debit in any instruction. + +Once an account fee is used as Credit-Debit, it will fail the balance check until the balance cache is reset. + diff --git a/book/src/proposals/cluster-test-framework.md b/book/src/proposals/cluster-test-framework.md index 0422abe95989f0..82409f2c554e29 100644 --- a/book/src/proposals/cluster-test-framework.md +++ b/book/src/proposals/cluster-test-framework.md @@ -12,9 +12,9 @@ Tests should verify a single bug or scenario, and should be written with the lea Tests are provided an entry point, which is a `contact_info::ContactInfo` structure, and a keypair that has already been funded. -Each node in the cluster is configured with a `fullnode::ValidatorConfig` at boot time. At boot time this configuration specifies any extra cluster configuration required for the test. The cluster should boot with the configuration when it is run in-process or in a data center. +Each node in the cluster is configured with a `validator::ValidatorConfig` at boot time. At boot time this configuration specifies any extra cluster configuration required for the test. The cluster should boot with the configuration when it is run in-process or in a data center. -Once booted, the test will discover the cluster through a gossip entry point and configure any runtime behaviors via fullnode RPC. +Once booted, the test will discover the cluster through a gossip entry point and configure any runtime behaviors via validator RPC. ## Test Interface @@ -43,13 +43,13 @@ let cluster_nodes = discover_nodes(&entry_point_info, num_nodes); ## Cluster Configuration -To enable specific scenarios, the cluster needs to be booted with special configurations. These configurations can be captured in `fullnode::ValidatorConfig`. +To enable specific scenarios, the cluster needs to be booted with special configurations. These configurations can be captured in `validator::ValidatorConfig`. For example: ```text let mut validator_config = ValidatorConfig::default(); -validator_config.rpc_config.enable_fullnode_exit = true; +validator_config.rpc_config.enable_validator_exit = true; let local = LocalCluster::new_with_config( num_nodes, 10_000, @@ -81,7 +81,7 @@ pub fn test_large_invalid_gossip_nodes( let cluster = discover_nodes(&entry_point_info, num_nodes); // Poison the cluster. - let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE); for _ in 0..(num_nodes * 100) { client.gossip_push( cluster_info::invalid_contact_info() @@ -91,7 +91,7 @@ pub fn test_large_invalid_gossip_nodes( // Force refresh of the active set. for node in &cluster { - let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); client.gossip_refresh_active_set(); } diff --git a/book/src/proposals/simple-payment-and-state-verification.md b/book/src/proposals/simple-payment-and-state-verification.md index 6fe94c46540fa8..3bf09eb3f9e0b7 100644 --- a/book/src/proposals/simple-payment-and-state-verification.md +++ b/book/src/proposals/simple-payment-and-state-verification.md @@ -4,17 +4,17 @@ It is often useful to allow low resourced clients to participate in a Solana clu ## A Naive Approach -Validators store the signatures of recently confirmed transactions for a short period of time to ensure that they are not processed more than once. Validators provide a JSON RPC endpoint, which clients can use to query the cluster if a transaction has been recently processed. Validators also provide a PubSub notification, whereby a client registers to be notified when a given signature is observed by the validator. While these two mechanisms allow a client to verify a payment, they are not a proof and rely on completely trusting a fullnode. +Validators store the signatures of recently confirmed transactions for a short period of time to ensure that they are not processed more than once. Validators provide a JSON RPC endpoint, which clients can use to query the cluster if a transaction has been recently processed. Validators also provide a PubSub notification, whereby a client registers to be notified when a given signature is observed by the validator. While these two mechanisms allow a client to verify a payment, they are not a proof and rely on completely trusting a validator. -We will describe a way to minimize this trust using Merkle Proofs to anchor the fullnode's response in the ledger, allowing the client to confirm on their own that a sufficient number of their preferred validators have confirmed a transaction. Requiring multiple validator attestations further reduces trust in the fullnode, as it increases both the technical and economic difficulty of compromising several other network participants. +We will describe a way to minimize this trust using Merkle Proofs to anchor the validator's response in the ledger, allowing the client to confirm on their own that a sufficient number of their preferred validators have confirmed a transaction. Requiring multiple validator attestations further reduces trust in the validator, as it increases both the technical and economic difficulty of compromising several other network participants. ## Light Clients -A 'light client' is a cluster participant that does not itself run a fullnode. This light client would provide a level of security greater than trusting a remote fullnode, without requiring the light client to spend a lot of resources verifying the ledger. +A 'light client' is a cluster participant that does not itself run a validator. This light client would provide a level of security greater than trusting a remote validator, without requiring the light client to spend a lot of resources verifying the ledger. -Rather than providing transaction signatures directly to a light client, the fullnode instead generates a Merkle Proof from the transaction of interest to the root of a Merkle Tree of all transactions in the including block. This Merkle Root is stored in a ledger entry which is voted on by validators, providing it consensus legitimacy. The additional level of security for a light client depends on an initial canonical set of validators the light client considers to be the stakeholders of the cluster. As that set is changed, the client can update its internal set of known validators with [receipts](simple-payment-and-state-verification.md#receipts). This may become challenging with a large number of delegated stakes. +Rather than providing transaction signatures directly to a light client, the validator instead generates a Merkle Proof from the transaction of interest to the root of a Merkle Tree of all transactions in the including block. This Merkle Root is stored in a ledger entry which is voted on by validators, providing it consensus legitimacy. The additional level of security for a light client depends on an initial canonical set of validators the light client considers to be the stakeholders of the cluster. As that set is changed, the client can update its internal set of known validators with [receipts](simple-payment-and-state-verification.md#receipts). This may become challenging with a large number of delegated stakes. -Fullnodes themselves may want to use light client APIs for performance reasons. For example, during the initial launch of a fullnode, the fullnode may use a cluster provided checkpoint of the state and verify it with a receipt. +Validators themselves may want to use light client APIs for performance reasons. For example, during the initial launch of a validator, the validator may use a cluster provided checkpoint of the state and verify it with a receipt. ## Receipts @@ -30,7 +30,7 @@ A payment receipt is a data structure that contains a Merkle Path from a transac An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted by signature. -![Block Merkle Diagram](../.gitbook/assets/spv-block-merkle.svg) +![Block Merkle Diagram](../.gitbook/assets/spv-block-merkle%20%282%29.svg) A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block. Transaction status is necessary for the receipt because the state receipt is constructed for the block. Two transactions over the same state can appear in the block, and therefore, there is no way to infer from just the state whether a transaction that is committed to the ledger has succeeded or failed in modifying the intended state. It may not be necessary to encode the full status code, but a single status bit to indicate the transaction's success. @@ -48,7 +48,7 @@ At the end of the block, A and B are in the exact same starting state, and any s The Bank-Merkle is computed from the Merkle Tree of the new state changes, along with the Previous Bank-Merkle, and the Block-Merkle. -![Bank Merkle Diagram](../.gitbook/assets/spv-bank-merkle%20%283%29.svg) +![Bank Merkle Diagram](../.gitbook/assets/spv-bank-merkle-3.svg) A state receipt contains only the state changes occurring in the block. A direct Merkle Path to the current Bank-Merkle guarantees the state value at that bank hash, but it cannot be used to generate a “current” receipt to the latest state if the state modification occurred in some previous block. There is no guarantee that the path provided by the validator is the latest one available out of all the previous Bank-Merkles. diff --git a/book/src/proposals/staking-rewards.md b/book/src/proposals/staking-rewards.md index 1e73bdaf71cf02..4ce69cce117637 100644 --- a/book/src/proposals/staking-rewards.md +++ b/book/src/proposals/staking-rewards.md @@ -20,7 +20,7 @@ While many of the details of the specific implementation are currently under con Solana's ledger validation design is based on a rotating, stake-weighted selected leader broadcasting transactions in a PoH data structure to validating nodes. These nodes, upon receiving the leader's broadcast, have the opportunity to vote on the current state and PoH height by signing a transaction into the PoH stream. -To become a Solana validator, a fullnode must deposit/lock-up some amount of SOL in a contract. This SOL will not be accessible for a specific time period. The precise duration of the staking lockup period has not been determined. However we can consider three phases of this time for which specific parameters will be necessary: +To become a Solana validator, one must deposit/lock-up some amount of SOL in a contract. This SOL will not be accessible for a specific time period. The precise duration of the staking lockup period has not been determined. However we can consider three phases of this time for which specific parameters will be necessary: * _Warm-up period_: which SOL is deposited and inaccessible to the node, diff --git a/book/src/proposals/validator-proposal.md b/book/src/proposals/validator-proposal.md index 4828e4ab5223e5..7a21f31bfc6a9a 100644 --- a/book/src/proposals/validator-proposal.md +++ b/book/src/proposals/validator-proposal.md @@ -12,7 +12,7 @@ The fundamental difference between the pipelines is when the PoH is present. In We unwrap the many abstraction layers and build a single pipeline that can toggle leader mode on whenever the validator's ID shows up in the leader schedule. -![Validator block diagram](../.gitbook/assets/validator-proposal%20%281%29.svg) +![Validator block diagram](../.gitbook/assets/validator-proposal-1.svg) ## Notable changes diff --git a/book/src/running-validator/validator-info.md b/book/src/running-validator/validator-info.md index 8666499025568f..eb7357e00425d0 100644 --- a/book/src/running-validator/validator-info.md +++ b/book/src/running-validator/validator-info.md @@ -7,7 +7,7 @@ You can publish your validator information to the chain to be publicly visible t Run the solana CLI to populate a validator info account: ```bash -$ solana validator-info publish --keypair ~/validator-keypair.json +$ solana validator-info publish --keypair ~/validator-keypair.json ``` For details about optional fields for VALIDATOR\_INFO\_ARGS: diff --git a/book/src/running-validator/validator-software.md b/book/src/running-validator/validator-software.md index 7525c7049b0010..466c3810befe88 100644 --- a/book/src/running-validator/validator-software.md +++ b/book/src/running-validator/validator-software.md @@ -48,3 +48,4 @@ If you are unable to use the prebuilt binaries or prefer to build it yourself fr $ ./scripts/cargo-install-all.sh . $ export PATH=$PWD/bin:$PATH ``` + diff --git a/book/src/running-validator/validator-stake.md b/book/src/running-validator/validator-stake.md index 487d6059498d88..b977270defb226 100644 --- a/book/src/running-validator/validator-stake.md +++ b/book/src/running-validator/validator-stake.md @@ -36,7 +36,7 @@ The rewards lamports earned are split between your stake account and the vote ac Stake can be deactivated by running: ```bash -$ solana deactivate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json +$ solana deactivate-stake ~/validator-config/stake-keypair.json ``` The stake will cool down, deactivate over time. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake. @@ -44,3 +44,4 @@ The stake will cool down, deactivate over time. While cooling down, your stake w Note that a stake account may only be used once, so after deactivation, use the cli's `withdraw-stake` command to recover the previously staked lamports. Be sure and redeem your credits before withdrawing all your lamports. Once the account is fully withdrawn, the account is destroyed. + diff --git a/book/src/running-validator/validator-testnet.md b/book/src/running-validator/validator-testnet.md index 91b02ecc39b520..b54ea464552c0f 100644 --- a/book/src/running-validator/validator-testnet.md +++ b/book/src/running-validator/validator-testnet.md @@ -51,7 +51,7 @@ The Solana CLI tool points at testnet.solana.com by default. Include a `--url` a $ solana --url http://beta.testnet.solana.com:8899 balance ``` -The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future wallet commands. For example: +The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future cli commands. For example: ```bash $ solana set --url http://beta.testnet.solana.com:8899 diff --git a/book/src/terminology.md b/book/src/terminology.md index 02e29739e772f3..d6cc7312c68fb3 100644 --- a/book/src/terminology.md +++ b/book/src/terminology.md @@ -10,6 +10,10 @@ A persistent file addressed by [public key](terminology.md#public-key) and with A front-end application that interacts with a Solana cluster. +## bank state + +The result of interpreting all programs on the ledger at a given [tick height](terminology.md#tick-height). It includes at least the set of all [accounts](terminology.md#account) holding nonzero [native tokens](terminology.md#native-tokens). + ## block A contiguous set of [entries](terminology.md#entry) on the ledger covered by a [vote](terminology.md#ledger-vote). A [leader](terminology.md#leader) produces at most one block per [slot](terminology.md#slot). @@ -24,7 +28,7 @@ The [entry id](terminology.md#entry-id) of the last entry in a [block](terminolo ## bootstrap leader -The first [fullnode](terminology.md#fullnode) to take the [leader](terminology.md#leader) role. +The first [validator](terminology.md#validator) to produce a [block](terminology.md#block). ## CBC block @@ -36,7 +40,7 @@ A [node](terminology.md#node) that utilizes the [cluster](terminology.md#cluster ## cluster -A set of [fullnodes](terminology.md#fullnode) maintaining a single [ledger](terminology.md#ledger). +A set of [validators](terminology.md#validator) maintaining a single [ledger](terminology.md#ledger). ## confirmation @@ -78,6 +82,10 @@ The time, i.e. number of [slots](terminology.md#slot), for which a [leader sched A proof which has the same format as a storage proof, but the sha state is actually from hashing a known ledger value which the storage client can reveal and is also easily verifiable by the network on-chain. +## fee account + +The fee account in the transaction is the account pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Credit-Debit in the transaction since paying for the transaction reduces the account balance. + ## finality When nodes representing 2/3rd of the stake have a common [root](terminology.md#root). @@ -86,14 +94,6 @@ When nodes representing 2/3rd of the stake have a common [root](terminology.md#r A [ledger](terminology.md#ledger) derived from common entries but then diverged. -## fullnode - -A full participant in the [cluster](terminology.md#cluster) either a [leader](terminology.md#leader) or [validator](terminology.md#validator) node. - -## fullnode state - -The result of interpreting all programs on the ledger at a given [tick height](terminology.md#tick-height). It includes at least the set of all [accounts](terminology.md#account) holding nonzero [native tokens](terminology.md#native-tokens). - ## genesis block The configuration file that prepares the [ledger](terminology.md#ledger) for the first [block](terminology.md#block). @@ -120,11 +120,11 @@ A [program](terminology.md#program) with the ability to interpret the binary enc ## leader -The role of a [fullnode](terminology.md#fullnode) when it is appending [entries](terminology.md#entry) to the [ledger](terminology.md#ledger). +The role of a [validator](terminology.md#validator) when it is appending [entries](terminology.md#entry) to the [ledger](terminology.md#ledger). ## leader schedule -A sequence of [fullnode](terminology.md#fullnode) [public keys](terminology.md#public-key). The cluster uses the leader schedule to determine which fullnode is the [leader](terminology.md#leader) at any moment in time. +A sequence of [validator](terminology.md#validator) [public keys](terminology.md#public-key). The cluster uses the leader schedule to determine which validator is the [leader](terminology.md#leader) at any moment in time. ## ledger @@ -136,15 +136,15 @@ Portion of the ledger which is downloaded by the replicator where storage proof ## ledger vote -A [hash](terminology.md#hash) of the [fullnode's state](terminology.md#fullnode-state) at a given [tick height](terminology.md#tick-height). It comprises a validator's affirmation that a [block](terminology.md#block) it has received has been verified, as well as a promise not to vote for a conflicting [block](terminology.md#block) \(i.e. [fork](terminology.md#fork)\) for a specific amount of time, the [lockout](terminology.md#lockout) period. +A [hash](terminology.md#hash) of the [validator's state](terminology.md#bank-state) at a given [tick height](terminology.md#tick-height). It comprises a validator's affirmation that a [block](terminology.md#block) it has received has been verified, as well as a promise not to vote for a conflicting [block](terminology.md#block) \(i.e. [fork](terminology.md#fork)\) for a specific amount of time, the [lockout](terminology.md#lockout) period. ## light client -A type of [client](terminology.md#client) that can verify it's pointing to a valid [cluster](terminology.md#cluster). It performs more ledger verification than a [thin client](terminology.md#thin-client) and less than a [fullnode](terminology.md#fullnode). +A type of [client](terminology.md#client) that can verify it's pointing to a valid [cluster](terminology.md#cluster). It performs more ledger verification than a [thin client](terminology.md#thin-client) and less than a [validator](terminology.md#validator). ## lockout -The duration of time for which a [fullnode](terminology.md#fullnode) is unable to [vote](terminology.md#ledger-vote) on another [fork](terminology.md#fork). +The duration of time for which a [validator](terminology.md#validator) is unable to [vote](terminology.md#ledger-vote) on another [fork](terminology.md#fork). ## native token @@ -156,7 +156,7 @@ A computer participating in a [cluster](terminology.md#cluster). ## node count -The number of [fullnodes](terminology.md#fullnode) participating in a [cluster](terminology.md#cluster). +The number of [validators](terminology.md#validator) participating in a [cluster](terminology.md#cluster). ## PoH @@ -196,11 +196,11 @@ A [block](terminology.md#block) or [slot](terminology.md#slot) that has reached ## runtime -The component of a [fullnode](terminology.md#fullnode) responsible for [program](terminology.md#program) execution. +The component of a [validator](terminology.md#validator) responsible for [program](terminology.md#program) execution. ## shred -A fraction of a [block](terminology.md#block); the smallest unit sent between [fullnodes](terminology.md#fullnode). +A fraction of a [block](terminology.md#block); the smallest unit sent between [validators](terminology.md#validator). ## slot @@ -216,7 +216,7 @@ The [native token](terminology.md#native-token) tracked by a [cluster](terminolo ## stake -Tokens forfeit to the [cluster](terminology.md#cluster) if malicious [fullnode](terminology.md#fullnode) behavior can be proven. +Tokens forfeit to the [cluster](terminology.md#cluster) if malicious [validator](terminology.md#validator) behavior can be proven. ## storage proof @@ -272,7 +272,7 @@ A set of [transactions](terminology.md#transaction) that may be executed in para ## validator -The role of a [fullnode](terminology.md#fullnode) when it is validating the [leader's](terminology.md#leader) latest [entries](terminology.md#entry). +A full participant in the [cluster](terminology.md#cluster) reponsible for validating the [ledger](terminology.md#ledger) and producing new [blocks](terminology.md#block). ## VDF diff --git a/book/src/validator/README.md b/book/src/validator/README.md index c7c5b14d751217..e11ca3f8d841f4 100644 --- a/book/src/validator/README.md +++ b/book/src/validator/README.md @@ -1,6 +1,6 @@ # Anatomy of a Validator -![Validator block diagrams](../.gitbook/assets/validator.svg) +![Validator block diagrams](../.gitbook/assets/validator%20%281%29.svg) ## Pipelining diff --git a/book/src/validator/tpu.md b/book/src/validator/tpu.md index 4831daa84ed3df..6801c92fd6a439 100644 --- a/book/src/validator/tpu.md +++ b/book/src/validator/tpu.md @@ -1,4 +1,4 @@ # TPU -![TPU Block Diagram](../.gitbook/assets/tpu.svg) +![TPU Block Diagram](../.gitbook/assets/tpu%20%281%29.svg) diff --git a/ci/localnet-sanity.sh b/ci/localnet-sanity.sh index d03dd9a8169cf1..ae27cf3fa08772 100755 --- a/ci/localnet-sanity.sh +++ b/ci/localnet-sanity.sh @@ -207,7 +207,7 @@ killNodes() { set -x curl --retry 5 --retry-delay 2 --retry-connrefused \ -X POST -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","id":1, "method":"fullnodeExit"}' \ + -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' \ http://localhost:$port ) done diff --git a/ci/solana-testnet.yml b/ci/solana-testnet.yml deleted file mode 100755 index 296bbc4ceca489..00000000000000 --- a/ci/solana-testnet.yml +++ /dev/null @@ -1,13 +0,0 @@ -steps: - - command: "ci/testnet-automation.sh" - label: "run testnet" - agents: - - "queue=testnet-deploy" - - - wait: ~ - continue_on_failure: true - - - command: "ci/testnet-automation-cleanup.sh" - label: "delete testnet" - agents: - - "queue=testnet-deploy" diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 644735bd83b310..35ad34a95b5d11 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -61,7 +61,7 @@ test-stable-perf) _ make -C programs/bpf/c tests _ cargo +"$rust_stable" test \ --manifest-path programs/bpf/Cargo.toml \ - --no-default-features --features=bpf_c,bpf_rust + --no-default-features --features=bpf_c,bpf_rust -- --nocapture if [[ $(uname) = Linux ]]; then # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a diff --git a/ci/testnet-automation-cleanup.sh b/ci/testnet-automation-cleanup.sh deleted file mode 100755 index ce69aa88a74f25..00000000000000 --- a/ci/testnet-automation-cleanup.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$0")/.." - -echo --- find testnet configuration -net/gce.sh config -p testnet-automation - -echo --- delete testnet -net/gce.sh delete -p testnet-automation diff --git a/ci/testnet-automation.sh b/ci/testnet-automation.sh deleted file mode 100755 index 814aaa1508e821..00000000000000 --- a/ci/testnet-automation.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$0")/.." - -if [[ -z $USE_PREBUILT_CHANNEL_TARBALL ]]; then - echo --- downloading tar from build artifacts - buildkite-agent artifact download "solana-release*.tar.bz2" . -fi - -# shellcheck disable=SC1091 -source ci/upload-ci-artifact.sh - -[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300 -[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100" -[[ -n $LEADER_CPU_MACHINE_TYPE ]] || - LEADER_CPU_MACHINE_TYPE="--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" -[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2 -[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation -[[ -n $TESTNET_ZONES ]] || TESTNET_ZONES="us-west1-b" -[[ -n $CHANNEL ]] || CHANNEL=beta -[[ -n $ADDITIONAL_FLAGS ]] || ADDITIONAL_FLAGS="" - -TESTNET_CLOUD_ZONES=(); while read -r -d, ; do TESTNET_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TESTNET_ZONES}," - -launchTestnet() { - declare nodeCount=$1 - echo --- setup "$nodeCount" node test - - # shellcheck disable=SC2068 - net/gce.sh create \ - -d pd-ssd \ - -n "$nodeCount" -c "$CLIENT_COUNT" \ - -G "$LEADER_CPU_MACHINE_TYPE" \ - -p "$TESTNET_TAG" ${TESTNET_CLOUD_ZONES[@]/#/-z } "$ADDITIONAL_FLAGS" - - echo --- configure database - net/init-metrics.sh -e - - echo --- start "$nodeCount" node test - if [[ -n $USE_PREBUILT_CHANNEL_TARBALL ]]; then - net/net.sh start -o noValidatorSanity -t "$CHANNEL" - else - net/net.sh start -o noValidatorSanity -T solana-release*.tar.bz2 - fi - - echo --- wait "$ITERATION_WAIT" seconds to complete test - sleep "$ITERATION_WAIT" - - set -x - - declare q_mean_tps=' - SELECT round(mean("sum_count")) AS "mean_tps" FROM ( - SELECT sum("count") AS "sum_count" - FROM "testnet-automation"."autogen"."banking_stage-record_transactions" - WHERE time > now() - 300s GROUP BY time(1s) - )' - - declare q_max_tps=' - SELECT round(max("sum_count")) AS "max_tps" FROM ( - SELECT sum("count") AS "sum_count" - FROM "testnet-automation"."autogen"."banking_stage-record_transactions" - WHERE time > now() - 300s GROUP BY time(1s) - )' - - declare q_mean_confirmation=' - SELECT round(mean("duration_ms")) as "mean_confirmation" - FROM "testnet-automation"."autogen"."validator-confirmation" - WHERE time > now() - 300s' - - declare q_max_confirmation=' - SELECT round(max("duration_ms")) as "max_confirmation" - FROM "testnet-automation"."autogen"."validator-confirmation" - WHERE time > now() - 300s' - - declare q_99th_confirmation=' - SELECT round(percentile("duration_ms", 99)) as "99th_confirmation" - FROM "testnet-automation"."autogen"."validator-confirmation" - WHERE time > now() - 300s' - - curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \ - --data-urlencode "db=testnet-automation" \ - --data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_confirmation;$q_max_confirmation;$q_99th_confirmation" | - python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log - - upload-ci-artifact TPS"$nodeCount".log -} - -# This is needed, because buildkite doesn't let us define an array of numbers. -# The array is defined as a space separated string of numbers -# shellcheck disable=SC2206 -nodes_count_array=($NUMBER_OF_NODES) - -for n in "${nodes_count_array[@]}"; do - launchTestnet "$n" -done diff --git a/ci/testnet-deploy.sh b/ci/testnet-deploy.sh index 4839c69c6ac138..297035a18328f7 100755 --- a/ci/testnet-deploy.sh +++ b/ci/testnet-deploy.sh @@ -278,6 +278,7 @@ if ! $skipCreate; then -a "$bootstrapFullNodeAddress" -c "$clientNodeCount" -n "$additionalFullNodeCount" + --dedicated ) # shellcheck disable=SC2206 create_args+=(${zone_args[@]}) @@ -362,7 +363,10 @@ if ! $skipStart; then op=start fi echo "--- net.sh $op" - args=("$op" -t "$tarChannelOrTag") + args=( + "$op" + -t "$tarChannelOrTag" + ) if ! $publicNetwork; then args+=(-o rejectExtraNodes) diff --git a/ci/testnet-manager.sh b/ci/testnet-manager.sh index dbe8d920e8ae07..5c1ea28371610f 100755 --- a/ci/testnet-manager.sh +++ b/ci/testnet-manager.sh @@ -233,8 +233,8 @@ sanity() { NO_INSTALL_CHECK=1 \ NO_VALIDATOR_SANITY=1 \ ci/testnet-sanity.sh edge-testnet-solana-com gce -P us-west1-b + maybe_deploy_software ) - maybe_deploy_software ;; testnet-edge-perf) ( @@ -250,8 +250,8 @@ sanity() { NO_INSTALL_CHECK=1 \ NO_VALIDATOR_SANITY=1 \ ci/testnet-sanity.sh beta-testnet-solana-com gce us-west1-b + maybe_deploy_software --deploy-if-newer ) - maybe_deploy_software --deploy-if-newer ;; testnet-beta-perf) ( @@ -425,7 +425,7 @@ deploy() { # shellcheck disable=SC2068 NO_VALIDATOR_SANITY=1 \ ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \ - -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \ + -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u --allow-boot-failures \ --skip-remote-log-retrieval \ -a demo-testnet-solana-com \ ${skipCreate:+-e} \ @@ -437,7 +437,7 @@ deploy() { # shellcheck disable=SC2068 NO_VALIDATOR_SANITY=1 \ ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \ - -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \ + -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P --allow-boot-failures -x \ --skip-remote-log-retrieval \ ${skipCreate:+-e} \ ${skipStart:+-s} \ diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 2b8480168a8fff..ac4bce07eb0130 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -21,11 +21,11 @@ lazy_static = "1.4.0" log = "0.4.8" num-traits = "0.2" pretty-hex = "0.1.1" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" } solana-client = { path = "../client", version = "0.20.0" } solana-config-api = { path = "../programs/config_api", version = "0.20.0" } diff --git a/cli/cli-help.sh b/cli/cli-help.sh deleted file mode 100755 index f46292518540e5..00000000000000 --- a/cli/cli-help.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$0")"/.. - -cargo build --package solana-cli -export PATH=$PWD/target/debug:$PATH - -echo "\`\`\`manpage" -solana --help -echo "\`\`\`" -echo "" - -commands=(address airdrop balance cancel confirm deploy fees get-transaction-count pay send-signature send-timestamp) - -for x in "${commands[@]}"; do - echo "\`\`\`manpage" - solana "${x}" --help - echo "\`\`\`" - echo "" -done diff --git a/cli/src/wallet.rs b/cli/src/cli.rs similarity index 63% rename from cli/src/wallet.rs rename to cli/src/cli.rs index 5743862cf288f6..9b6ec613ed7faa 100644 --- a/cli/src/wallet.rs +++ b/cli/src/cli.rs @@ -1,13 +1,12 @@ use crate::{ - display::println_name_value, input_parsers::*, input_validators::*, stake::*, - validator_info::*, vote::*, + cluster_query::*, display::println_name_value, input_parsers::*, input_validators::*, stake::*, + storage::*, validator_info::*, vote::*, }; use chrono::prelude::*; -use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; -use console::{style, Emoji}; +use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; use num_traits::FromPrimitive; -use serde_json::{self, json, Value}; +use serde_json::{self, json}; use solana_budget_api::budget_instruction::{self, BudgetError}; use solana_client::{client_error::ClientError, rpc_client::RpcClient}; #[cfg(not(test))] @@ -15,8 +14,7 @@ use solana_drone::drone::request_airdrop_transaction; #[cfg(test)] use solana_drone::drone_mock::request_airdrop_transaction; use solana_sdk::{ - account_utils::State, - bpf_loader, clock, + bpf_loader, fee_calculator::FeeCalculator, hash::Hash, instruction::InstructionError, @@ -31,47 +29,66 @@ use solana_sdk::{ transaction::{Transaction, TransactionError}, }; use solana_stake_api::stake_state::{Authorized, Lockup, StakeAuthorize}; -use solana_storage_api::storage_instruction; +use solana_storage_api::storage_instruction::StorageAccountType; use solana_vote_api::vote_state::{VoteAuthorize, VoteInit}; use std::{ - collections::VecDeque, fs::File, io::{Read, Write}, net::{IpAddr, SocketAddr}, thread::sleep, - time::{Duration, Instant}, + time::Duration, {error, fmt}, }; const USERDATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE -static CHECK_MARK: Emoji = Emoji("✅ ", ""); -static CROSS_MARK: Emoji = Emoji("❌ ", ""); - #[derive(Debug, PartialEq)] #[allow(clippy::large_enum_variant)] -pub enum WalletCommand { - Address, +pub enum CliCommand { + // Cluster Query Commands + ClusterVersion, Fees, - Airdrop { - drone_host: Option, - drone_port: u16, - lamports: u64, - use_lamports_unit: bool, + GetEpochInfo, + GetGenesisBlockhash, + GetSlot, + GetTransactionCount, + Ping { + interval: Duration, + count: Option, + timeout: Duration, }, - Balance { - pubkey: Pubkey, + ShowValidators { use_lamports_unit: bool, }, - Cancel(Pubkey), - Confirm(Signature), - VoteAuthorize(Pubkey, Pubkey, VoteAuthorize), - CreateVoteAccount(Pubkey, VoteInit), - ShowAccount { + // Program Deployment + Deploy(String), + // Stake Commands + CreateStakeAccount(Pubkey, Authorized, Lockup, u64), + DeactivateStake(Pubkey), + DelegateStake(Pubkey, Pubkey, bool), + RedeemVoteCredits(Pubkey, Pubkey), + ShowStakeAccount { pubkey: Pubkey, - output_file: Option, use_lamports_unit: bool, }, + StakeAuthorize(Pubkey, Pubkey, StakeAuthorize), + WithdrawStake(Pubkey, Pubkey, u64), + // Storage Commands + CreateStorageAccount { + account_owner: Pubkey, + storage_account_pubkey: Pubkey, + account_type: StorageAccountType, + }, + ClaimStorageReward { + node_account_pubkey: Pubkey, + storage_account_pubkey: Pubkey, + }, + ShowStorageAccount(Pubkey), + // Validator Info Commands + GetValidatorInfo(Option), + SetValidatorInfo(ValidatorInfo, Option), + // Vote Commands + CreateVoteAccount(Pubkey, VoteInit), ShowVoteAccount { pubkey: Pubkey, use_lamports_unit: bool, @@ -81,26 +98,21 @@ pub enum WalletCommand { aggregate: bool, span: Option, }, - CreateStakeAccount(Pubkey, Authorized, Lockup, u64), - StakeAuthorize(Pubkey, Pubkey, StakeAuthorize), - DelegateStake(Pubkey, Pubkey, bool), - WithdrawStake(Pubkey, Pubkey, u64), - DeactivateStake(Pubkey, Pubkey), - RedeemVoteCredits(Pubkey, Pubkey), - ShowStakeAccount { + VoteAuthorize(Pubkey, Pubkey, VoteAuthorize), + // Wallet Commands + Address, + Airdrop { + drone_host: Option, + drone_port: u16, + lamports: u64, + use_lamports_unit: bool, + }, + Balance { pubkey: Pubkey, use_lamports_unit: bool, }, - CreateReplicatorStorageAccount(Pubkey, Pubkey), - CreateValidatorStorageAccount(Pubkey, Pubkey), - ClaimStorageReward(Pubkey, Pubkey), - ShowStorageAccount(Pubkey), - Deploy(String), - GetGenesisBlockhash, - GetSlot, - GetEpochInfo, - GetTransactionCount, - GetVersion, + Cancel(Pubkey), + Confirm(Signature), Pay { lamports: u64, to: Pubkey, @@ -109,19 +121,17 @@ pub enum WalletCommand { witnesses: Option>, cancelable: Option, }, - Ping { - interval: Duration, - count: Option, - timeout: Duration, + ShowAccount { + pubkey: Pubkey, + output_file: Option, + use_lamports_unit: bool, }, TimeElapsed(Pubkey, Pubkey, DateTime), // TimeElapsed(to, process_id, timestamp) Witness(Pubkey, Pubkey), // Witness(to, process_id) - GetValidatorInfo(Option), - SetValidatorInfo(ValidatorInfo, Option), } #[derive(Debug, Clone)] -pub enum WalletError { +pub enum CliError { BadParameter(String), CommandNotRecognized(String), InsufficientFundsForFee, @@ -130,13 +140,13 @@ pub enum WalletError { KeypairFileNotFound(String), } -impl fmt::Display for WalletError { +impl fmt::Display for CliError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "invalid") } } -impl error::Error for WalletError { +impl error::Error for CliError { fn description(&self) -> &str { "invalid" } @@ -147,21 +157,21 @@ impl error::Error for WalletError { } } -pub struct WalletConfig { - pub command: WalletCommand, +pub struct CliConfig { + pub command: CliCommand, pub json_rpc_url: String, pub keypair: Keypair, pub keypair_path: String, pub rpc_client: Option, } -impl Default for WalletConfig { - fn default() -> WalletConfig { +impl Default for CliConfig { + fn default() -> CliConfig { let mut keypair_path = dirs::home_dir().expect("home directory"); keypair_path.extend(&[".config", "solana", "id.json"]); - WalletConfig { - command: WalletCommand::Balance { + CliConfig { + command: CliCommand::Balance { pubkey: Pubkey::default(), use_lamports_unit: false, }, @@ -176,17 +186,77 @@ impl Default for WalletConfig { pub fn parse_command( pubkey: &Pubkey, matches: &ArgMatches<'_>, -) -> Result> { +) -> Result> { let response = match matches.subcommand() { - ("address", Some(_address_matches)) => Ok(WalletCommand::Address), - ("fees", Some(_fees_matches)) => Ok(WalletCommand::Fees), + // Cluster Query Commands + ("cluster-version", Some(_matches)) => Ok(CliCommand::ClusterVersion), + ("fees", Some(_fees_matches)) => Ok(CliCommand::Fees), + ("get-epoch-info", Some(_matches)) => Ok(CliCommand::GetEpochInfo), + ("get-genesis-blockhash", Some(_matches)) => Ok(CliCommand::GetGenesisBlockhash), + ("get-slot", Some(_matches)) => Ok(CliCommand::GetSlot), + ("get-transaction-count", Some(_matches)) => Ok(CliCommand::GetTransactionCount), + ("ping", Some(matches)) => parse_cluster_ping(matches), + ("show-validators", Some(matches)) => parse_show_validators(matches), + // Program Deployment + ("deploy", Some(deploy_matches)) => Ok(CliCommand::Deploy( + deploy_matches + .value_of("program_location") + .unwrap() + .to_string(), + )), + // Stake Commands + ("create-stake-account", Some(matches)) => parse_stake_create_account(pubkey, matches), + ("delegate-stake", Some(matches)) => parse_stake_delegate_stake(matches), + ("withdraw-stake", Some(matches)) => parse_stake_withdraw_stake(matches), + ("deactivate-stake", Some(matches)) => parse_stake_deactivate_stake(matches), + ("stake-authorize-staker", Some(matches)) => { + parse_stake_authorize(matches, StakeAuthorize::Staker) + } + ("stake-authorize-withdrawer", Some(matches)) => { + parse_stake_authorize(matches, StakeAuthorize::Withdrawer) + } + ("redeem-vote-credits", Some(matches)) => parse_redeem_vote_credits(matches), + ("show-stake-account", Some(matches)) => parse_show_stake_account(matches), + // Storage Commands + ("create-replicator-storage-account", Some(matches)) => { + parse_storage_create_replicator_account(matches) + } + ("create-validator-storage-account", Some(matches)) => { + parse_storage_create_validator_account(matches) + } + ("claim-storage-reward", Some(matches)) => parse_storage_claim_reward(matches), + ("show-storage-account", Some(matches)) => parse_storage_get_account_command(matches), + // Validator Info Commands + ("validator-info", Some(matches)) => match matches.subcommand() { + ("publish", Some(matches)) => parse_validator_info_command(matches, pubkey), + ("get", Some(matches)) => parse_get_validator_info_command(matches), + ("", None) => { + eprintln!("{}", matches.usage()); + Err(CliError::CommandNotRecognized( + "no validator-info subcommand given".to_string(), + )) + } + _ => unreachable!(), + }, + // Vote Commands + ("create-vote-account", Some(matches)) => parse_vote_create_account(pubkey, matches), + ("vote-authorize-voter", Some(matches)) => { + parse_vote_authorize(matches, VoteAuthorize::Voter) + } + ("vote-authorize-withdrawer", Some(matches)) => { + parse_vote_authorize(matches, VoteAuthorize::Withdrawer) + } + ("show-vote-account", Some(matches)) => parse_vote_get_account_command(matches), + ("uptime", Some(matches)) => parse_vote_uptime_command(matches), + // Wallet Commands + ("address", Some(_address_matches)) => Ok(CliCommand::Address), ("airdrop", Some(airdrop_matches)) => { let drone_port = airdrop_matches .value_of("drone_port") .unwrap() .parse() .or_else(|err| { - Err(WalletError::BadParameter(format!( + Err(CliError::BadParameter(format!( "Invalid drone port: {:?}", err ))) @@ -194,7 +264,7 @@ pub fn parse_command( let drone_host = if let Some(drone_host) = matches.value_of("drone_host") { Some(solana_netutil::parse_host(drone_host).or_else(|err| { - Err(WalletError::BadParameter(format!( + Err(CliError::BadParameter(format!( "Invalid drone host: {:?}", err ))) @@ -205,7 +275,7 @@ pub fn parse_command( let lamports = amount_of(airdrop_matches, "amount", "unit").expect("Invalid amount"); let use_lamports_unit = airdrop_matches.value_of("unit").is_some() && airdrop_matches.value_of("unit").unwrap() == "lamports"; - Ok(WalletCommand::Airdrop { + Ok(CliCommand::Airdrop { drone_host, drone_port, lamports, @@ -215,93 +285,24 @@ pub fn parse_command( ("balance", Some(balance_matches)) => { let pubkey = pubkey_of(&balance_matches, "pubkey").unwrap_or(*pubkey); let use_lamports_unit = balance_matches.is_present("lamports"); - Ok(WalletCommand::Balance { + Ok(CliCommand::Balance { pubkey, use_lamports_unit, }) } ("cancel", Some(cancel_matches)) => { let process_id = value_of(cancel_matches, "process_id").unwrap(); - Ok(WalletCommand::Cancel(process_id)) + Ok(CliCommand::Cancel(process_id)) } ("confirm", Some(confirm_matches)) => { match confirm_matches.value_of("signature").unwrap().parse() { - Ok(signature) => Ok(WalletCommand::Confirm(signature)), + Ok(signature) => Ok(CliCommand::Confirm(signature)), _ => { eprintln!("{}", confirm_matches.usage()); - Err(WalletError::BadParameter("Invalid signature".to_string())) + Err(CliError::BadParameter("Invalid signature".to_string())) } } } - ("show-account", Some(matches)) => { - let account_pubkey = pubkey_of(matches, "account_pubkey").unwrap(); - let output_file = matches.value_of("output_file"); - let use_lamports_unit = matches.is_present("lamports"); - Ok(WalletCommand::ShowAccount { - pubkey: account_pubkey, - output_file: output_file.map(ToString::to_string), - use_lamports_unit, - }) - } - ("create-vote-account", Some(matches)) => parse_vote_create_account(pubkey, matches), - ("vote-authorize-voter", Some(matches)) => { - parse_vote_authorize(matches, VoteAuthorize::Voter) - } - ("vote-authorize-withdrawer", Some(matches)) => { - parse_vote_authorize(matches, VoteAuthorize::Withdrawer) - } - ("show-vote-account", Some(matches)) => parse_vote_get_account_command(matches), - ("uptime", Some(matches)) => parse_vote_uptime_command(matches), - ("create-stake-account", Some(matches)) => parse_stake_create_account(pubkey, matches), - ("delegate-stake", Some(matches)) => parse_stake_delegate_stake(matches), - ("withdraw-stake", Some(matches)) => parse_stake_withdraw_stake(matches), - ("deactivate-stake", Some(matches)) => parse_stake_deactivate_stake(matches), - ("stake-authorize-staker", Some(matches)) => { - parse_stake_authorize(matches, StakeAuthorize::Staker) - } - ("stake-authorize-withdrawer", Some(matches)) => { - parse_stake_authorize(matches, StakeAuthorize::Withdrawer) - } - ("redeem-vote-credits", Some(matches)) => parse_redeem_vote_credits(matches), - ("show-stake-account", Some(matches)) => parse_show_stake_account(matches), - ("create-replicator-storage-account", Some(matches)) => { - let account_owner = pubkey_of(matches, "storage_account_owner").unwrap(); - let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); - Ok(WalletCommand::CreateReplicatorStorageAccount( - account_owner, - storage_account_pubkey, - )) - } - ("create-validator-storage-account", Some(matches)) => { - let account_owner = pubkey_of(matches, "storage_account_owner").unwrap(); - let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); - Ok(WalletCommand::CreateValidatorStorageAccount( - account_owner, - storage_account_pubkey, - )) - } - ("claim-storage-reward", Some(matches)) => { - let node_account_pubkey = pubkey_of(matches, "node_account_pubkey").unwrap(); - let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); - Ok(WalletCommand::ClaimStorageReward( - node_account_pubkey, - storage_account_pubkey, - )) - } - ("show-storage-account", Some(matches)) => { - let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); - Ok(WalletCommand::ShowStorageAccount(storage_account_pubkey)) - } - ("deploy", Some(deploy_matches)) => Ok(WalletCommand::Deploy( - deploy_matches - .value_of("program_location") - .unwrap() - .to_string(), - )), - ("get-genesis-blockhash", Some(_matches)) => Ok(WalletCommand::GetGenesisBlockhash), - ("get-slot", Some(_matches)) => Ok(WalletCommand::GetSlot), - ("get-epoch-info", Some(_matches)) => Ok(WalletCommand::GetEpochInfo), - ("get-transaction-count", Some(_matches)) => Ok(WalletCommand::GetTransactionCount), ("pay", Some(pay_matches)) => { let lamports = amount_of(pay_matches, "amount", "unit").expect("Invalid amount"); let to = value_of(&pay_matches, "to").unwrap_or(*pubkey); @@ -324,7 +325,7 @@ pub fn parse_command( None }; - Ok(WalletCommand::Pay { + Ok(CliCommand::Pay { lamports, to, timestamp, @@ -333,24 +334,20 @@ pub fn parse_command( cancelable, }) } - ("ping", Some(ping_matches)) => { - let interval = Duration::from_secs(value_t_or_exit!(ping_matches, "interval", u64)); - let count = if ping_matches.is_present("count") { - Some(value_t_or_exit!(ping_matches, "count", u64)) - } else { - None - }; - let timeout = Duration::from_secs(value_t_or_exit!(ping_matches, "timeout", u64)); - Ok(WalletCommand::Ping { - interval, - count, - timeout, + ("show-account", Some(matches)) => { + let account_pubkey = pubkey_of(matches, "account_pubkey").unwrap(); + let output_file = matches.value_of("output_file"); + let use_lamports_unit = matches.is_present("lamports"); + Ok(CliCommand::ShowAccount { + pubkey: account_pubkey, + output_file: output_file.map(ToString::to_string), + use_lamports_unit, }) } ("send-signature", Some(sig_matches)) => { let to = value_of(&sig_matches, "to").unwrap(); let process_id = value_of(&sig_matches, "process_id").unwrap(); - Ok(WalletCommand::Witness(to, process_id)) + Ok(CliCommand::Witness(to, process_id)) } ("send-timestamp", Some(timestamp_matches)) => { let to = value_of(×tamp_matches, "to").unwrap(); @@ -370,23 +367,11 @@ pub fn parse_command( } else { Utc::now() }; - Ok(WalletCommand::TimeElapsed(to, process_id, dt)) + Ok(CliCommand::TimeElapsed(to, process_id, dt)) } - ("cluster-version", Some(_matches)) => Ok(WalletCommand::GetVersion), - ("validator-info", Some(matches)) => match matches.subcommand() { - ("publish", Some(matches)) => parse_validator_info_command(matches, pubkey), - ("get", Some(matches)) => parse_get_validator_info_command(matches), - ("", None) => { - eprintln!("{}", matches.usage()); - Err(WalletError::CommandNotRecognized( - "no validator-info subcommand given".to_string(), - )) - } - _ => unreachable!(), - }, ("", None) => { eprintln!("{}", matches.usage()); - Err(WalletError::CommandNotRecognized( + Err(CliError::CommandNotRecognized( "no subcommand given".to_string(), )) } @@ -395,11 +380,11 @@ pub fn parse_command( Ok(response) } -pub type ProcessResult = Result>; +pub type ProcessResult = Result>; pub fn check_account_for_fee( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, fee_calculator: &FeeCalculator, message: &Message, ) -> Result<(), Box> { @@ -408,7 +393,7 @@ pub fn check_account_for_fee( fn check_account_for_multiple_fees( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, fee_calculator: &FeeCalculator, messages: &[&Message], ) -> Result<(), Box> { @@ -423,15 +408,15 @@ fn check_account_for_multiple_fees( return Ok(()); } } - Err(WalletError::InsufficientFundsForFee.into()) + Err(CliError::InsufficientFundsForFee.into()) } pub fn check_unique_pubkeys( pubkey0: (&Pubkey, String), pubkey1: (&Pubkey, String), -) -> Result<(), WalletError> { +) -> Result<(), CliError> { if pubkey0.0 == pubkey1.0 { - Err(WalletError::BadParameter(format!( + Err(CliError::BadParameter(format!( "Identical pubkeys found: `{}` and `{}` must be unique", pubkey0.1, pubkey1.1 ))) @@ -440,17 +425,9 @@ pub fn check_unique_pubkeys( } } -fn process_fees(rpc_client: &RpcClient) -> ProcessResult { - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - - Ok(format!( - "blockhash: {}\nlamports per signature: {}", - recent_blockhash, fee_calculator.lamports_per_signature - )) -} fn process_airdrop( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, drone_addr: &SocketAddr, lamports: u64, use_lamports_unit: bool, @@ -463,7 +440,7 @@ fn process_airdrop( let previous_balance = match rpc_client.retry_get_balance(&config.keypair.pubkey(), 5)? { Some(lamports) => lamports, None => { - return Err(WalletError::RpcRequestError( + return Err(CliError::RpcRequestError( "Received result of an unexpected type".to_string(), ) .into()) @@ -487,10 +464,9 @@ fn process_balance( let balance = rpc_client.retry_get_balance(pubkey, 5)?; match balance { Some(lamports) => Ok(build_balance_message(lamports, use_lamports_unit)), - None => Err(WalletError::RpcRequestError( - "Received result of an unexpected type".to_string(), - ) - .into()), + None => Err( + CliError::RpcRequestError("Received result of an unexpected type".to_string()).into(), + ), } } @@ -506,15 +482,13 @@ fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResu Ok("Not found".to_string()) } } - Err(err) => { - Err(WalletError::RpcRequestError(format!("Unable to confirm: {:?}", err)).into()) - } + Err(err) => Err(CliError::RpcRequestError(format!("Unable to confirm: {:?}", err)).into()), } } fn process_show_account( rpc_client: &RpcClient, - _config: &WalletConfig, + _config: &CliConfig, account_pubkey: &Pubkey, output_file: &Option, use_lamports_unit: bool, @@ -543,118 +517,18 @@ fn process_show_account( Ok("".to_string()) } -fn process_create_replicator_storage_account( - rpc_client: &RpcClient, - config: &WalletConfig, - account_owner: &Pubkey, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), - ( - &storage_account_pubkey, - "storage_account_pubkey".to_string(), - ), - )?; - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - let ixs = storage_instruction::create_replicator_storage_account( - &config.keypair.pubkey(), - &account_owner, - storage_account_pubkey, - 1, - ); - let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash); - check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; - let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]); - log_instruction_custom_error::(result) -} - -fn process_create_validator_storage_account( - rpc_client: &RpcClient, - config: &WalletConfig, - account_owner: &Pubkey, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), - ( - &storage_account_pubkey, - "storage_account_pubkey".to_string(), - ), - )?; - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - let ixs = storage_instruction::create_validator_storage_account( - &config.keypair.pubkey(), - account_owner, - storage_account_pubkey, - 1, - ); - let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash); - check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; - let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]); - log_instruction_custom_error::(result) -} - -fn process_claim_storage_reward( - rpc_client: &RpcClient, - config: &WalletConfig, - node_account_pubkey: &Pubkey, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - - let instruction = - storage_instruction::claim_reward(node_account_pubkey, storage_account_pubkey); - let signers = [&config.keypair]; - let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey())); - - let mut tx = Transaction::new(&signers, message, recent_blockhash); - check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; - let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?; - Ok(signature_str.to_string()) -} - -fn process_show_storage_account( - rpc_client: &RpcClient, - _config: &WalletConfig, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - let account = rpc_client.get_account(storage_account_pubkey)?; - - if account.owner != solana_storage_api::id() { - return Err(WalletError::RpcRequestError( - format!("{:?} is not a storage account", storage_account_pubkey).to_string(), - ) - .into()); - } - - use solana_storage_api::storage_contract::StorageContract; - let storage_contract: StorageContract = account.state().map_err(|err| { - WalletError::RpcRequestError( - format!("Unable to deserialize storage account: {:?}", err).to_string(), - ) - })?; - println!("{:#?}", storage_contract); - println!("account lamports: {}", account.lamports); - Ok("".to_string()) -} - fn process_deploy( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, program_location: &str, ) -> ProcessResult { let program_id = Keypair::new(); let mut file = File::open(program_location).map_err(|err| { - WalletError::DynamicProgramError( - format!("Unable to open program file: {}", err).to_string(), - ) + CliError::DynamicProgramError(format!("Unable to open program file: {}", err).to_string()) })?; let mut program_data = Vec::new(); file.read_to_end(&mut program_data).map_err(|err| { - WalletError::DynamicProgramError( - format!("Unable to read program file: {}", err).to_string(), - ) + CliError::DynamicProgramError(format!("Unable to read program file: {}", err).to_string()) })?; // Build transactions to calculate fees @@ -699,9 +573,8 @@ fn process_deploy( trace!("Creating program account"); let result = rpc_client.send_and_confirm_transaction(&mut create_account_tx, &[&config.keypair]); - log_instruction_custom_error::(result).map_err(|_| { - WalletError::DynamicProgramError("Program allocate space failed".to_string()) - })?; + log_instruction_custom_error::(result) + .map_err(|_| CliError::DynamicProgramError("Program allocate space failed".to_string()))?; trace!("Writing program data"); rpc_client.send_and_confirm_transactions(write_transactions, &signers)?; @@ -710,7 +583,7 @@ fn process_deploy( rpc_client .send_and_confirm_transaction(&mut finalize_tx, &signers) .map_err(|_| { - WalletError::DynamicProgramError("Program finalize transaction failed".to_string()) + CliError::DynamicProgramError("Program finalize transaction failed".to_string()) })?; Ok(json!({ @@ -721,7 +594,7 @@ fn process_deploy( fn process_pay( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, lamports: u64, to: &Pubkey, timestamp: Option>, @@ -730,7 +603,7 @@ fn process_pay( cancelable: Option, ) -> ProcessResult { check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), + (&config.keypair.pubkey(), "cli keypair".to_string()), (to, "to".to_string()), )?; let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; @@ -775,7 +648,7 @@ fn process_pay( let witness = if let Some(ref witness_vec) = *witnesses { witness_vec[0] } else { - return Err(WalletError::BadParameter( + return Err(CliError::BadParameter( "Could not parse required signature pubkey(s)".to_string(), ) .into()); @@ -807,7 +680,7 @@ fn process_pay( } } -fn process_cancel(rpc_client: &RpcClient, config: &WalletConfig, pubkey: &Pubkey) -> ProcessResult { +fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -> ProcessResult { let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let ix = budget_instruction::apply_signature( &config.keypair.pubkey(), @@ -820,53 +693,9 @@ fn process_cancel(rpc_client: &RpcClient, config: &WalletConfig, pubkey: &Pubkey log_instruction_custom_error::(result) } -fn process_get_genesis_blockhash(rpc_client: &RpcClient) -> ProcessResult { - let genesis_blockhash = rpc_client.get_genesis_blockhash()?; - Ok(genesis_blockhash.to_string()) -} - -fn process_get_slot(rpc_client: &RpcClient) -> ProcessResult { - let slot = rpc_client.get_slot()?; - Ok(slot.to_string()) -} - -fn process_get_epoch_info(rpc_client: &RpcClient) -> ProcessResult { - let epoch_info = rpc_client.get_epoch_info()?; - println!(); - println_name_value("Current epoch:", &epoch_info.epoch.to_string()); - println_name_value("Current slot:", &epoch_info.absolute_slot.to_string()); - println_name_value( - "Total slots in current epoch:", - &epoch_info.slots_in_epoch.to_string(), - ); - let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index; - println_name_value( - "Remaining slots in current epoch:", - &remaining_slots_in_epoch.to_string(), - ); - - let remaining_time_in_epoch = Duration::from_secs( - remaining_slots_in_epoch * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND, - ); - println_name_value( - "Time remaining in current epoch:", - &format!( - "{} minutes, {} seconds", - remaining_time_in_epoch.as_secs() / 60, - remaining_time_in_epoch.as_secs() % 60 - ), - ); - Ok("".to_string()) -} - -fn process_get_transaction_count(rpc_client: &RpcClient) -> ProcessResult { - let transaction_count = rpc_client.get_transaction_count()?; - Ok(transaction_count.to_string()) -} - fn process_time_elapsed( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, to: &Pubkey, pubkey: &Pubkey, dt: DateTime, @@ -882,7 +711,7 @@ fn process_time_elapsed( fn process_witness( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, to: &Pubkey, pubkey: &Pubkey, ) -> ProcessResult { @@ -895,146 +724,13 @@ fn process_witness( log_instruction_custom_error::(result) } -fn process_get_version(rpc_client: &RpcClient, config: &WalletConfig) -> ProcessResult { - let remote_version: Value = serde_json::from_str(&rpc_client.get_version()?)?; - println!( - "{} {}", - style("Cluster versions from:").bold(), - config.json_rpc_url - ); - if let Some(versions) = remote_version.as_object() { - for (key, value) in versions.iter() { - if let Some(value_string) = value.as_str() { - println_name_value(&format!("* {}:", key), &value_string); - } - } +pub fn process_command(config: &CliConfig) -> ProcessResult { + println_name_value("Keypair:", &config.keypair_path); + if let CliCommand::Address = config.command { + // Get address of this client + return Ok(format!("{}", config.keypair.pubkey())); } - Ok("".to_string()) -} - -fn process_ping( - rpc_client: &RpcClient, - config: &WalletConfig, - interval: &Duration, - count: &Option, - timeout: &Duration, -) -> ProcessResult { - let to = Keypair::new().pubkey(); - - println_name_value("Source account:", &config.keypair.pubkey().to_string()); - println_name_value("Destination account:", &to.to_string()); - println!(); - - let (signal_sender, signal_receiver) = std::sync::mpsc::channel(); - ctrlc::set_handler(move || { - let _ = signal_sender.send(()); - }) - .expect("Error setting Ctrl-C handler"); - - let mut last_blockhash = Hash::default(); - let mut submit_count = 0; - let mut confirmed_count = 0; - let mut confirmation_time: VecDeque = VecDeque::with_capacity(1024); - - 'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) { - let (recent_blockhash, fee_calculator) = rpc_client.get_new_blockhash(&last_blockhash)?; - last_blockhash = recent_blockhash; - - let transaction = system_transaction::transfer(&config.keypair, &to, 1, recent_blockhash); - check_account_for_fee(rpc_client, config, &fee_calculator, &transaction.message)?; - - match rpc_client.send_transaction(&transaction) { - Ok(signature) => { - let transaction_sent = Instant::now(); - loop { - let signature_status = rpc_client.get_signature_status(&signature)?; - let elapsed_time = Instant::now().duration_since(transaction_sent); - if let Some(transaction_status) = signature_status { - match transaction_status { - Ok(()) => { - let elapsed_time_millis = elapsed_time.as_millis() as u64; - confirmation_time.push_back(elapsed_time_millis); - println!( - "{}1 lamport transferred: seq={:<3} time={:>4}ms signature={}", - CHECK_MARK, seq, elapsed_time_millis, signature - ); - confirmed_count += 1; - } - Err(err) => { - println!( - "{}Transaction failed: seq={:<3} error={:?} signature={}", - CROSS_MARK, seq, err, signature - ); - } - } - break; - } - - if elapsed_time >= *timeout { - println!( - "{}Confirmation timeout: seq={:<3} signature={}", - CROSS_MARK, seq, signature - ); - break; - } - - // Sleep for half a slot - if signal_receiver - .recv_timeout(Duration::from_millis( - 500 * solana_sdk::clock::DEFAULT_TICKS_PER_SLOT - / solana_sdk::clock::DEFAULT_TICKS_PER_SECOND, - )) - .is_ok() - { - break 'mainloop; - } - } - } - Err(err) => { - println!( - "{}Submit failed: seq={:<3} error={:?}", - CROSS_MARK, seq, err - ); - } - } - submit_count += 1; - - if signal_receiver.recv_timeout(*interval).is_ok() { - break 'mainloop; - } - } - - println!(); - println!("--- transaction statistics ---"); - println!( - "{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss", - submit_count, - confirmed_count, - (100. - f64::from(confirmed_count) / f64::from(submit_count) * 100.) - ); - if !confirmation_time.is_empty() { - let samples: Vec = confirmation_time.iter().map(|t| *t as f64).collect(); - let dist = criterion_stats::Distribution::from(samples.into_boxed_slice()); - let mean = dist.mean(); - println!( - "confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms", - dist.min(), - mean, - dist.max(), - dist.std_dev(Some(mean)) - ); - } - - Ok("".to_string()) -} - -pub fn process_command(config: &WalletConfig) -> ProcessResult { - println_name_value("Keypair:", &config.keypair_path); - if let WalletCommand::Address = config.command { - // Get address of this client - return Ok(format!("{}", config.keypair.pubkey())); - } - println_name_value("RPC Endpoint:", &config.json_rpc_url); + println_name_value("RPC Endpoint:", &config.json_rpc_url); let mut _rpc_client; let rpc_client = if config.rpc_client.is_none() { @@ -1046,100 +742,35 @@ pub fn process_command(config: &WalletConfig) -> ProcessResult { }; match &config.command { - // Get address of this client - WalletCommand::Address => unreachable!(), - - WalletCommand::Fees => process_fees(&rpc_client), - - // Request an airdrop from Solana Drone; - WalletCommand::Airdrop { - drone_host, - drone_port, - lamports, - use_lamports_unit, - } => { - let drone_addr = SocketAddr::new( - drone_host.unwrap_or_else(|| { - let drone_host = url::Url::parse(&config.json_rpc_url) - .unwrap() - .host() - .unwrap() - .to_string(); - solana_netutil::parse_host(&drone_host).unwrap_or_else(|err| { - panic!("Unable to resolve {}: {}", drone_host, err); - }) - }), - *drone_port, - ); - - process_airdrop( - &rpc_client, - config, - &drone_addr, - *lamports, - *use_lamports_unit, - ) + // Cluster Query Commands + + // Return software version of solana-cli and cluster entrypoint node + CliCommand::ClusterVersion => process_cluster_version(&rpc_client, config), + CliCommand::Fees => process_fees(&rpc_client), + CliCommand::GetGenesisBlockhash => process_get_genesis_blockhash(&rpc_client), + CliCommand::GetSlot => process_get_slot(&rpc_client), + CliCommand::GetEpochInfo => process_get_epoch_info(&rpc_client), + CliCommand::GetTransactionCount => process_get_transaction_count(&rpc_client), + CliCommand::Ping { + interval, + count, + timeout, + } => process_ping(&rpc_client, config, interval, count, timeout), + CliCommand::ShowValidators { use_lamports_unit } => { + process_show_validators(&rpc_client, *use_lamports_unit) } - // Check client balance - WalletCommand::Balance { - pubkey, - use_lamports_unit, - } => process_balance(&pubkey, &rpc_client, *use_lamports_unit), - - // Cancel a contract by contract Pubkey - WalletCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey), - - // Confirm the last client transaction by signature - WalletCommand::Confirm(signature) => process_confirm(&rpc_client, signature), + // Program Deployment - // Create vote account - WalletCommand::CreateVoteAccount(vote_account_pubkey, vote_init) => { - process_create_vote_account(&rpc_client, config, &vote_account_pubkey, &vote_init) + // Deploy a custom program to the chain + CliCommand::Deploy(ref program_location) => { + process_deploy(&rpc_client, config, program_location) } - WalletCommand::VoteAuthorize( - vote_account_pubkey, - new_authorized_pubkey, - vote_authorize, - ) => process_vote_authorize( - &rpc_client, - config, - &vote_account_pubkey, - &new_authorized_pubkey, - *vote_authorize, - ), - - WalletCommand::ShowAccount { - pubkey, - output_file, - use_lamports_unit, - } => process_show_account( - &rpc_client, - config, - &pubkey, - &output_file, - *use_lamports_unit, - ), - - WalletCommand::ShowVoteAccount { - pubkey: vote_account_pubkey, - use_lamports_unit, - } => process_show_vote_account( - &rpc_client, - config, - &vote_account_pubkey, - *use_lamports_unit, - ), - - WalletCommand::Uptime { - pubkey: vote_account_pubkey, - aggregate, - span, - } => process_uptime(&rpc_client, config, &vote_account_pubkey, *aggregate, *span), + // Stake Commands // Create stake account - WalletCommand::CreateStakeAccount(stake_account_pubkey, authorized, lockup, lamports) => { + CliCommand::CreateStakeAccount(stake_account_pubkey, authorized, lockup, lamports) => { process_create_stake_account( &rpc_client, config, @@ -1149,7 +780,11 @@ pub fn process_command(config: &WalletConfig) -> ProcessResult { *lamports, ) } - WalletCommand::DelegateStake(stake_account_pubkey, vote_account_pubkey, force) => { + // Deactivate stake account + CliCommand::DeactivateStake(stake_account_pubkey) => { + process_deactivate_stake_account(&rpc_client, config, &stake_account_pubkey) + } + CliCommand::DelegateStake(stake_account_pubkey, vote_account_pubkey, force) => { process_delegate_stake( &rpc_client, config, @@ -1158,7 +793,24 @@ pub fn process_command(config: &WalletConfig) -> ProcessResult { *force, ) } - WalletCommand::StakeAuthorize( + CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => { + process_redeem_vote_credits( + &rpc_client, + config, + &stake_account_pubkey, + &vote_account_pubkey, + ) + } + CliCommand::ShowStakeAccount { + pubkey: stake_account_pubkey, + use_lamports_unit, + } => process_show_stake_account( + &rpc_client, + config, + &stake_account_pubkey, + *use_lamports_unit, + ), + CliCommand::StakeAuthorize( stake_account_pubkey, new_authorized_pubkey, stake_authorize, @@ -1170,91 +822,128 @@ pub fn process_command(config: &WalletConfig) -> ProcessResult { *stake_authorize, ), - WalletCommand::WithdrawStake( - stake_account_pubkey, - destination_account_pubkey, - lamports, - ) => process_withdraw_stake( - &rpc_client, - config, - &stake_account_pubkey, - &destination_account_pubkey, - *lamports, - ), - - // Deactivate stake account - WalletCommand::DeactivateStake(stake_account_pubkey, vote_account_pubkey) => { - process_deactivate_stake_account( + CliCommand::WithdrawStake(stake_account_pubkey, destination_account_pubkey, lamports) => { + process_withdraw_stake( &rpc_client, config, &stake_account_pubkey, - &vote_account_pubkey, + &destination_account_pubkey, + *lamports, ) } - WalletCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => { - process_redeem_vote_credits( - &rpc_client, - config, - &stake_account_pubkey, - &vote_account_pubkey, - ) - } + // Storage Commands - WalletCommand::ShowStakeAccount { - pubkey: stake_account_pubkey, - use_lamports_unit, - } => process_show_stake_account( + // Create storage account + CliCommand::CreateStorageAccount { + account_owner, + storage_account_pubkey, + account_type, + } => process_create_storage_account( &rpc_client, config, - &stake_account_pubkey, - *use_lamports_unit, + &account_owner, + &storage_account_pubkey, + *account_type, ), - - WalletCommand::CreateReplicatorStorageAccount( - storage_account_owner, + CliCommand::ClaimStorageReward { + node_account_pubkey, storage_account_pubkey, - ) => process_create_replicator_storage_account( + } => process_claim_storage_reward( &rpc_client, config, - &storage_account_owner, + node_account_pubkey, &storage_account_pubkey, ), + CliCommand::ShowStorageAccount(storage_account_pubkey) => { + process_show_storage_account(&rpc_client, config, &storage_account_pubkey) + } - WalletCommand::CreateValidatorStorageAccount(account_owner, storage_account_pubkey) => { - process_create_validator_storage_account( - &rpc_client, - config, - &account_owner, - &storage_account_pubkey, - ) + // Validator Info Commands + + // Return all or single validator info + CliCommand::GetValidatorInfo(info_pubkey) => { + process_get_validator_info(&rpc_client, *info_pubkey) + } + // Publish validator info + CliCommand::SetValidatorInfo(validator_info, info_pubkey) => { + process_set_validator_info(&rpc_client, config, &validator_info, *info_pubkey) } - WalletCommand::ClaimStorageReward(node_account_pubkey, storage_account_pubkey) => { - process_claim_storage_reward( + // Vote Commands + + // Create vote account + CliCommand::CreateVoteAccount(vote_account_pubkey, vote_init) => { + process_create_vote_account(&rpc_client, config, &vote_account_pubkey, &vote_init) + } + CliCommand::ShowVoteAccount { + pubkey: vote_account_pubkey, + use_lamports_unit, + } => process_show_vote_account( + &rpc_client, + config, + &vote_account_pubkey, + *use_lamports_unit, + ), + CliCommand::VoteAuthorize(vote_account_pubkey, new_authorized_pubkey, vote_authorize) => { + process_vote_authorize( &rpc_client, config, - node_account_pubkey, - &storage_account_pubkey, + &vote_account_pubkey, + &new_authorized_pubkey, + *vote_authorize, ) } + CliCommand::Uptime { + pubkey: vote_account_pubkey, + aggregate, + span, + } => process_uptime(&rpc_client, config, &vote_account_pubkey, *aggregate, *span), - WalletCommand::ShowStorageAccount(storage_account_pubkey) => { - process_show_storage_account(&rpc_client, config, &storage_account_pubkey) - } + // Wallet Commands - // Deploy a custom program to the chain - WalletCommand::Deploy(ref program_location) => { - process_deploy(&rpc_client, config, program_location) - } - - WalletCommand::GetGenesisBlockhash => process_get_genesis_blockhash(&rpc_client), - WalletCommand::GetSlot => process_get_slot(&rpc_client), - WalletCommand::GetEpochInfo => process_get_epoch_info(&rpc_client), - WalletCommand::GetTransactionCount => process_get_transaction_count(&rpc_client), + // Get address of this client + CliCommand::Address => unreachable!(), + // Request an airdrop from Solana Drone; + CliCommand::Airdrop { + drone_host, + drone_port, + lamports, + use_lamports_unit, + } => { + let drone_addr = SocketAddr::new( + drone_host.unwrap_or_else(|| { + let drone_host = url::Url::parse(&config.json_rpc_url) + .unwrap() + .host() + .unwrap() + .to_string(); + solana_netutil::parse_host(&drone_host).unwrap_or_else(|err| { + panic!("Unable to resolve {}: {}", drone_host, err); + }) + }), + *drone_port, + ); + process_airdrop( + &rpc_client, + config, + &drone_addr, + *lamports, + *use_lamports_unit, + ) + } + // Check client balance + CliCommand::Balance { + pubkey, + use_lamports_unit, + } => process_balance(&pubkey, &rpc_client, *use_lamports_unit), + // Cancel a contract by contract Pubkey + CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey), + // Confirm the last client transaction by signature + CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature), // If client has positive balance, pay lamports to another address - WalletCommand::Pay { + CliCommand::Pay { lamports, to, timestamp, @@ -1271,33 +960,23 @@ pub fn process_command(config: &WalletConfig) -> ProcessResult { witnesses, *cancelable, ), - - WalletCommand::Ping { - interval, - count, - timeout, - } => process_ping(&rpc_client, config, interval, count, timeout), - + CliCommand::ShowAccount { + pubkey, + output_file, + use_lamports_unit, + } => process_show_account( + &rpc_client, + config, + &pubkey, + &output_file, + *use_lamports_unit, + ), // Apply time elapsed to contract - WalletCommand::TimeElapsed(to, pubkey, dt) => { + CliCommand::TimeElapsed(to, pubkey, dt) => { process_time_elapsed(&rpc_client, config, &to, &pubkey, *dt) } - // Apply witness signature to contract - WalletCommand::Witness(to, pubkey) => process_witness(&rpc_client, config, &to, &pubkey), - - // Return software version of wallet and cluster entrypoint node - WalletCommand::GetVersion => process_get_version(&rpc_client, config), - - // Return all or single validator info - WalletCommand::GetValidatorInfo(info_pubkey) => { - process_get_validator_info(&rpc_client, *info_pubkey) - } - - // Publish validator info - WalletCommand::SetValidatorInfo(validator_info, info_pubkey) => { - process_set_validator_info(&rpc_client, config, &validator_info, *info_pubkey) - } + CliCommand::Witness(to, pubkey) => process_witness(&rpc_client, config, &to, &pubkey), } } @@ -1403,7 +1082,21 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .version(version) .setting(AppSettings::SubcommandRequiredElseHelp) .subcommand(SubCommand::with_name("address").about("Get your public key")) - .subcommand(SubCommand::with_name("fees").about("Display current cluster fees")) + .cluster_query_subcommands() + .subcommand( + SubCommand::with_name("deploy") + .about("Deploy a program") + .arg( + Arg::with_name("program_location") + .index(1) + .value_name("PATH TO PROGRAM") + .takes_value(true) + .required(true) + .help("/path/to/program.o"), + ), // TODO: Add "loader" argument; current default is bpf_loader + ) + .stake_subcommands() + .storage_subcommands() .subcommand( SubCommand::with_name("airdrop") .about("Request lamports") @@ -1482,298 +1175,6 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .help("The transaction signature to confirm"), ), ) - .subcommand( - SubCommand::with_name("vote-authorize-voter") - .about("Authorize a new vote signing keypair for the given vote account") - .arg( - Arg::with_name("vote_account_pubkey") - .index(1) - .value_name("VOTE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Vote account in which to set the authorized voter"), - ) - .arg( - Arg::with_name("new_authorized_pubkey") - .index(2) - .value_name("NEW VOTER PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("New vote signer to authorize"), - ), - ) - .subcommand( - SubCommand::with_name("vote-authorize-withdrawer") - .about("Authorize a new withdraw signing keypair for the given vote account") - .arg( - Arg::with_name("vote_account_pubkey") - .index(1) - .value_name("VOTE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Vote account in which to set the authorized withdrawer"), - ) - .arg( - Arg::with_name("new_authorized_pubkey") - .index(2) - .value_name("NEW WITHDRAWER PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("New withdrawer to authorize"), - ), - ) - .subcommand( - SubCommand::with_name("create-vote-account") - .about("Create a vote account") - .arg( - Arg::with_name("vote_account_pubkey") - .index(1) - .value_name("VOTE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Vote account address to fund"), - ) - .arg( - Arg::with_name("node_pubkey") - .index(2) - .value_name("VALIDATOR PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Validator that will vote with this account"), - ) - .arg( - Arg::with_name("commission") - .long("commission") - .value_name("NUM") - .takes_value(true) - .help("The commission taken on reward redemption (0-255), default: 0"), - ) - .arg( - Arg::with_name("authorized_voter") - .long("authorized-voter") - .value_name("PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .help("Public key of the authorized voter (defaults to vote account)"), - ) - .arg( - Arg::with_name("authorized_withdrawer") - .long("authorized-withdrawer") - .value_name("PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .help("Public key of the authorized withdrawer (defaults to wallet)"), - ), - ) - .subcommand( - SubCommand::with_name("show-account") - .about("Show the contents of an account") - .arg( - Arg::with_name("account_pubkey") - .index(1) - .value_name("ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Account pubkey"), - ) - .arg( - Arg::with_name("output_file") - .long("output") - .short("o") - .value_name("FILE") - .takes_value(true) - .help("Write the account data to this file"), - ) - .arg( - Arg::with_name("lamports") - .long("lamports") - .takes_value(false) - .help("Display balance in lamports instead of SOL"), - ), - ) - .subcommand( - SubCommand::with_name("show-vote-account") - .about("Show the contents of a vote account") - .arg( - Arg::with_name("vote_account_pubkey") - .index(1) - .value_name("VOTE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Vote account pubkey"), - ) - .arg( - Arg::with_name("lamports") - .long("lamports") - .takes_value(false) - .help("Display balance in lamports instead of SOL"), - ), - ) - .subcommand( - SubCommand::with_name("uptime") - .about("Show the uptime of a validator, based on epoch voting history") - .arg( - Arg::with_name("vote_account_pubkey") - .index(1) - .value_name("VOTE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Vote account pubkey"), - ) - .arg( - Arg::with_name("span") - .long("span") - .value_name("NUM OF EPOCHS") - .takes_value(true) - .help("Number of recent epochs to examine") - ) - .arg( - Arg::with_name("aggregate") - .long("aggregate") - .help("Aggregate uptime data across span") - ), - ) - .stake_subcommands() - .subcommand( - SubCommand::with_name("create-storage-mining-pool-account") - .about("Create mining pool account") - .arg( - Arg::with_name("storage_account_pubkey") - .index(1) - .value_name("STORAGE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Storage mining pool account address to fund"), - ) - .arg( - Arg::with_name("amount") - .index(2) - .value_name("AMOUNT") - .takes_value(true) - .required(true) - .help("The amount to assign to the storage mining pool account (default unit SOL)"), - ) - .arg( - Arg::with_name("unit") - .index(3) - .value_name("UNIT") - .takes_value(true) - .possible_values(&["SOL", "lamports"]) - .help("Specify unit to use for request"), - ), - ) - .subcommand( - SubCommand::with_name("create-replicator-storage-account") - .about("Create a replicator storage account") - .arg( - Arg::with_name("storage_account_owner") - .index(1) - .value_name("STORAGE ACCOUNT OWNER PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - ) - .arg( - Arg::with_name("storage_account_pubkey") - .index(2) - .value_name("STORAGE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - ) - ) - .subcommand( - SubCommand::with_name("create-validator-storage-account") - .about("Create a validator storage account") - .arg( - Arg::with_name("storage_account_owner") - .index(1) - .value_name("STORAGE ACCOUNT OWNER PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - ) - .arg( - Arg::with_name("storage_account_pubkey") - .index(2) - .value_name("STORAGE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - ) - ) - .subcommand( - SubCommand::with_name("claim-storage-reward") - .about("Redeem storage reward credits") - .arg( - Arg::with_name("node_account_pubkey") - .index(1) - .value_name("NODE PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("The node account to credit the rewards to"), - ) - .arg( - Arg::with_name("storage_account_pubkey") - .index(2) - .value_name("STORAGE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Storage account address to redeem credits for"), - )) - .subcommand( - SubCommand::with_name("show-storage-account") - .about("Show the contents of a storage account") - .arg( - Arg::with_name("storage_account_pubkey") - .index(1) - .value_name("STORAGE ACCOUNT PUBKEY") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("Storage account pubkey"), - ) - ) - .subcommand( - SubCommand::with_name("deploy") - .about("Deploy a program") - .arg( - Arg::with_name("program_location") - .index(1) - .value_name("PATH TO PROGRAM") - .takes_value(true) - .required(true) - .help("/path/to/program.o"), - ), // TODO: Add "loader" argument; current default is bpf_loader - ) - .subcommand( - SubCommand::with_name("get-genesis-blockhash") - .about("Get the genesis blockhash"), - ) - .subcommand( - SubCommand::with_name("get-slot") - .about("Get current slot"), - ) - .subcommand( - SubCommand::with_name("get-epoch-info") - .about("Get information about the current epoch"), - ) - .subcommand( - SubCommand::with_name("get-transaction-count") - .about("Get current transaction count"), - ) .subcommand( SubCommand::with_name("pay") .about("Send a payment") @@ -1834,36 +1235,6 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .takes_value(false), ), ) - .subcommand( - SubCommand::with_name("ping") - .about("Submit transactions sequentially") - .arg( - Arg::with_name("interval") - .short("i") - .long("interval") - .value_name("SECONDS") - .takes_value(true) - .default_value("2") - .help("Wait interval seconds between submitting the next transaction"), - ) - .arg( - Arg::with_name("count") - .short("c") - .long("count") - .value_name("NUMBER") - .takes_value(true) - .help("Stop after submitting count transactions"), - ) - .arg( - Arg::with_name("timeout") - .short("t") - .long("timeout") - .value_name("SECONDS") - .takes_value(true) - .default_value("10") - .help("Wait up to timeout seconds for transaction confirmation"), - ), - ) .subcommand( SubCommand::with_name("send-signature") .about("Send a signature to authorize a transfer") @@ -1914,8 +1285,31 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' ), ) .subcommand( - SubCommand::with_name("cluster-version") - .about("Get the version of the cluster entrypoint"), + SubCommand::with_name("show-account") + .about("Show the contents of an account") + .arg( + Arg::with_name("account_pubkey") + .index(1) + .value_name("ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Account pubkey"), + ) + .arg( + Arg::with_name("output_file") + .long("output") + .short("o") + .value_name("FILE") + .takes_value(true) + .help("Write the account data to this file"), + ) + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(false) + .help("Display balance in lamports instead of SOL"), + ), ) .subcommand( SubCommand::with_name("validator-info") @@ -1989,6 +1383,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' ), ) ) + .vote_subcommands() } #[cfg(test)] @@ -1997,7 +1392,7 @@ mod tests { use serde_json::Value; use solana_client::mock_rpc_client_request::SIGNATURE; use solana_sdk::{ - signature::{gen_keypair_file, read_keypair}, + signature::{gen_keypair_file, read_keypair_file}, transaction::TransactionError, }; use std::path::PathBuf; @@ -2026,7 +1421,7 @@ mod tests { } #[test] - fn test_wallet_parse_command() { + fn test_cli_parse_command() { let test_commands = app("test", "desc", "version"); let pubkey = Pubkey::new_rand(); @@ -2043,7 +1438,7 @@ mod tests { .get_matches_from(vec!["test", "airdrop", "50", "lamports"]); assert_eq!( parse_command(&pubkey, &test_airdrop).unwrap(), - WalletCommand::Airdrop { + CliCommand::Airdrop { drone_host: None, drone_port: solana_drone::drone::DRONE_PORT, lamports: 50, @@ -2054,7 +1449,7 @@ mod tests { // Test Balance Subcommand, incl pubkey and keypair-file inputs let keypair_file = make_tmp_path("keypair_file"); gen_keypair_file(&keypair_file).unwrap(); - let keypair = read_keypair(&keypair_file).unwrap(); + let keypair = read_keypair_file(&keypair_file).unwrap(); let test_balance = test_commands.clone().get_matches_from(vec![ "test", "balance", @@ -2062,7 +1457,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_balance).unwrap(), - WalletCommand::Balance { + CliCommand::Balance { pubkey: keypair.pubkey(), use_lamports_unit: false } @@ -2075,7 +1470,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_balance).unwrap(), - WalletCommand::Balance { + CliCommand::Balance { pubkey: keypair.pubkey(), use_lamports_unit: true } @@ -2088,7 +1483,7 @@ mod tests { .get_matches_from(vec!["test", "cancel", &pubkey_string]); assert_eq!( parse_command(&pubkey, &test_cancel).unwrap(), - WalletCommand::Cancel(pubkey) + CliCommand::Cancel(pubkey) ); // Test Confirm Subcommand @@ -2100,7 +1495,7 @@ mod tests { .get_matches_from(vec!["test", "confirm", &signature_string]); assert_eq!( parse_command(&pubkey, &test_confirm).unwrap(), - WalletCommand::Confirm(signature) + CliCommand::Confirm(signature) ); let test_bad_signature = test_commands .clone() @@ -2114,7 +1509,7 @@ mod tests { .get_matches_from(vec!["test", "deploy", "/Users/test/program.o"]); assert_eq!( parse_command(&pubkey, &test_deploy).unwrap(), - WalletCommand::Deploy("/Users/test/program.o".to_string()) + CliCommand::Deploy("/Users/test/program.o".to_string()) ); // Test Simple Pay Subcommand @@ -2127,7 +1522,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_pay).unwrap(), - WalletCommand::Pay { + CliCommand::Pay { lamports: 50, to: pubkey, timestamp: None, @@ -2151,7 +1546,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_pay_multiple_witnesses).unwrap(), - WalletCommand::Pay { + CliCommand::Pay { lamports: 50, to: pubkey, timestamp: None, @@ -2171,7 +1566,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_pay_single_witness).unwrap(), - WalletCommand::Pay { + CliCommand::Pay { lamports: 50, to: pubkey, timestamp: None, @@ -2195,7 +1590,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_pay_timestamp).unwrap(), - WalletCommand::Pay { + CliCommand::Pay { lamports: 50, to: pubkey, timestamp: Some(dt), @@ -2214,7 +1609,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_send_signature).unwrap(), - WalletCommand::Witness(pubkey, pubkey) + CliCommand::Witness(pubkey, pubkey) ); let test_pay_multiple_witnesses = test_commands.clone().get_matches_from(vec![ "test", @@ -2233,7 +1628,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_pay_multiple_witnesses).unwrap(), - WalletCommand::Pay { + CliCommand::Pay { lamports: 50, to: pubkey, timestamp: Some(dt), @@ -2254,7 +1649,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_send_timestamp).unwrap(), - WalletCommand::TimeElapsed(pubkey, pubkey, dt) + CliCommand::TimeElapsed(pubkey, pubkey, dt) ); let test_bad_timestamp = test_commands.clone().get_matches_from(vec![ "test", @@ -2268,40 +1663,40 @@ mod tests { } #[test] - fn test_wallet_process_command() { + fn test_cli_process_command() { // Success cases - let mut config = WalletConfig::default(); + let mut config = CliConfig::default(); config.rpc_client = Some(RpcClient::new_mock("succeeds".to_string())); let keypair = Keypair::new(); let pubkey = keypair.pubkey().to_string(); config.keypair = keypair; - config.command = WalletCommand::Address; + config.command = CliCommand::Address; assert_eq!(process_command(&config).unwrap(), pubkey); - config.command = WalletCommand::Balance { + config.command = CliCommand::Balance { pubkey: config.keypair.pubkey(), use_lamports_unit: true, }; assert_eq!(process_command(&config).unwrap(), "50 lamports"); - config.command = WalletCommand::Balance { + config.command = CliCommand::Balance { pubkey: config.keypair.pubkey(), use_lamports_unit: false, }; assert_eq!(process_command(&config).unwrap(), "0 SOL"); let process_id = Pubkey::new_rand(); - config.command = WalletCommand::Cancel(process_id); + config.command = CliCommand::Cancel(process_id); assert_eq!(process_command(&config).unwrap(), SIGNATURE); let good_signature = Signature::new(&bs58::decode(SIGNATURE).into_vec().unwrap()); - config.command = WalletCommand::Confirm(good_signature); + config.command = CliCommand::Confirm(good_signature); assert_eq!(process_command(&config).unwrap(), "Confirmed"); let bob_pubkey = Pubkey::new_rand(); let node_pubkey = Pubkey::new_rand(); - config.command = WalletCommand::CreateVoteAccount( + config.command = CliCommand::CreateVoteAccount( bob_pubkey, VoteInit { node_pubkey, @@ -2315,13 +1710,13 @@ mod tests { let new_authorized_pubkey = Pubkey::new_rand(); config.command = - WalletCommand::VoteAuthorize(bob_pubkey, new_authorized_pubkey, VoteAuthorize::Voter); + CliCommand::VoteAuthorize(bob_pubkey, new_authorized_pubkey, VoteAuthorize::Voter); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); let bob_pubkey = Pubkey::new_rand(); let custodian = Pubkey::new_rand(); - config.command = WalletCommand::CreateStakeAccount( + config.command = CliCommand::CreateStakeAccount( bob_pubkey, Authorized { staker: config.keypair.pubkey(), @@ -2335,23 +1730,22 @@ mod tests { let stake_pubkey = Pubkey::new_rand(); let to_pubkey = Pubkey::new_rand(); - config.command = WalletCommand::WithdrawStake(stake_pubkey, to_pubkey, 100); + config.command = CliCommand::WithdrawStake(stake_pubkey, to_pubkey, 100); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); let stake_pubkey = Pubkey::new_rand(); - let vote_pubkey = Pubkey::new_rand(); - config.command = WalletCommand::DeactivateStake(stake_pubkey, vote_pubkey); + config.command = CliCommand::DeactivateStake(stake_pubkey); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); - config.command = WalletCommand::GetSlot; + config.command = CliCommand::GetSlot; assert_eq!(process_command(&config).unwrap(), "0"); - config.command = WalletCommand::GetTransactionCount; + config.command = CliCommand::GetTransactionCount; assert_eq!(process_command(&config).unwrap(), "1234"); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -2364,7 +1758,7 @@ mod tests { let date_string = "\"2018-09-19T17:30:59Z\""; let dt: DateTime = serde_json::from_str(&date_string).unwrap(); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: Some(dt), @@ -2385,7 +1779,7 @@ mod tests { ); let witness = Pubkey::new_rand(); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -2406,17 +1800,17 @@ mod tests { ); let process_id = Pubkey::new_rand(); - config.command = WalletCommand::TimeElapsed(bob_pubkey, process_id, dt); + config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); let witness = Pubkey::new_rand(); - config.command = WalletCommand::Witness(bob_pubkey, witness); + config.command = CliCommand::Witness(bob_pubkey, witness); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); // Need airdrop cases - config.command = WalletCommand::Airdrop { + config.command = CliCommand::Airdrop { drone_host: None, drone_port: 1234, lamports: 50, @@ -2425,25 +1819,25 @@ mod tests { assert!(process_command(&config).is_ok()); config.rpc_client = Some(RpcClient::new_mock("airdrop".to_string())); - config.command = WalletCommand::TimeElapsed(bob_pubkey, process_id, dt); + config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); let witness = Pubkey::new_rand(); - config.command = WalletCommand::Witness(bob_pubkey, witness); + config.command = CliCommand::Witness(bob_pubkey, witness); let signature = process_command(&config); assert_eq!(signature.unwrap(), SIGNATURE.to_string()); // sig_not_found case config.rpc_client = Some(RpcClient::new_mock("sig_not_found".to_string())); let missing_signature = Signature::new(&bs58::decode("5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW").into_vec().unwrap()); - config.command = WalletCommand::Confirm(missing_signature); + config.command = CliCommand::Confirm(missing_signature); assert_eq!(process_command(&config).unwrap(), "Not found"); // Tx error case config.rpc_client = Some(RpcClient::new_mock("account_in_use".to_string())); let any_signature = Signature::new(&bs58::decode(SIGNATURE).into_vec().unwrap()); - config.command = WalletCommand::Confirm(any_signature); + config.command = CliCommand::Confirm(any_signature); assert_eq!( process_command(&config).unwrap(), format!( @@ -2455,7 +1849,7 @@ mod tests { // Failure cases config.rpc_client = Some(RpcClient::new_mock("fails".to_string())); - config.command = WalletCommand::Airdrop { + config.command = CliCommand::Airdrop { drone_host: None, drone_port: 1234, lamports: 50, @@ -2463,13 +1857,13 @@ mod tests { }; assert!(process_command(&config).is_err()); - config.command = WalletCommand::Balance { + config.command = CliCommand::Balance { pubkey: config.keypair.pubkey(), use_lamports_unit: false, }; assert!(process_command(&config).is_err()); - config.command = WalletCommand::CreateVoteAccount( + config.command = CliCommand::CreateVoteAccount( bob_pubkey, VoteInit { node_pubkey, @@ -2480,16 +1874,16 @@ mod tests { ); assert!(process_command(&config).is_err()); - config.command = WalletCommand::VoteAuthorize(bob_pubkey, bob_pubkey, VoteAuthorize::Voter); + config.command = CliCommand::VoteAuthorize(bob_pubkey, bob_pubkey, VoteAuthorize::Voter); assert!(process_command(&config).is_err()); - config.command = WalletCommand::GetSlot; + config.command = CliCommand::GetSlot; assert!(process_command(&config).is_err()); - config.command = WalletCommand::GetTransactionCount; + config.command = CliCommand::GetTransactionCount; assert!(process_command(&config).is_err()); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -2499,7 +1893,7 @@ mod tests { }; assert!(process_command(&config).is_err()); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: Some(dt), @@ -2509,7 +1903,7 @@ mod tests { }; assert!(process_command(&config).is_err()); - config.command = WalletCommand::Pay { + config.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -2519,12 +1913,12 @@ mod tests { }; assert!(process_command(&config).is_err()); - config.command = WalletCommand::TimeElapsed(bob_pubkey, process_id, dt); + config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt); assert!(process_command(&config).is_err()); } #[test] - fn test_wallet_deploy() { + fn test_cli_deploy() { solana_logger::setup(); let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); pathbuf.push("tests"); @@ -2533,10 +1927,10 @@ mod tests { pathbuf.set_extension("so"); // Success case - let mut config = WalletConfig::default(); + let mut config = CliConfig::default(); config.rpc_client = Some(RpcClient::new_mock("deploy_succeeds".to_string())); - config.command = WalletCommand::Deploy(pathbuf.to_str().unwrap().to_string()); + config.command = CliCommand::Deploy(pathbuf.to_str().unwrap().to_string()); let result = process_command(&config); let json: Value = serde_json::from_str(&result.unwrap()).unwrap(); let program_id = json @@ -2550,7 +1944,7 @@ mod tests { assert!(program_id.parse::().is_ok()); // Failure case - config.command = WalletCommand::Deploy("bad/file/location.so".to_string()); + config.command = CliCommand::Deploy("bad/file/location.so".to_string()); assert!(process_command(&config).is_err()); } } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs new file mode 100644 index 00000000000000..853b7dde8edc4a --- /dev/null +++ b/cli/src/cluster_query.rs @@ -0,0 +1,427 @@ +use crate::{ + cli::{ + build_balance_message, check_account_for_fee, CliCommand, CliConfig, CliError, + ProcessResult, + }, + display::println_name_value, +}; +use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}; +use console::{style, Emoji}; +use serde_json::Value; +use solana_client::rpc_client::RpcClient; +use solana_sdk::{ + clock, + hash::Hash, + signature::{Keypair, KeypairUtil}, + system_transaction, +}; +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; + +static CHECK_MARK: Emoji = Emoji("✅ ", ""); +static CROSS_MARK: Emoji = Emoji("❌ ", ""); + +pub trait ClusterQuerySubCommands { + fn cluster_query_subcommands(self) -> Self; +} + +impl ClusterQuerySubCommands for App<'_, '_> { + fn cluster_query_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("cluster-version") + .about("Get the version of the cluster entrypoint"), + ) + .subcommand(SubCommand::with_name("fees").about("Display current cluster fees")) + .subcommand( + SubCommand::with_name("get-epoch-info") + .about("Get information about the current epoch"), + ) + .subcommand( + SubCommand::with_name("get-genesis-blockhash").about("Get the genesis blockhash"), + ) + .subcommand(SubCommand::with_name("get-slot").about("Get current slot")) + .subcommand( + SubCommand::with_name("get-transaction-count").about("Get current transaction count"), + ) + .subcommand( + SubCommand::with_name("ping") + .about("Submit transactions sequentially") + .arg( + Arg::with_name("interval") + .short("i") + .long("interval") + .value_name("SECONDS") + .takes_value(true) + .default_value("2") + .help("Wait interval seconds between submitting the next transaction"), + ) + .arg( + Arg::with_name("count") + .short("c") + .long("count") + .value_name("NUMBER") + .takes_value(true) + .help("Stop after submitting count transactions"), + ) + .arg( + Arg::with_name("timeout") + .short("t") + .long("timeout") + .value_name("SECONDS") + .takes_value(true) + .default_value("10") + .help("Wait up to timeout seconds for transaction confirmation"), + ), + ) + .subcommand( + SubCommand::with_name("show-validators") + .about("Show information about the current validators") + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(false) + .help("Display balance in lamports instead of SOL"), + ), + ) + } +} + +pub fn parse_cluster_ping(matches: &ArgMatches<'_>) -> Result { + let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64)); + let count = if matches.is_present("count") { + Some(value_t_or_exit!(matches, "count", u64)) + } else { + None + }; + let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64)); + Ok(CliCommand::Ping { + interval, + count, + timeout, + }) +} + +pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result { + let use_lamports_unit = matches.is_present("lamports"); + + Ok(CliCommand::ShowValidators { use_lamports_unit }) +} + +pub fn process_cluster_version(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult { + let remote_version: Value = serde_json::from_str(&rpc_client.get_version()?)?; + println!( + "{} {}", + style("Cluster versions from:").bold(), + config.json_rpc_url + ); + if let Some(versions) = remote_version.as_object() { + for (key, value) in versions.iter() { + if let Some(value_string) = value.as_str() { + println_name_value(&format!("* {}:", key), &value_string); + } + } + } + Ok("".to_string()) +} + +pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult { + let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; + + Ok(format!( + "blockhash: {}\nlamports per signature: {}", + recent_blockhash, fee_calculator.lamports_per_signature + )) +} + +pub fn process_get_epoch_info(rpc_client: &RpcClient) -> ProcessResult { + let epoch_info = rpc_client.get_epoch_info()?; + println!(); + println_name_value("Current epoch:", &epoch_info.epoch.to_string()); + println_name_value("Current slot:", &epoch_info.absolute_slot.to_string()); + println_name_value( + "Total slots in current epoch:", + &epoch_info.slots_in_epoch.to_string(), + ); + let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index; + println_name_value( + "Remaining slots in current epoch:", + &remaining_slots_in_epoch.to_string(), + ); + + let remaining_time_in_epoch = Duration::from_secs( + remaining_slots_in_epoch * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND, + ); + println_name_value( + "Time remaining in current epoch:", + &format!( + "{} minutes, {} seconds", + remaining_time_in_epoch.as_secs() / 60, + remaining_time_in_epoch.as_secs() % 60 + ), + ); + Ok("".to_string()) +} + +pub fn process_get_genesis_blockhash(rpc_client: &RpcClient) -> ProcessResult { + let genesis_blockhash = rpc_client.get_genesis_blockhash()?; + Ok(genesis_blockhash.to_string()) +} + +pub fn process_get_slot(rpc_client: &RpcClient) -> ProcessResult { + let slot = rpc_client.get_slot()?; + Ok(slot.to_string()) +} + +pub fn process_get_transaction_count(rpc_client: &RpcClient) -> ProcessResult { + let transaction_count = rpc_client.get_transaction_count()?; + Ok(transaction_count.to_string()) +} + +pub fn process_ping( + rpc_client: &RpcClient, + config: &CliConfig, + interval: &Duration, + count: &Option, + timeout: &Duration, +) -> ProcessResult { + let to = Keypair::new().pubkey(); + + println_name_value("Source account:", &config.keypair.pubkey().to_string()); + println_name_value("Destination account:", &to.to_string()); + println!(); + + let (signal_sender, signal_receiver) = std::sync::mpsc::channel(); + ctrlc::set_handler(move || { + let _ = signal_sender.send(()); + }) + .expect("Error setting Ctrl-C handler"); + + let mut last_blockhash = Hash::default(); + let mut submit_count = 0; + let mut confirmed_count = 0; + let mut confirmation_time: VecDeque = VecDeque::with_capacity(1024); + + 'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) { + let (recent_blockhash, fee_calculator) = rpc_client.get_new_blockhash(&last_blockhash)?; + last_blockhash = recent_blockhash; + + let transaction = system_transaction::transfer(&config.keypair, &to, 1, recent_blockhash); + check_account_for_fee(rpc_client, config, &fee_calculator, &transaction.message)?; + + match rpc_client.send_transaction(&transaction) { + Ok(signature) => { + let transaction_sent = Instant::now(); + loop { + let signature_status = rpc_client.get_signature_status(&signature)?; + let elapsed_time = Instant::now().duration_since(transaction_sent); + if let Some(transaction_status) = signature_status { + match transaction_status { + Ok(()) => { + let elapsed_time_millis = elapsed_time.as_millis() as u64; + confirmation_time.push_back(elapsed_time_millis); + println!( + "{}1 lamport transferred: seq={:<3} time={:>4}ms signature={}", + CHECK_MARK, seq, elapsed_time_millis, signature + ); + confirmed_count += 1; + } + Err(err) => { + println!( + "{}Transaction failed: seq={:<3} error={:?} signature={}", + CROSS_MARK, seq, err, signature + ); + } + } + break; + } + + if elapsed_time >= *timeout { + println!( + "{}Confirmation timeout: seq={:<3} signature={}", + CROSS_MARK, seq, signature + ); + break; + } + + // Sleep for half a slot + if signal_receiver + .recv_timeout(Duration::from_millis( + 500 * solana_sdk::clock::DEFAULT_TICKS_PER_SLOT + / solana_sdk::clock::DEFAULT_TICKS_PER_SECOND, + )) + .is_ok() + { + break 'mainloop; + } + } + } + Err(err) => { + println!( + "{}Submit failed: seq={:<3} error={:?}", + CROSS_MARK, seq, err + ); + } + } + submit_count += 1; + + if signal_receiver.recv_timeout(*interval).is_ok() { + break 'mainloop; + } + } + + println!(); + println!("--- transaction statistics ---"); + println!( + "{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss", + submit_count, + confirmed_count, + (100. - f64::from(confirmed_count) / f64::from(submit_count) * 100.) + ); + if !confirmation_time.is_empty() { + let samples: Vec = confirmation_time.iter().map(|t| *t as f64).collect(); + let dist = criterion_stats::Distribution::from(samples.into_boxed_slice()); + let mean = dist.mean(); + println!( + "confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms", + dist.min(), + mean, + dist.max(), + dist.std_dev(Some(mean)) + ); + } + + Ok("".to_string()) +} + +pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult { + let vote_accounts = rpc_client.get_vote_accounts()?; + let total_activate_stake = vote_accounts + .current + .iter() + .chain(vote_accounts.delinquent.iter()) + .fold(0, |acc, vote_account| acc + vote_account.activated_stake) + as f64; + + println!( + "{}", + style(format!( + "{:<44} {:<44} {:<11} {:>10} {:>11} {}", + "Identity Pubkey", + "Vote Account Pubkey", + "Commission", + "Last Vote", + "Root Block", + "Active Stake", + )) + .bold() + ); + + for vote_account in vote_accounts + .current + .iter() + .chain(vote_accounts.delinquent.iter()) + { + fn non_zero_or_dash(v: u64) -> String { + if v == 0 { + "-".into() + } else { + format!("{}", v) + } + } + + println!( + "{:<44} {:<44} {:>3} ({:>4.1}%) {:>10} {:>11} {:>11}", + vote_account.node_pubkey, + vote_account.vote_pubkey, + vote_account.commission, + f64::from(vote_account.commission) * 100.0 / f64::from(std::u8::MAX), + non_zero_or_dash(vote_account.last_vote), + non_zero_or_dash(vote_account.root_slot), + if vote_account.activated_stake > 0 { + format!( + "{} ({:.2}%)", + build_balance_message(vote_account.activated_stake, use_lamports_unit), + 100. * vote_account.activated_stake as f64 / total_activate_stake + ) + } else { + "-".into() + }, + ); + } + + Ok("".to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{app, parse_command}; + use solana_sdk::pubkey::Pubkey; + + #[test] + fn test_parse_command() { + let test_commands = app("test", "desc", "version"); + let pubkey = Pubkey::new_rand(); + + let test_cluster_version = test_commands + .clone() + .get_matches_from(vec!["test", "cluster-version"]); + assert_eq!( + parse_command(&pubkey, &test_cluster_version).unwrap(), + CliCommand::ClusterVersion + ); + + let test_fees = test_commands.clone().get_matches_from(vec!["test", "fees"]); + assert_eq!( + parse_command(&pubkey, &test_fees).unwrap(), + CliCommand::Fees + ); + + let test_get_epoch_info = test_commands + .clone() + .get_matches_from(vec!["test", "get-epoch-info"]); + assert_eq!( + parse_command(&pubkey, &test_get_epoch_info).unwrap(), + CliCommand::GetEpochInfo + ); + + let test_get_genesis_blockhash = test_commands + .clone() + .get_matches_from(vec!["test", "get-genesis-blockhash"]); + assert_eq!( + parse_command(&pubkey, &test_get_genesis_blockhash).unwrap(), + CliCommand::GetGenesisBlockhash + ); + + let test_get_slot = test_commands + .clone() + .get_matches_from(vec!["test", "get-slot"]); + assert_eq!( + parse_command(&pubkey, &test_get_slot).unwrap(), + CliCommand::GetSlot + ); + + let test_transaction_count = test_commands + .clone() + .get_matches_from(vec!["test", "get-transaction-count"]); + assert_eq!( + parse_command(&pubkey, &test_transaction_count).unwrap(), + CliCommand::GetTransactionCount + ); + + let test_ping = test_commands + .clone() + .get_matches_from(vec!["test", "ping", "-i", "1", "-c", "2", "-t", "3"]); + assert_eq!( + parse_command(&pubkey, &test_ping).unwrap(), + CliCommand::Ping { + interval: Duration::from_secs(1), + count: Some(2), + timeout: Duration::from_secs(3), + } + ); + } + // TODO: Add process tests +} diff --git a/cli/src/config.rs b/cli/src/config.rs index e27556f3ad70fe..c455e870faff98 100644 --- a/cli/src/config.rs +++ b/cli/src/config.rs @@ -7,7 +7,7 @@ use std::path::Path; lazy_static! { pub static ref CONFIG_FILE: Option = { dirs::home_dir().map(|mut path| { - path.extend(&[".config", "solana", "wallet", "config.yml"]); + path.extend(&[".config", "solana", "cli", "config.yml"]); path.to_str().unwrap().to_string() }) }; diff --git a/cli/src/input_parsers.rs b/cli/src/input_parsers.rs index ba7e0bba1ff0a1..1cf23abcb704f6 100644 --- a/cli/src/input_parsers.rs +++ b/cli/src/input_parsers.rs @@ -2,7 +2,7 @@ use clap::ArgMatches; use solana_sdk::{ native_token::sol_to_lamports, pubkey::Pubkey, - signature::{read_keypair, Keypair, KeypairUtil}, + signature::{read_keypair_file, Keypair, KeypairUtil}, }; // Return parsed values from matches at `name` @@ -32,7 +32,7 @@ where // Return the keypair for an argument with filename `name` or None if not present. pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option { if let Some(value) = matches.value_of(name) { - read_keypair(value).ok() + read_keypair_file(value).ok() } else { None } @@ -56,7 +56,7 @@ pub fn amount_of(matches: &ArgMatches<'_>, name: &str, unit: &str) -> Option() -> App<'ab, 'v> { @@ -120,7 +120,7 @@ mod tests { fn test_keypair_of() { let keypair = Keypair::new(); let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey()); - let _ = write_keypair(&keypair, &outfile).unwrap(); + let _ = write_keypair_file(&keypair, &outfile).unwrap(); let matches = app() .clone() @@ -141,7 +141,7 @@ mod tests { fn test_pubkey_of() { let keypair = Keypair::new(); let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey()); - let _ = write_keypair(&keypair, &outfile).unwrap(); + let _ = write_keypair_file(&keypair, &outfile).unwrap(); let matches = app() .clone() diff --git a/cli/src/input_validators.rs b/cli/src/input_validators.rs index cca7c76b502f72..594a9a56dc3c72 100644 --- a/cli/src/input_validators.rs +++ b/cli/src/input_validators.rs @@ -1,5 +1,5 @@ use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::read_keypair; +use solana_sdk::signature::read_keypair_file; // Return an error if a pubkey cannot be parsed. pub fn is_pubkey(string: String) -> Result<(), String> { @@ -11,7 +11,7 @@ pub fn is_pubkey(string: String) -> Result<(), String> { // Return an error if a keypair file cannot be parsed. pub fn is_keypair(string: String) -> Result<(), String> { - read_keypair(&string) + read_keypair_file(&string) .map(|_| ()) .map_err(|err| format!("{:?}", err)) } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 5f07db925e52d7..0aa95a5b5737f9 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -1,11 +1,13 @@ #[macro_use] extern crate lazy_static; +pub mod cli; +pub mod cluster_query; pub mod config; pub mod display; pub mod input_parsers; pub mod input_validators; pub mod stake; +pub mod storage; pub mod validator_info; pub mod vote; -pub mod wallet; diff --git a/cli/src/main.rs b/cli/src/main.rs index 8b2892b7abad6d..72de05cbdd5320 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,38 +1,34 @@ use clap::{crate_description, crate_name, crate_version, Arg, ArgGroup, ArgMatches, SubCommand}; use console::style; use solana_cli::{ + cli::{app, parse_command, process_command, CliConfig, CliError}, config::{self, Config}, display::{println_name_value, println_name_value_or}, input_validators::is_url, - wallet::{app, parse_command, process_command, WalletConfig, WalletError}, }; -use solana_sdk::signature::{read_keypair, KeypairUtil}; +use solana_sdk::signature::{read_keypair_file, KeypairUtil}; use std::error; fn parse_settings(matches: &ArgMatches<'_>) -> Result> { let parse_args = match matches.subcommand() { ("get", Some(subcommand_matches)) => { if let Some(config_file) = matches.value_of("config_file") { - let default_wallet_config = WalletConfig::default(); + let default_cli_config = CliConfig::default(); let config = Config::load(config_file).unwrap_or_default(); if let Some(field) = subcommand_matches.value_of("specific_setting") { let (value, default_value) = match field { - "url" => (config.url, default_wallet_config.json_rpc_url), - "keypair" => (config.keypair, default_wallet_config.keypair_path), + "url" => (config.url, default_cli_config.json_rpc_url), + "keypair" => (config.keypair, default_cli_config.keypair_path), _ => unreachable!(), }; println_name_value_or(&format!("* {}:", field), &value, &default_value); } else { println_name_value("Wallet Config:", config_file); - println_name_value_or( - "* url:", - &config.url, - &default_wallet_config.json_rpc_url, - ); + println_name_value_or("* url:", &config.url, &default_cli_config.json_rpc_url); println_name_value_or( "* keypair:", &config.keypair, - &default_wallet_config.keypair_path, + &default_cli_config.keypair_path, ); } } else { @@ -69,7 +65,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result) -> Result> { +pub fn parse_args(matches: &ArgMatches<'_>) -> Result> { let config = if let Some(config_file) = matches.value_of("config_file") { Config::load(config_file).unwrap_or_default() } else { @@ -80,7 +76,7 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result) -> Result) -> Result Result<(), Box> { ) .subcommand( SubCommand::with_name("get") - .about("Get wallet config settings") + .about("Get cli config settings") .arg( Arg::with_name("specific_setting") .index(1) @@ -166,7 +162,7 @@ fn main() -> Result<(), Box> { ) .subcommand( SubCommand::with_name("set") - .about("Set a wallet config setting") + .about("Set a cli config setting") .group( ArgGroup::with_name("config_settings") .args(&["json_rpc_url", "keypair"]) diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 46b1496d60d1ba..7eddc13681ffd0 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -1,10 +1,10 @@ use crate::{ - input_parsers::*, - input_validators::*, - wallet::{ + cli::{ build_balance_message, check_account_for_fee, check_unique_pubkeys, - log_instruction_custom_error, ProcessResult, WalletCommand, WalletConfig, WalletError, + log_instruction_custom_error, CliCommand, CliConfig, CliError, ProcessResult, }, + input_parsers::*, + input_validators::*, }; use clap::{App, Arg, ArgMatches, SubCommand}; use solana_client::rpc_client::RpcClient; @@ -73,7 +73,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_pubkey_or_keypair) - .help("Public key of authorized staker (defaults to wallet)") + .help("Public key of authorized staker (defaults to cli config pubkey)") ) .arg( Arg::with_name("authorized_withdrawer") @@ -81,7 +81,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_pubkey_or_keypair) - .help("Public key of the authorized withdrawer (defaults to wallet)") + .help("Public key of the authorized withdrawer (defaults to cli config pubkey)") ) ) .subcommand( @@ -168,15 +168,6 @@ impl StakeSubCommands for App<'_, '_> { .required(true) .help("Stake account to be deactivated.") ) - .arg( - Arg::with_name("vote_account_pubkey") - .index(2) - .value_name("VOTE ACCOUNT") - .takes_value(true) - .required(true) - .validator(is_pubkey_or_keypair) - .help("The vote account to which the stake is currently delegated") - ) ) .subcommand( SubCommand::with_name("withdraw-stake") @@ -263,7 +254,7 @@ impl StakeSubCommands for App<'_, '_> { pub fn parse_stake_create_account( pubkey: &Pubkey, matches: &ArgMatches<'_>, -) -> Result { +) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let slot = value_of(&matches, "lockup").unwrap_or(0); let custodian = pubkey_of(matches, "custodian").unwrap_or_default(); @@ -271,7 +262,7 @@ pub fn parse_stake_create_account( let withdrawer = pubkey_of(matches, "authorized_withdrawer").unwrap_or(*pubkey); // defaults to config let lamports = amount_of(matches, "amount", "unit").expect("Invalid amount"); - Ok(WalletCommand::CreateStakeAccount( + Ok(CliCommand::CreateStakeAccount( stake_account_pubkey, Authorized { staker, withdrawer }, Lockup { custodian, slot }, @@ -279,12 +270,12 @@ pub fn parse_stake_create_account( )) } -pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result { +pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); let force = matches.is_present("force"); - Ok(WalletCommand::DelegateStake( + Ok(CliCommand::DelegateStake( stake_account_pubkey, vote_account_pubkey, force, @@ -294,53 +285,47 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result, stake_authorize: StakeAuthorize, -) -> Result { +) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let authorized_pubkey = pubkey_of(matches, "authorized_pubkey").unwrap(); - Ok(WalletCommand::StakeAuthorize( + Ok(CliCommand::StakeAuthorize( stake_account_pubkey, authorized_pubkey, stake_authorize, )) } -pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result { +pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); - Ok(WalletCommand::RedeemVoteCredits( + Ok(CliCommand::RedeemVoteCredits( stake_account_pubkey, vote_account_pubkey, )) } -pub fn parse_stake_deactivate_stake( - matches: &ArgMatches<'_>, -) -> Result { +pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); - let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); - Ok(WalletCommand::DeactivateStake( - stake_account_pubkey, - vote_account_pubkey, - )) + Ok(CliCommand::DeactivateStake(stake_account_pubkey)) } -pub fn parse_stake_withdraw_stake(matches: &ArgMatches<'_>) -> Result { +pub fn parse_stake_withdraw_stake(matches: &ArgMatches<'_>) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap(); let lamports = amount_of(matches, "amount", "unit").expect("Invalid amount"); - Ok(WalletCommand::WithdrawStake( + Ok(CliCommand::WithdrawStake( stake_account_pubkey, destination_account_pubkey, lamports, )) } -pub fn parse_show_stake_account(matches: &ArgMatches<'_>) -> Result { +pub fn parse_show_stake_account(matches: &ArgMatches<'_>) -> Result { let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let use_lamports_unit = matches.is_present("lamports"); - Ok(WalletCommand::ShowStakeAccount { + Ok(CliCommand::ShowStakeAccount { pubkey: stake_account_pubkey, use_lamports_unit, }) @@ -348,19 +333,19 @@ pub fn parse_show_stake_account(matches: &ArgMatches<'_>) -> Result ProcessResult { check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), + (&config.keypair.pubkey(), "cli keypair".to_string()), (stake_account_pubkey, "stake_account_pubkey".to_string()), )?; if rpc_client.get_account(&stake_account_pubkey).is_ok() { - return Err(WalletError::BadParameter(format!( + return Err(CliError::BadParameter(format!( "Unable to create stake account. Stake account already exists: {}", stake_account_pubkey )) @@ -371,7 +356,7 @@ pub fn process_create_stake_account( rpc_client.get_minimum_balance_for_rent_exemption(std::mem::size_of::())?; if lamports < minimum_balance { - return Err(WalletError::BadParameter(format!( + return Err(CliError::BadParameter(format!( "need atleast {} lamports for stake account to be rent exempt, provided lamports: {}", minimum_balance, lamports )) @@ -394,7 +379,7 @@ pub fn process_create_stake_account( pub fn process_stake_authorize( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, stake_account_pubkey: &Pubkey, authorized_pubkey: &Pubkey, stake_authorize: StakeAuthorize, @@ -419,15 +404,13 @@ pub fn process_stake_authorize( pub fn process_deactivate_stake_account( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, stake_account_pubkey: &Pubkey, - vote_account_pubkey: &Pubkey, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let ixs = vec![stake_instruction::deactivate_stake( stake_account_pubkey, &config.keypair.pubkey(), - vote_account_pubkey, )]; let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash); check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; @@ -437,7 +420,7 @@ pub fn process_deactivate_stake_account( pub fn process_withdraw_stake( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, stake_account_pubkey: &Pubkey, destination_account_pubkey: &Pubkey, lamports: u64, @@ -459,7 +442,7 @@ pub fn process_withdraw_stake( pub fn process_redeem_vote_credits( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, stake_account_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, ) -> ProcessResult { @@ -481,13 +464,13 @@ pub fn process_redeem_vote_credits( pub fn process_show_stake_account( rpc_client: &RpcClient, - _config: &WalletConfig, + _config: &CliConfig, stake_account_pubkey: &Pubkey, use_lamports_unit: bool, ) -> ProcessResult { let stake_account = rpc_client.get_account(stake_account_pubkey)?; if stake_account.owner != solana_stake_api::id() { - return Err(WalletError::RpcRequestError( + return Err(CliError::RpcRequestError( format!("{:?} is not a stake account", stake_account_pubkey).to_string(), ) .into()); @@ -536,7 +519,7 @@ pub fn process_show_stake_account( show_lockup(&lockup); Ok("".to_string()) } - Err(err) => Err(WalletError::RpcRequestError(format!( + Err(err) => Err(CliError::RpcRequestError(format!( "Account data could not be deserialized to stake state: {:?}", err )) @@ -546,13 +529,13 @@ pub fn process_show_stake_account( pub fn process_delegate_stake( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, stake_account_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, force: bool, ) -> ProcessResult { check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), + (&config.keypair.pubkey(), "cli keypair".to_string()), (stake_account_pubkey, "stake_account_pubkey".to_string()), )?; @@ -561,23 +544,23 @@ pub fn process_delegate_stake( let vote_account_data = rpc_client .get_account_data(vote_account_pubkey) .map_err(|_| { - WalletError::RpcRequestError(format!("Vote account not found: {}", vote_account_pubkey)) + CliError::RpcRequestError(format!("Vote account not found: {}", vote_account_pubkey)) })?; let vote_state = VoteState::deserialize(&vote_account_data).map_err(|_| { - WalletError::RpcRequestError( + CliError::RpcRequestError( "Account data could not be deserialized to vote state".to_string(), ) })?; let sanity_check_result = match vote_state.root_slot { - None => Err(WalletError::BadParameter( + None => Err(CliError::BadParameter( "Unable to delegate. Vote account has no root slot".to_string(), )), Some(root_slot) => { let slot = rpc_client.get_slot()?; if root_slot + solana_sdk::clock::DEFAULT_SLOTS_PER_TURN < slot { - Err(WalletError::BadParameter( + Err(CliError::BadParameter( format!( "Unable to delegate. Vote account root slot ({}) is too old, the current slot is {}", root_slot, slot ) @@ -613,7 +596,7 @@ pub fn process_delegate_stake( #[cfg(test)] mod tests { use super::*; - use crate::wallet::{app, parse_command}; + use crate::cli::{app, parse_command}; #[test] fn test_parse_command() { @@ -629,7 +612,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_authorize_staker).unwrap(), - WalletCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Staker) + CliCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Staker) ); let test_authorize_withdrawer = test_commands.clone().get_matches_from(vec![ "test", @@ -639,7 +622,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_authorize_withdrawer).unwrap(), - WalletCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Withdrawer) + CliCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Withdrawer) ); // Test CreateStakeAccount SubCommand @@ -664,7 +647,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_stake_account).unwrap(), - WalletCommand::CreateStakeAccount( + CliCommand::CreateStakeAccount( pubkey, Authorized { staker: authorized, @@ -686,7 +669,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_stake_account2).unwrap(), - WalletCommand::CreateStakeAccount( + CliCommand::CreateStakeAccount( pubkey, Authorized { staker: pubkey, @@ -711,7 +694,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_delegate_stake).unwrap(), - WalletCommand::DelegateStake(stake_pubkey, pubkey, false,) + CliCommand::DelegateStake(stake_pubkey, pubkey, false,) ); let test_delegate_stake = test_commands.clone().get_matches_from(vec![ @@ -723,7 +706,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_delegate_stake).unwrap(), - WalletCommand::DelegateStake(stake_pubkey, pubkey, true) + CliCommand::DelegateStake(stake_pubkey, pubkey, true) ); // Test WithdrawStake Subcommand @@ -738,7 +721,7 @@ mod tests { assert_eq!( parse_command(&pubkey, &test_withdraw_stake).unwrap(), - WalletCommand::WithdrawStake(stake_pubkey, pubkey, 42) + CliCommand::WithdrawStake(stake_pubkey, pubkey, 42) ); // Test DeactivateStake Subcommand @@ -746,11 +729,10 @@ mod tests { "test", "deactivate-stake", &stake_pubkey_string, - &pubkey_string, ]); assert_eq!( parse_command(&pubkey, &test_deactivate_stake).unwrap(), - WalletCommand::DeactivateStake(stake_pubkey, pubkey) + CliCommand::DeactivateStake(stake_pubkey) ); } // TODO: Add process tests diff --git a/cli/src/storage.rs b/cli/src/storage.rs new file mode 100644 index 00000000000000..9c5393f0e5aa65 --- /dev/null +++ b/cli/src/storage.rs @@ -0,0 +1,269 @@ +use crate::{ + cli::{ + check_account_for_fee, check_unique_pubkeys, log_instruction_custom_error, CliCommand, + CliConfig, CliError, ProcessResult, + }, + input_parsers::*, + input_validators::*, +}; +use clap::{App, Arg, ArgMatches, SubCommand}; +use solana_client::rpc_client::RpcClient; +use solana_sdk::{ + account_utils::State, message::Message, pubkey::Pubkey, signature::KeypairUtil, + system_instruction::SystemError, transaction::Transaction, +}; +use solana_storage_api::storage_instruction::{self, StorageAccountType}; + +pub trait StorageSubCommands { + fn storage_subcommands(self) -> Self; +} + +impl StorageSubCommands for App<'_, '_> { + fn storage_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("create-replicator-storage-account") + .about("Create a replicator storage account") + .arg( + Arg::with_name("storage_account_owner") + .index(1) + .value_name("STORAGE ACCOUNT OWNER PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair), + ) + .arg( + Arg::with_name("storage_account_pubkey") + .index(2) + .value_name("STORAGE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair), + ), + ) + .subcommand( + SubCommand::with_name("create-validator-storage-account") + .about("Create a validator storage account") + .arg( + Arg::with_name("storage_account_owner") + .index(1) + .value_name("STORAGE ACCOUNT OWNER PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair), + ) + .arg( + Arg::with_name("storage_account_pubkey") + .index(2) + .value_name("STORAGE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair), + ), + ) + .subcommand( + SubCommand::with_name("claim-storage-reward") + .about("Redeem storage reward credits") + .arg( + Arg::with_name("node_account_pubkey") + .index(1) + .value_name("NODE PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("The node account to credit the rewards to"), + ) + .arg( + Arg::with_name("storage_account_pubkey") + .index(2) + .value_name("STORAGE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Storage account address to redeem credits for"), + ), + ) + .subcommand( + SubCommand::with_name("show-storage-account") + .about("Show the contents of a storage account") + .arg( + Arg::with_name("storage_account_pubkey") + .index(1) + .value_name("STORAGE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Storage account pubkey"), + ), + ) + } +} + +pub fn parse_storage_create_replicator_account( + matches: &ArgMatches<'_>, +) -> Result { + let account_owner = pubkey_of(matches, "storage_account_owner").unwrap(); + let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); + Ok(CliCommand::CreateStorageAccount { + account_owner, + storage_account_pubkey, + account_type: StorageAccountType::Replicator, + }) +} + +pub fn parse_storage_create_validator_account( + matches: &ArgMatches<'_>, +) -> Result { + let account_owner = pubkey_of(matches, "storage_account_owner").unwrap(); + let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); + Ok(CliCommand::CreateStorageAccount { + account_owner, + storage_account_pubkey, + account_type: StorageAccountType::Validator, + }) +} + +pub fn parse_storage_claim_reward(matches: &ArgMatches<'_>) -> Result { + let node_account_pubkey = pubkey_of(matches, "node_account_pubkey").unwrap(); + let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); + Ok(CliCommand::ClaimStorageReward { + node_account_pubkey, + storage_account_pubkey, + }) +} + +pub fn parse_storage_get_account_command(matches: &ArgMatches<'_>) -> Result { + let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap(); + Ok(CliCommand::ShowStorageAccount(storage_account_pubkey)) +} + +pub fn process_create_storage_account( + rpc_client: &RpcClient, + config: &CliConfig, + account_owner: &Pubkey, + storage_account_pubkey: &Pubkey, + account_type: StorageAccountType, +) -> ProcessResult { + check_unique_pubkeys( + (&config.keypair.pubkey(), "cli keypair".to_string()), + ( + &storage_account_pubkey, + "storage_account_pubkey".to_string(), + ), + )?; + let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; + let ixs = storage_instruction::create_storage_account( + &config.keypair.pubkey(), + &account_owner, + storage_account_pubkey, + 1, + account_type, + ); + let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash); + check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; + let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]); + log_instruction_custom_error::(result) +} + +pub fn process_claim_storage_reward( + rpc_client: &RpcClient, + config: &CliConfig, + node_account_pubkey: &Pubkey, + storage_account_pubkey: &Pubkey, +) -> ProcessResult { + let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; + + let instruction = + storage_instruction::claim_reward(node_account_pubkey, storage_account_pubkey); + let signers = [&config.keypair]; + let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey())); + + let mut tx = Transaction::new(&signers, message, recent_blockhash); + check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?; + let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?; + Ok(signature_str.to_string()) +} + +pub fn process_show_storage_account( + rpc_client: &RpcClient, + _config: &CliConfig, + storage_account_pubkey: &Pubkey, +) -> ProcessResult { + let account = rpc_client.get_account(storage_account_pubkey)?; + + if account.owner != solana_storage_api::id() { + return Err(CliError::RpcRequestError( + format!("{:?} is not a storage account", storage_account_pubkey).to_string(), + ) + .into()); + } + + use solana_storage_api::storage_contract::StorageContract; + let storage_contract: StorageContract = account.state().map_err(|err| { + CliError::RpcRequestError( + format!("Unable to deserialize storage account: {:?}", err).to_string(), + ) + })?; + println!("{:#?}", storage_contract); + println!("account lamports: {}", account.lamports); + Ok("".to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{app, parse_command}; + + #[test] + fn test_parse_command() { + let test_commands = app("test", "desc", "version"); + let pubkey = Pubkey::new_rand(); + let pubkey_string = pubkey.to_string(); + let storage_account_pubkey = Pubkey::new_rand(); + let storage_account_string = storage_account_pubkey.to_string(); + + let test_create_replicator_storage_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-replicator-storage-account", + &pubkey_string, + &storage_account_string, + ]); + assert_eq!( + parse_command(&pubkey, &test_create_replicator_storage_account).unwrap(), + CliCommand::CreateStorageAccount { + account_owner: pubkey, + storage_account_pubkey, + account_type: StorageAccountType::Replicator, + } + ); + + let test_create_validator_storage_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-validator-storage-account", + &pubkey_string, + &storage_account_string, + ]); + assert_eq!( + parse_command(&pubkey, &test_create_validator_storage_account).unwrap(), + CliCommand::CreateStorageAccount { + account_owner: pubkey, + storage_account_pubkey, + account_type: StorageAccountType::Validator, + } + ); + + let test_claim_storage_reward = test_commands.clone().get_matches_from(vec![ + "test", + "claim-storage-reward", + &pubkey_string, + &storage_account_string, + ]); + assert_eq!( + parse_command(&pubkey, &test_claim_storage_reward).unwrap(), + CliCommand::ClaimStorageReward { + node_account_pubkey: pubkey, + storage_account_pubkey, + } + ); + } + // TODO: Add process tests +} diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index 3d61efbce9f98c..a607da779a3260 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -1,6 +1,6 @@ use crate::{ + cli::{check_account_for_fee, CliCommand, CliConfig, CliError, ProcessResult}, input_validators::is_url, - wallet::{check_account_for_fee, ProcessResult, WalletCommand, WalletConfig, WalletError}, }; use bincode::deserialize; use clap::ArgMatches; @@ -142,10 +142,10 @@ fn parse_validator_info( } } -fn parse_info_pubkey(matches: &ArgMatches<'_>) -> Result, WalletError> { +fn parse_info_pubkey(matches: &ArgMatches<'_>) -> Result, CliError> { let info_pubkey = if let Some(pubkey) = matches.value_of("info_pubkey") { Some(pubkey.parse::().map_err(|err| { - WalletError::BadParameter(format!("Invalid validator info pubkey: {:?}", err)) + CliError::BadParameter(format!("Invalid validator info pubkey: {:?}", err)) })?) } else { None @@ -156,7 +156,7 @@ fn parse_info_pubkey(matches: &ArgMatches<'_>) -> Result, WalletE pub fn parse_validator_info_command( matches: &ArgMatches<'_>, validator_pubkey: &Pubkey, -) -> Result { +) -> Result { let info_pubkey = parse_info_pubkey(matches)?; // Prepare validator info let validator_info = parse_args(&matches); @@ -167,10 +167,7 @@ pub fn parse_validator_info_command( println!("--force supplied, ignoring: {:?}", result); } else { result.map_err(|err| { - WalletError::BadParameter(format!( - "Invalid validator keybase username: {:?}", - err - )) + CliError::BadParameter(format!("Invalid validator keybase username: {:?}", err)) })?; } } @@ -179,19 +176,17 @@ pub fn parse_validator_info_command( let validator_info = ValidatorInfo { info: validator_string, }; - Ok(WalletCommand::SetValidatorInfo(validator_info, info_pubkey)) + Ok(CliCommand::SetValidatorInfo(validator_info, info_pubkey)) } -pub fn parse_get_validator_info_command( - matches: &ArgMatches<'_>, -) -> Result { +pub fn parse_get_validator_info_command(matches: &ArgMatches<'_>) -> Result { let info_pubkey = parse_info_pubkey(matches)?; - Ok(WalletCommand::GetValidatorInfo(info_pubkey)) + Ok(CliCommand::GetValidatorInfo(info_pubkey)) } pub fn process_set_validator_info( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, validator_info: &ValidatorInfo, info_pubkey: Option, ) -> ProcessResult { @@ -310,7 +305,7 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option #[cfg(test)] mod tests { use super::*; - use crate::wallet::app; + use crate::cli::app; use bincode::{serialize, serialized_size}; use serde_json::json; diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 44a6f2f5cb822f..be7f60d959d201 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -1,32 +1,175 @@ use crate::{ - input_parsers::*, - wallet::{ + cli::{ build_balance_message, check_account_for_fee, check_unique_pubkeys, - log_instruction_custom_error, ProcessResult, WalletCommand, WalletConfig, WalletError, + log_instruction_custom_error, CliCommand, CliConfig, CliError, ProcessResult, }, + input_parsers::*, + input_validators::*, }; -use clap::{value_t_or_exit, ArgMatches}; +use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}; use solana_client::rpc_client::RpcClient; use solana_sdk::{ - pubkey::Pubkey, signature::KeypairUtil, system_instruction::SystemError, - transaction::Transaction, + account::Account, epoch_schedule::EpochSchedule, pubkey::Pubkey, signature::KeypairUtil, + system_instruction::SystemError, sysvar, transaction::Transaction, }; use solana_vote_api::{ vote_instruction::{self, VoteError}, vote_state::{VoteAuthorize, VoteInit, VoteState}, }; +pub trait VoteSubCommands { + fn vote_subcommands(self) -> Self; +} + +impl VoteSubCommands for App<'_, '_> { + fn vote_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("create-vote-account") + .about("Create a vote account") + .arg( + Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Vote account address to fund"), + ) + .arg( + Arg::with_name("node_pubkey") + .index(2) + .value_name("VALIDATOR PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Validator that will vote with this account"), + ) + .arg( + Arg::with_name("commission") + .long("commission") + .value_name("NUM") + .takes_value(true) + .help("The commission taken on reward redemption (0-255), default: 0"), + ) + .arg( + Arg::with_name("authorized_voter") + .long("authorized-voter") + .value_name("PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .help("Public key of the authorized voter (defaults to vote account)"), + ) + .arg( + Arg::with_name("authorized_withdrawer") + .long("authorized-withdrawer") + .value_name("PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .help("Public key of the authorized withdrawer (defaults to cli config pubkey)"), + ), + ) + .subcommand( + SubCommand::with_name("vote-authorize-voter") + .about("Authorize a new vote signing keypair for the given vote account") + .arg( + Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Vote account in which to set the authorized voter"), + ) + .arg( + Arg::with_name("new_authorized_pubkey") + .index(2) + .value_name("NEW VOTER PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("New vote signer to authorize"), + ), + ) + .subcommand( + SubCommand::with_name("vote-authorize-withdrawer") + .about("Authorize a new withdraw signing keypair for the given vote account") + .arg( + Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Vote account in which to set the authorized withdrawer"), + ) + .arg( + Arg::with_name("new_authorized_pubkey") + .index(2) + .value_name("NEW WITHDRAWER PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("New withdrawer to authorize"), + ), + ) + .subcommand( + SubCommand::with_name("show-vote-account") + .about("Show the contents of a vote account") + .arg( + Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Vote account pubkey"), + ) + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(false) + .help("Display balance in lamports instead of SOL"), + ), + ) + .subcommand( + SubCommand::with_name("uptime") + .about("Show the uptime of a validator, based on epoch voting history") + .arg( + Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE ACCOUNT PUBKEY") + .takes_value(true) + .required(true) + .validator(is_pubkey_or_keypair) + .help("Vote account pubkey"), + ) + .arg( + Arg::with_name("span") + .long("span") + .value_name("NUM OF EPOCHS") + .takes_value(true) + .help("Number of recent epochs to examine"), + ) + .arg( + Arg::with_name("aggregate") + .long("aggregate") + .help("Aggregate uptime data across span"), + ), + ) + } +} + pub fn parse_vote_create_account( pubkey: &Pubkey, matches: &ArgMatches<'_>, -) -> Result { +) -> Result { let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap(); let commission = value_of(&matches, "commission").unwrap_or(0); let authorized_voter = pubkey_of(matches, "authorized_voter").unwrap_or(vote_account_pubkey); let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer").unwrap_or(*pubkey); - Ok(WalletCommand::CreateVoteAccount( + Ok(CliCommand::CreateVoteAccount( vote_account_pubkey, VoteInit { node_pubkey, @@ -40,31 +183,44 @@ pub fn parse_vote_create_account( pub fn parse_vote_authorize( matches: &ArgMatches<'_>, vote_authorize: VoteAuthorize, -) -> Result { +) -> Result { let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); let new_authorized_pubkey = pubkey_of(matches, "new_authorized_pubkey").unwrap(); - Ok(WalletCommand::VoteAuthorize( + Ok(CliCommand::VoteAuthorize( vote_account_pubkey, new_authorized_pubkey, vote_authorize, )) } -pub fn parse_vote_get_account_command( - matches: &ArgMatches<'_>, -) -> Result { +pub fn parse_vote_get_account_command(matches: &ArgMatches<'_>) -> Result { let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); let use_lamports_unit = matches.is_present("lamports"); - Ok(WalletCommand::ShowVoteAccount { + Ok(CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, use_lamports_unit, }) } +pub fn parse_vote_uptime_command(matches: &ArgMatches<'_>) -> Result { + let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); + let aggregate = matches.is_present("aggregate"); + let span = if matches.is_present("span") { + Some(value_t_or_exit!(matches, "span", u64)) + } else { + None + }; + Ok(CliCommand::Uptime { + pubkey: vote_account_pubkey, + aggregate, + span, + }) +} + pub fn process_create_vote_account( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, vote_account_pubkey: &Pubkey, vote_init: &VoteInit, ) -> ProcessResult { @@ -73,7 +229,7 @@ pub fn process_create_vote_account( (&vote_init.node_pubkey, "node_pubkey".to_string()), )?; check_unique_pubkeys( - (&config.keypair.pubkey(), "wallet keypair".to_string()), + (&config.keypair.pubkey(), "cli keypair".to_string()), (vote_account_pubkey, "vote_account_pubkey".to_string()), )?; let required_balance = @@ -98,7 +254,7 @@ pub fn process_create_vote_account( pub fn process_vote_authorize( rpc_client: &RpcClient, - config: &WalletConfig, + config: &CliConfig, vote_account_pubkey: &Pubkey, new_authorized_pubkey: &Pubkey, vote_authorize: VoteAuthorize, @@ -121,42 +277,53 @@ pub fn process_vote_authorize( log_instruction_custom_error::(result) } -pub fn parse_vote_uptime_command(matches: &ArgMatches<'_>) -> Result { - let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); - let aggregate = matches.is_present("aggregate"); - let span = if matches.is_present("span") { - Some(value_t_or_exit!(matches, "span", u64)) - } else { - None - }; - Ok(WalletCommand::Uptime { - pubkey: vote_account_pubkey, - aggregate, - span, - }) +fn get_epoch_schedule(rpc_client: &RpcClient) -> Result> { + let epoch_schedule_account = rpc_client.get_account(&sysvar::epoch_schedule::id())?; + + if epoch_schedule_account.owner != sysvar::id() { + return Err(CliError::RpcRequestError(format!( + "{:?} is not an epoch_schedule account", + sysvar::epoch_schedule::id() + )) + .into()); + } + + let epoch_schedule = EpochSchedule::deserialize(&epoch_schedule_account)?; + + Ok(epoch_schedule) } -pub fn process_show_vote_account( +fn get_vote_account( rpc_client: &RpcClient, - _config: &WalletConfig, vote_account_pubkey: &Pubkey, - use_lamports_unit: bool, -) -> ProcessResult { +) -> Result<(Account, VoteState), Box> { let vote_account = rpc_client.get_account(vote_account_pubkey)?; if vote_account.owner != solana_vote_api::id() { - return Err(WalletError::RpcRequestError( + return Err(CliError::RpcRequestError( format!("{:?} is not a vote account", vote_account_pubkey).to_string(), ) .into()); } - let vote_state = VoteState::deserialize(&vote_account.data).map_err(|_| { - WalletError::RpcRequestError( + CliError::RpcRequestError( "Account data could not be deserialized to vote state".to_string(), ) })?; + Ok((vote_account, vote_state)) +} + +pub fn process_show_vote_account( + rpc_client: &RpcClient, + _config: &CliConfig, + vote_account_pubkey: &Pubkey, + use_lamports_unit: bool, +) -> ProcessResult { + let (vote_account, vote_state) = get_vote_account(rpc_client, vote_account_pubkey)?; + + let epoch_schedule = get_epoch_schedule(rpc_client)?; + println!( "account balance: {}", build_balance_message(vote_account.lamports, use_lamports_unit) @@ -188,14 +355,6 @@ pub fn process_show_vote_account( ); } - // TODO: Use the real GenesisBlock from the cluster. - let genesis_block = solana_sdk::genesis_block::GenesisBlock::default(); - let epoch_schedule = solana_runtime::epoch_schedule::EpochSchedule::new( - genesis_block.slots_per_epoch, - genesis_block.stakers_slot_offset, - genesis_block.epoch_warmup, - ); - println!("epoch voting history:"); for (epoch, credits, prev_credits) in vote_state.epoch_credits() { let credits_earned = credits - prev_credits; @@ -211,39 +370,20 @@ pub fn process_show_vote_account( pub fn process_uptime( rpc_client: &RpcClient, - _config: &WalletConfig, + _config: &CliConfig, vote_account_pubkey: &Pubkey, aggregate: bool, span: Option, ) -> ProcessResult { - let vote_account = rpc_client.get_account(vote_account_pubkey)?; + let (_vote_account, vote_state) = get_vote_account(rpc_client, vote_account_pubkey)?; - if vote_account.owner != solana_vote_api::id() { - return Err(WalletError::RpcRequestError( - format!("{:?} is not a vote account", vote_account_pubkey).to_string(), - ) - .into()); - } - - let vote_state = VoteState::deserialize(&vote_account.data).map_err(|_| { - WalletError::RpcRequestError( - "Account data could not be deserialized to vote state".to_string(), - ) - })?; + let epoch_schedule = get_epoch_schedule(rpc_client)?; println!("Node id: {}", vote_state.node_pubkey); println!("Authorized voter: {}", vote_state.authorized_voter); if !vote_state.votes.is_empty() { println!("Uptime:"); - // TODO: Use the real GenesisBlock from the cluster. - let genesis_block = solana_sdk::genesis_block::GenesisBlock::default(); - let epoch_schedule = solana_runtime::epoch_schedule::EpochSchedule::new( - genesis_block.slots_per_epoch, - genesis_block.stakers_slot_offset, - genesis_block.epoch_warmup, - ); - let epoch_credits_vec: Vec<(u64, u64, u64)> = vote_state.epoch_credits().copied().collect(); let epoch_credits = if let Some(x) = span { @@ -281,7 +421,7 @@ pub fn process_uptime( #[cfg(test)] mod tests { use super::*; - use crate::wallet::{app, parse_command}; + use crate::cli::{app, parse_command}; #[test] fn test_parse_command() { @@ -297,7 +437,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_authorize_voter).unwrap(), - WalletCommand::VoteAuthorize(pubkey, pubkey, VoteAuthorize::Voter) + CliCommand::VoteAuthorize(pubkey, pubkey, VoteAuthorize::Voter) ); // Test CreateVoteAccount SubCommand @@ -313,7 +453,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_vote_account).unwrap(), - WalletCommand::CreateVoteAccount( + CliCommand::CreateVoteAccount( pubkey, VoteInit { node_pubkey, @@ -331,7 +471,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_vote_account2).unwrap(), - WalletCommand::CreateVoteAccount( + CliCommand::CreateVoteAccount( pubkey, VoteInit { node_pubkey, @@ -353,7 +493,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_vote_account3).unwrap(), - WalletCommand::CreateVoteAccount( + CliCommand::CreateVoteAccount( pubkey, VoteInit { node_pubkey, @@ -374,7 +514,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &test_create_vote_account4).unwrap(), - WalletCommand::CreateVoteAccount( + CliCommand::CreateVoteAccount( pubkey, VoteInit { node_pubkey, @@ -397,7 +537,7 @@ mod tests { ]); assert_eq!( parse_command(&pubkey, &matches).unwrap(), - WalletCommand::Uptime { + CliCommand::Uptime { pubkey, aggregate: true, span: Some(4) diff --git a/cli/tests/deploy.rs b/cli/tests/deploy.rs index fcf43ff0c3b4fd..2d040d708c54a9 100644 --- a/cli/tests/deploy.rs +++ b/cli/tests/deploy.rs @@ -1,5 +1,5 @@ use serde_json::{json, Value}; -use solana_cli::wallet::{process_command, WalletCommand, WalletConfig}; +use solana_cli::cli::{process_command, CliCommand, CliConfig}; use solana_client::rpc_client::RpcClient; use solana_client::rpc_request::RpcRequest; use solana_core::validator::new_validator_for_tests; @@ -11,7 +11,7 @@ use std::path::PathBuf; use std::sync::mpsc::channel; #[test] -fn test_wallet_deploy_program() { +fn test_cli_deploy_program() { solana_logger::setup(); let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); @@ -35,9 +35,9 @@ fn test_wallet_deploy_program() { .get_minimum_balance_for_rent_exemption(program_data.len()) .unwrap(); - let mut config = WalletConfig::default(); + let mut config = CliConfig::default(); config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port()); - config.command = WalletCommand::Airdrop { + config.command = CliCommand::Airdrop { drone_host: None, drone_port: drone_addr.port(), lamports: minimum_balance_for_rent_exemption + 1, // min balance for rent exemption + leftover for tx processing @@ -45,7 +45,7 @@ fn test_wallet_deploy_program() { }; process_command(&config).unwrap(); - config.command = WalletCommand::Deploy(pathbuf.to_str().unwrap().to_string()); + config.command = CliCommand::Deploy(pathbuf.to_str().unwrap().to_string()); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); diff --git a/cli/tests/pay.rs b/cli/tests/pay.rs index 781d8ad3427ae4..94d98e9b7c4996 100644 --- a/cli/tests/pay.rs +++ b/cli/tests/pay.rs @@ -1,8 +1,6 @@ use chrono::prelude::*; use serde_json::Value; -use solana_cli::wallet::{ - process_command, request_and_confirm_airdrop, WalletCommand, WalletConfig, -}; +use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}; use solana_client::rpc_client::RpcClient; use solana_drone::drone::run_local_drone; use solana_sdk::pubkey::Pubkey; @@ -29,7 +27,7 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) { } #[test] -fn test_wallet_timestamp_tx() { +fn test_cli_timestamp_tx() { let (server, leader_data, alice, ledger_path) = new_validator_for_tests(); let bob_pubkey = Pubkey::new_rand(); @@ -39,11 +37,11 @@ fn test_wallet_timestamp_tx() { let rpc_client = RpcClient::new_socket(leader_data.rpc); - let mut config_payer = WalletConfig::default(); + let mut config_payer = CliConfig::default(); config_payer.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port()); - let mut config_witness = WalletConfig::default(); + let mut config_witness = CliConfig::default(); config_witness.json_rpc_url = config_payer.json_rpc_url.clone(); assert_ne!( @@ -66,7 +64,7 @@ fn test_wallet_timestamp_tx() { // Make transaction (from config_payer to bob_pubkey) requiring timestamp from config_witness let date_string = "\"2018-09-19T17:30:59Z\""; let dt: DateTime = serde_json::from_str(&date_string).unwrap(); - config_payer.command = WalletCommand::Pay { + config_payer.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: Some(dt), @@ -88,7 +86,7 @@ fn test_wallet_timestamp_tx() { check_balance(0, &rpc_client, &bob_pubkey); // recipient balance // Sign transaction by config_witness - config_witness.command = WalletCommand::TimeElapsed(bob_pubkey, process_id, dt); + config_witness.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt); process_command(&config_witness).unwrap(); check_balance(40, &rpc_client, &config_payer.keypair.pubkey()); // config_payer balance @@ -100,7 +98,7 @@ fn test_wallet_timestamp_tx() { } #[test] -fn test_wallet_witness_tx() { +fn test_cli_witness_tx() { let (server, leader_data, alice, ledger_path) = new_validator_for_tests(); let bob_pubkey = Pubkey::new_rand(); @@ -110,11 +108,11 @@ fn test_wallet_witness_tx() { let rpc_client = RpcClient::new_socket(leader_data.rpc); - let mut config_payer = WalletConfig::default(); + let mut config_payer = CliConfig::default(); config_payer.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port()); - let mut config_witness = WalletConfig::default(); + let mut config_witness = CliConfig::default(); config_witness.json_rpc_url = config_payer.json_rpc_url.clone(); assert_ne!( @@ -133,7 +131,7 @@ fn test_wallet_witness_tx() { .unwrap(); // Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness - config_payer.command = WalletCommand::Pay { + config_payer.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -155,7 +153,7 @@ fn test_wallet_witness_tx() { check_balance(0, &rpc_client, &bob_pubkey); // recipient balance // Sign transaction by config_witness - config_witness.command = WalletCommand::Witness(bob_pubkey, process_id); + config_witness.command = CliCommand::Witness(bob_pubkey, process_id); process_command(&config_witness).unwrap(); check_balance(40, &rpc_client, &config_payer.keypair.pubkey()); // config_payer balance @@ -167,7 +165,7 @@ fn test_wallet_witness_tx() { } #[test] -fn test_wallet_cancel_tx() { +fn test_cli_cancel_tx() { let (server, leader_data, alice, ledger_path) = new_validator_for_tests(); let bob_pubkey = Pubkey::new_rand(); @@ -177,11 +175,11 @@ fn test_wallet_cancel_tx() { let rpc_client = RpcClient::new_socket(leader_data.rpc); - let mut config_payer = WalletConfig::default(); + let mut config_payer = CliConfig::default(); config_payer.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port()); - let mut config_witness = WalletConfig::default(); + let mut config_witness = CliConfig::default(); config_witness.json_rpc_url = config_payer.json_rpc_url.clone(); assert_ne!( @@ -193,7 +191,7 @@ fn test_wallet_cancel_tx() { .unwrap(); // Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness - config_payer.command = WalletCommand::Pay { + config_payer.command = CliCommand::Pay { lamports: 10, to: bob_pubkey, timestamp: None, @@ -215,7 +213,7 @@ fn test_wallet_cancel_tx() { check_balance(0, &rpc_client, &bob_pubkey); // recipient balance // Sign transaction by config_witness - config_payer.command = WalletCommand::Cancel(process_id); + config_payer.command = CliCommand::Cancel(process_id); process_command(&config_payer).unwrap(); check_balance(50, &rpc_client, &config_payer.keypair.pubkey()); // config_payer balance diff --git a/cli/tests/request_airdrop.rs b/cli/tests/request_airdrop.rs index 5a84f86f26f72e..3eedcf09141c7a 100644 --- a/cli/tests/request_airdrop.rs +++ b/cli/tests/request_airdrop.rs @@ -1,4 +1,4 @@ -use solana_cli::wallet::{process_command, WalletCommand, WalletConfig}; +use solana_cli::cli::{process_command, CliCommand, CliConfig}; use solana_client::rpc_client::RpcClient; use solana_core::validator::new_validator_for_tests; use solana_drone::drone::run_local_drone; @@ -7,15 +7,15 @@ use std::fs::remove_dir_all; use std::sync::mpsc::channel; #[test] -fn test_wallet_request_airdrop() { +fn test_cli_request_airdrop() { let (server, leader_data, alice, ledger_path) = new_validator_for_tests(); let (sender, receiver) = channel(); run_local_drone(alice, sender, None); let drone_addr = receiver.recv().unwrap(); - let mut bob_config = WalletConfig::default(); + let mut bob_config = CliConfig::default(); bob_config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port()); - bob_config.command = WalletCommand::Airdrop { + bob_config.command = CliCommand::Airdrop { drone_host: None, drone_port: drone_addr.port(), lamports: 50, diff --git a/client/Cargo.toml b/client/Cargo.toml index 37b3b08b21074f..0238223de18c39 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -11,11 +11,11 @@ edition = "2018" [dependencies] bincode = "1.2.0" bs58 = "0.3.0" -jsonrpc-core = "13.2.0" +jsonrpc-core = "14.0.0" log = "0.4.8" rand = "0.6.5" rayon = "1.2.0" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" @@ -23,6 +23,6 @@ solana-netutil = { path = "../netutil", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.0" } [dev-dependencies] -jsonrpc-core = "13.2.0" -jsonrpc-http-server = "13.2.0" +jsonrpc-core = "14.0.0" +jsonrpc-http-server = "14.0.0" solana-logger = { path = "../logger", version = "0.20.0" } diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index 15cd9a1b27635f..6be99c2b86e9fc 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -2,7 +2,7 @@ use crate::client_error::ClientError; use crate::generic_rpc_client_request::GenericRpcClientRequest; use crate::mock_rpc_client_request::MockRpcClientRequest; use crate::rpc_client_request::RpcClientRequest; -use crate::rpc_request::{RpcEpochInfo, RpcRequest}; +use crate::rpc_request::{RpcEpochInfo, RpcRequest, RpcVoteAccountStatus}; use bincode::serialize; use log::*; use serde_json::{json, Value}; @@ -100,6 +100,25 @@ impl RpcClient { }) } + pub fn get_vote_accounts(&self) -> io::Result { + let response = self + .client + .send(&RpcRequest::GetVoteAccounts, None, 0) + .map_err(|err| { + io::Error::new( + io::ErrorKind::Other, + format!("GetVoteAccounts request failure: {:?}", err), + ) + })?; + + serde_json::from_value(response).map_err(|err| { + io::Error::new( + io::ErrorKind::Other, + format!("GetVoteAccounts parse failure: {}", err), + ) + }) + } + pub fn get_epoch_info(&self) -> io::Result { let response = self .client @@ -162,7 +181,7 @@ impl RpcClient { transaction: &mut Transaction, signer_keys: &[&T], ) -> Result { - let mut send_retries = 5; + let mut send_retries = 20; loop { let mut status_retries = 4; let signature_str = self.send_transaction(transaction)?; @@ -679,20 +698,20 @@ impl RpcClient { }) } - pub fn fullnode_exit(&self) -> io::Result { + pub fn validator_exit(&self) -> io::Result { let response = self .client - .send(&RpcRequest::FullnodeExit, None, 0) + .send(&RpcRequest::ValidatorExit, None, 0) .map_err(|err| { io::Error::new( io::ErrorKind::Other, - format!("FullnodeExit request failure: {:?}", err), + format!("ValidatorExit request failure: {:?}", err), ) })?; serde_json::from_value(response).map_err(|err| { io::Error::new( io::ErrorKind::Other, - format!("FullnodeExit parse failure: {:?}", err), + format!("ValidatorExit parse failure: {:?}", err), ) }) } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index 7f409579a02ff9..3a3b04d1980a43 100644 --- a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -17,11 +17,43 @@ pub struct RpcEpochInfo { pub absolute_slot: u64, } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcVoteAccountStatus { + pub current: Vec, + pub delinquent: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcVoteAccountInfo { + /// Vote account pubkey as base-58 encoded string + pub vote_pubkey: String, + + /// The pubkey of the node that votes using this account + pub node_pubkey: String, + + /// The current stake, in lamports, delegated to this vote account + pub activated_stake: u64, + + /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout + pub commission: u8, + + /// Whether this account is staked for the current epoch + pub epoch_vote_account: bool, + + /// Most recent slot voted on by this vote account (0 if no votes exist) + pub last_vote: u64, + + /// Current root slot for this vote account (0 if not root slot exists) + pub root_slot: u64, +} + #[derive(Debug, PartialEq)] pub enum RpcRequest { ConfirmTransaction, DeregisterNode, - FullnodeExit, + ValidatorExit, GetAccountInfo, GetBalance, GetClusterNodes, @@ -54,7 +86,7 @@ impl RpcRequest { let method = match self { RpcRequest::ConfirmTransaction => "confirmTransaction", RpcRequest::DeregisterNode => "deregisterNode", - RpcRequest::FullnodeExit => "fullnodeExit", + RpcRequest::ValidatorExit => "validatorExit", RpcRequest::GetAccountInfo => "getAccountInfo", RpcRequest::GetBalance => "getBalance", RpcRequest::GetClusterNodes => "getClusterNodes", diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index bf45a66b637b50..9cf72fcbc76e5b 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -268,8 +268,8 @@ impl ThinClient { self.rpc_client().check_signature(signature) } - pub fn fullnode_exit(&self) -> io::Result { - self.rpc_client().fullnode_exit() + pub fn validator_exit(&self) -> io::Result { + self.rpc_client().validator_exit() } pub fn get_num_blocks_since_signature_confirmation( diff --git a/core/Cargo.toml b/core/Cargo.toml index 51c840f09056dd..e9963fdfad603d 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -26,19 +26,19 @@ chrono = { version = "0.4.9", features = ["serde"] } core_affinity = "0.5.9" crc = { version = "1.8.1", optional = true } crossbeam-channel = "0.3" -dir-diff = "0.3.1" +dir-diff = "0.3.2" dlopen = "0.1.8" dlopen_derive = "0.1.4" fs_extra = "1.1.0" indexmap = "1.1" itertools = "0.8.0" -jsonrpc-core = "13.2.0" -jsonrpc-derive = "13.2.0" -jsonrpc-http-server = "13.2.0" -jsonrpc-pubsub = "13.2.0" -jsonrpc-ws-server = "13.2.0" +jsonrpc-core = "14.0.0" +jsonrpc-derive = "14.0.0" +jsonrpc-http-server = "14.0.0" +jsonrpc-pubsub = "14.0.0" +jsonrpc-ws-server = "14.0.0" lazy_static = "1.4.0" -libc = "0.2.62" +libc = "0.2.64" log = "0.4.8" memmap = { version = "0.7.0", optional = true } nix = "0.15.0" @@ -89,7 +89,7 @@ features = ["lz4"] [dev-dependencies] hex-literal = "0.2.1" matches = "0.1.6" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } serial_test = "0.2.0" serial_test_derive = "0.2.0" @@ -111,6 +111,12 @@ name = "sigverify_stage" [[bench]] name = "poh" +[[bench]] +name = "retransmit_stage" + +[[bench]] +name = "cluster_info" + [[bench]] name = "chacha" required-features = ["chacha"] diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 01154e1d9773b0..3251d9afef3f49 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -275,7 +275,7 @@ fn simulate_process_entries( initial_lamports: u64, num_accounts: usize, ) { - let bank = Bank::new(genesis_block); + let bank = Arc::new(Bank::new(genesis_block)); for i in 0..(num_accounts / 2) { bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey()) diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs index 6e21b932dd0a9b..207c9a0d5c3142 100644 --- a/core/benches/cluster_info.rs +++ b/core/benches/cluster_info.rs @@ -32,8 +32,9 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64); } bencher.iter(move || { + let shreds = shreds.clone(); cluster_info - .broadcast_shreds(&socket, &shreds, &seeds, Some(&stakes)) + .broadcast_shreds(&socket, shreds, &seeds, Some(&stakes)) .unwrap(); }); } diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs new file mode 100644 index 00000000000000..a14e967aecb62f --- /dev/null +++ b/core/benches/retransmit_stage.rs @@ -0,0 +1,125 @@ +#![feature(test)] + +extern crate solana_core; +extern crate test; + +use log::*; +use solana_core::bank_forks::BankForks; +use solana_core::cluster_info::{ClusterInfo, Node}; +use solana_core::contact_info::ContactInfo; +use solana_core::genesis_utils::{create_genesis_block, GenesisBlockInfo}; +use solana_core::leader_schedule_cache::LeaderScheduleCache; +use solana_core::packet::to_packets_chunked; +use solana_core::retransmit_stage::retransmitter; +use solana_core::test_tx::test_tx; +use solana_measure::measure::Measure; +use solana_runtime::bank::Bank; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::timing::timestamp; +use std::net::UdpSocket; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::mpsc::channel; +use std::sync::Mutex; +use std::sync::{Arc, RwLock}; +use std::thread::sleep; +use std::thread::Builder; +use std::time::Duration; +use test::Bencher; + +#[bench] +fn bench_retransmitter(bencher: &mut Bencher) { + solana_logger::setup(); + let mut cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + const NUM_PEERS: usize = 4; + let mut peer_sockets = Vec::new(); + for _ in 0..NUM_PEERS { + let id = Pubkey::new_rand(); + let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + let mut contact_info = ContactInfo::new_localhost(&id, timestamp()); + contact_info.tvu = socket.local_addr().unwrap(); + contact_info.tvu.set_ip("127.0.0.1".parse().unwrap()); + contact_info.tvu_forwards = contact_info.tvu; + info!("local: {:?}", contact_info.tvu); + cluster_info.insert_info(contact_info); + socket.set_nonblocking(true).unwrap(); + peer_sockets.push(socket); + } + let peer_sockets = Arc::new(peer_sockets); + let cluster_info = Arc::new(RwLock::new(cluster_info)); + + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(100_000); + let bank0 = Bank::new(&genesis_block); + let bank_forks = BankForks::new(0, bank0); + let bank = bank_forks.working_bank(); + let bank_forks = Arc::new(RwLock::new(bank_forks)); + let (packet_sender, packet_receiver) = channel(); + let packet_receiver = Arc::new(Mutex::new(packet_receiver)); + const NUM_THREADS: usize = 2; + let sockets = (0..NUM_THREADS) + .map(|_| UdpSocket::bind("0.0.0.0:0").unwrap()) + .collect(); + + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + + // To work reliably with higher values, this needs larger udp rmem size + let tx = test_tx(); + const NUM_PACKETS: usize = 50; + let chunk_size = NUM_PACKETS / (4 * NUM_THREADS); + let batches = to_packets_chunked(&vec![tx; NUM_PACKETS], chunk_size); + info!("batches: {}", batches.len()); + + let retransmitter_handles = retransmitter( + Arc::new(sockets), + bank_forks, + &leader_schedule_cache, + cluster_info, + packet_receiver, + ); + + let total = Arc::new(AtomicUsize::new(0)); + bencher.iter(move || { + let peer_sockets1 = peer_sockets.clone(); + let handles: Vec<_> = (0..NUM_PEERS) + .into_iter() + .map(|p| { + let peer_sockets2 = peer_sockets1.clone(); + let total2 = total.clone(); + Builder::new() + .name("recv".to_string()) + .spawn(move || { + info!("{} waiting on {:?}", p, peer_sockets2[p]); + let mut buf = [0u8; 1024]; + loop { + while peer_sockets2[p].recv(&mut buf).is_ok() { + total2.fetch_add(1, Ordering::Relaxed); + } + if total2.load(Ordering::Relaxed) >= NUM_PACKETS { + break; + } + info!("{} recv", total2.load(Ordering::Relaxed)); + sleep(Duration::from_millis(1)); + } + }) + .unwrap() + }) + .collect(); + + for packets in batches.clone() { + packet_sender.send(packets).unwrap(); + } + info!("sent..."); + + let mut join_time = Measure::start("join"); + for h in handles { + h.join().unwrap(); + } + join_time.stop(); + info!("took: {}ms", join_time.as_ms()); + + total.store(0, Ordering::Relaxed); + }); + + for t in retransmitter_handles { + t.join().unwrap(); + } +} diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index 3dddc12338d5a7..7a496397f6a368 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -2,32 +2,85 @@ extern crate test; -use solana_core::shred::{Shredder, RECOMMENDED_FEC_RATE}; +use solana_core::entry::create_ticks; +use solana_core::entry::Entry; +use solana_core::shred::{ + max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE, + SIZE_OF_SHRED_HEADER, +}; +use solana_core::test_tx; +use solana_sdk::hash::Hash; +use solana_sdk::packet::PACKET_DATA_SIZE; use solana_sdk::signature::{Keypair, KeypairUtil}; use std::sync::Arc; use test::Bencher; +fn make_test_entry(txs_per_entry: u64) -> Entry { + Entry { + num_hashes: 100_000, + hash: Hash::default(), + transactions: vec![test_tx::test_tx(); txs_per_entry as usize], + } +} +fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec { + (0..num_entries) + .map(|_| make_test_entry(txs_per_entry)) + .collect() +} + +#[bench] +fn bench_shredder_ticks(bencher: &mut Bencher) { + let kp = Arc::new(Keypair::new()); + let shred_size = PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER; + let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size; + // ~1Mb + let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64; + let entries = create_ticks(num_ticks, Hash::default()); + bencher.iter(|| { + let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap(); + shredder.entries_to_shreds(&entries, true, 0); + }) +} + #[bench] -fn bench_shredder(bencher: &mut Bencher) { +fn bench_shredder_large_entries(bencher: &mut Bencher) { let kp = Arc::new(Keypair::new()); + let shred_size = PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER; + let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size; + let txs_per_entry = 128; + let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64); + let entries = make_large_unchained_entries(txs_per_entry, num_entries); // 1Mb - let data = vec![0u8; 1000 * 1000]; bencher.iter(|| { - let mut shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, &kp, 0).unwrap(); - bincode::serialize_into(&mut shredder, &data).unwrap(); + let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap(); + shredder.entries_to_shreds(&entries, true, 0); }) } #[bench] fn bench_deshredder(bencher: &mut Bencher) { let kp = Arc::new(Keypair::new()); - // 10MB - let data = vec![0u8; 10000 * 1000]; - let mut shredded = Shredder::new(1, 0, 0.0, &kp, 0).unwrap(); - let _ = bincode::serialize_into(&mut shredded, &data); - shredded.finalize_data(); + let shred_size = PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER; + // ~10Mb + let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size; + let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64; + let entries = create_ticks(num_ticks, Hash::default()); + let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp).unwrap(); + let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0; bencher.iter(|| { - let raw = &mut Shredder::deshred(&shredded.shreds).unwrap(); + let raw = &mut Shredder::deshred(&data_shreds).unwrap(); assert_ne!(raw.len(), 0); }) } + +#[bench] +fn bench_deserialize_hdr(bencher: &mut Bencher) { + let data = vec![0; PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER]; + + let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true); + + bencher.iter(|| { + let payload = shred.payload.clone(); + let _ = Shred::new_from_serialized_shred(payload).unwrap(); + }) +} diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 0c62b2e7c05130..727aff633715fd 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -8,6 +8,7 @@ use crate::{ leader_schedule_cache::LeaderScheduleCache, packet::PACKETS_PER_BATCH, packet::{Packet, Packets}, + perf_libs, poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry}, poh_service::PohService, result::{Error, Result}, @@ -20,6 +21,7 @@ use itertools::Itertools; use solana_measure::measure::Measure; use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn}; use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch}; +use solana_sdk::clock::MAX_TRANSACTION_FORWARDING_DELAY_GPU; use solana_sdk::{ clock::{ DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, @@ -697,12 +699,18 @@ impl BankingStage { // 1. Transaction forwarding delay // 2. The slot at which the next leader will actually process the transaction // Drop the transaction if it will expire by the time the next node receives and processes it + let api = perf_libs::api(); + let max_tx_fwd_delay = if api.is_none() { + MAX_TRANSACTION_FORWARDING_DELAY + } else { + MAX_TRANSACTION_FORWARDING_DELAY_GPU + }; let result = bank.check_transactions( transactions, None, &filter, (MAX_PROCESSING_AGE) - .saturating_sub(MAX_TRANSACTION_FORWARDING_DELAY) + .saturating_sub(max_tx_fwd_delay) .saturating_sub( (FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET * bank.ticks_per_slot() / DEFAULT_TICKS_PER_SECOND) as usize, diff --git a/core/src/blob_fetch_stage.rs b/core/src/blob_fetch_stage.rs deleted file mode 100644 index 74334742726843..00000000000000 --- a/core/src/blob_fetch_stage.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel. - -use crate::recycler::Recycler; -use crate::result; -use crate::result::Error; -use crate::service::Service; -use crate::streamer::{self, BlobSender, PacketReceiver, PacketSender}; -use std::net::UdpSocket; -use std::sync::atomic::AtomicBool; -use std::sync::mpsc::{channel, RecvTimeoutError}; -use std::sync::Arc; -use std::thread::{self, Builder, JoinHandle}; - -pub struct BlobFetchStage { - thread_hdls: Vec>, -} - -impl BlobFetchStage { - pub fn new(socket: Arc, sender: &BlobSender, exit: &Arc) -> Self { - Self::new_multi_socket(vec![socket], sender, exit) - } - pub fn new_multi_socket( - sockets: Vec>, - sender: &BlobSender, - exit: &Arc, - ) -> Self { - let thread_hdls: Vec<_> = sockets - .into_iter() - .map(|socket| streamer::blob_receiver(socket, &exit, sender.clone())) - .collect(); - - Self { thread_hdls } - } - - fn handle_forwarded_packets( - recvr: &PacketReceiver, - sendr: &PacketSender, - ) -> result::Result<()> { - let msgs = recvr.recv()?; - let mut batch = vec![msgs]; - while let Ok(more) = recvr.try_recv() { - batch.push(more); - } - - batch - .iter_mut() - .for_each(|b| b.packets.iter_mut().for_each(|p| p.meta.forward = true)); - - for packets in batch { - if sendr.send(packets).is_err() { - return Err(Error::SendError); - } - } - - Ok(()) - } - - pub fn new_multi_socket_packet( - sockets: Vec>, - forward_sockets: Vec>, - sender: &PacketSender, - exit: &Arc, - ) -> Self { - let recycler = Recycler::default(); - let tvu_threads = sockets.into_iter().map(|socket| { - streamer::receiver( - socket, - &exit, - sender.clone(), - recycler.clone(), - "blob_fetch_stage", - ) - }); - - let (forward_sender, forward_receiver) = channel(); - let tvu_forwards_threads = forward_sockets.into_iter().map(|socket| { - streamer::receiver( - socket, - &exit, - forward_sender.clone(), - recycler.clone(), - "blob_fetch_stage", - ) - }); - - let sender = sender.clone(); - let fwd_thread_hdl = Builder::new() - .name("solana-tvu-fetch-stage-fwd-rcvr".to_string()) - .spawn(move || loop { - if let Err(e) = Self::handle_forwarded_packets(&forward_receiver, &sender) { - match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, - Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), - Error::RecvError(_) => break, - Error::SendError => break, - _ => error!("{:?}", e), - } - } - }) - .unwrap(); - - let mut thread_hdls: Vec<_> = tvu_threads.chain(tvu_forwards_threads).collect(); - thread_hdls.push(fwd_thread_hdl); - - Self { thread_hdls } - } -} - -impl Service for BlobFetchStage { - type JoinReturnType = (); - - fn join(self) -> thread::Result<()> { - for thread_hdl in self.thread_hdls { - thread_hdl.join()?; - } - Ok(()) - } -} diff --git a/core/src/blockstream_service.rs b/core/src/blockstream_service.rs index c5e7a3c2f813fb..c8ef3c2365400c 100644 --- a/core/src/blockstream_service.rs +++ b/core/src/blockstream_service.rs @@ -169,7 +169,7 @@ mod test { None, true, &Arc::new(Keypair::new()), - &entries, + entries, ) .unwrap(); diff --git a/core/src/blocktree.rs b/core/src/blocktree.rs index b54532e20a4d49..541841ccbb2c2d 100644 --- a/core/src/blocktree.rs +++ b/core/src/blocktree.rs @@ -12,13 +12,12 @@ use std::collections::HashMap; use rocksdb; -use solana_metrics::{datapoint_error, datapoint_info}; +use solana_metrics::{datapoint_debug, datapoint_error}; use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::hash::Hash; use solana_sdk::signature::{Keypair, KeypairUtil}; -use std::borrow::Borrow; use std::cell::RefCell; use std::cmp; use std::fs; @@ -324,7 +323,7 @@ impl Blocktree { // 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let submit_metrics = |attempted: bool, status: String, recovered: usize| { - datapoint_info!( + datapoint_debug!( "blocktree-erasure", ("slot", slot as i64, i64), ("start_index", set_index as i64, i64), @@ -806,21 +805,17 @@ impl Blocktree { self.code_shred_cf.get_bytes((slot, index)) } - pub fn write_entries( + pub fn write_entries( &self, start_slot: u64, num_ticks_in_start_slot: u64, - start_index: u64, + start_index: u32, ticks_per_slot: u64, parent: Option, is_full_slot: bool, keypair: &Arc, - entries: I, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - { + entries: Vec, + ) -> Result { assert!(num_ticks_in_start_slot < ticks_per_slot); let mut remaining_ticks_in_slot = ticks_per_slot - num_ticks_in_start_slot; @@ -833,40 +828,45 @@ impl Blocktree { }, |v| v, ); - let mut shredder = - Shredder::new(current_slot, parent_slot, 0.0, keypair, start_index as u32) - .expect("Failed to create entry shredder"); + let mut shredder = Shredder::new(current_slot, parent_slot, 0.0, keypair.clone()) + .expect("Failed to create entry shredder"); let mut all_shreds = vec![]; + let mut slot_entries = vec![]; // Find all the entries for start_slot - for entry in entries { + for entry in entries.into_iter() { if remaining_ticks_in_slot == 0 { current_slot += 1; parent_slot = current_slot - 1; remaining_ticks_in_slot = ticks_per_slot; - shredder.finalize_slot(); - all_shreds.append(&mut shredder.shreds); - shredder = - Shredder::new(current_slot, parent_slot, 0.0, &Arc::new(Keypair::new()), 0) - .expect("Failed to create entry shredder"); + let mut current_entries = vec![]; + std::mem::swap(&mut slot_entries, &mut current_entries); + let start_index = { + if all_shreds.is_empty() { + start_index + } else { + 0 + } + }; + let (mut data_shreds, mut coding_shreds, _) = + shredder.entries_to_shreds(¤t_entries, true, start_index); + all_shreds.append(&mut data_shreds); + all_shreds.append(&mut coding_shreds); + shredder = Shredder::new(current_slot, parent_slot, 0.0, keypair.clone()) + .expect("Failed to create entry shredder"); } - if entry.borrow().is_tick() { + if entry.is_tick() { remaining_ticks_in_slot -= 1; } - - bincode::serialize_into(&mut shredder, &vec![entry.borrow().clone()]) - .expect("Expect to write all entries to shreds"); - if remaining_ticks_in_slot == 0 { - shredder.finalize_slot(); - } else { - shredder.finalize_data(); - } + slot_entries.push(entry); } - if is_full_slot && remaining_ticks_in_slot != 0 { - shredder.finalize_slot(); + if !slot_entries.is_empty() { + let (mut data_shreds, mut coding_shreds, _) = + shredder.entries_to_shreds(&slot_entries, is_full_slot, 0); + all_shreds.append(&mut data_shreds); + all_shreds.append(&mut coding_shreds); } - all_shreds.append(&mut shredder.shreds); let num_shreds = all_shreds.len(); self.insert_shreds(all_shreds, None)?; @@ -919,6 +919,7 @@ impl Blocktree { break; } let (current_slot, index) = db_iterator.key().expect("Expect a valid key"); + let current_index = { if current_slot > slot { end_index @@ -926,6 +927,7 @@ impl Blocktree { index } }; + let upper_index = cmp::min(current_index, end_index); for i in prev_index..upper_index { @@ -982,9 +984,9 @@ impl Blocktree { ) -> Result<(Vec, usize)> { // Find the next consecutive block of shreds. let mut serialized_shreds: Vec> = vec![]; - let data_cf = self.db.column::(); + let data_shred_cf = self.db.column::(); - while let Some(serialized_shred) = data_cf.get_bytes((slot, start_index))? { + while let Some(serialized_shred) = data_shred_cf.get_bytes((slot, start_index))? { serialized_shreds.push(serialized_shred); start_index += 1; } @@ -994,6 +996,7 @@ impl Blocktree { serialized_shreds.len(), slot ); + let mut shreds: Vec = serialized_shreds .into_iter() .filter_map(|serialized_shred| Shred::new_from_serialized_shred(serialized_shred).ok()) @@ -1036,7 +1039,6 @@ impl Blocktree { } trace!("Found {:?} entries", all_entries.len()); - Ok((all_entries, num)) } @@ -1551,15 +1553,14 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_block: &GenesisBlock) -> Re // Fill slot 0 with ticks that link back to the genesis_block to bootstrap the ledger. let blocktree = Blocktree::open(ledger_path)?; + let entries = crate::entry::create_ticks(ticks_per_slot, genesis_block.hash()); + let last_hash = entries.last().unwrap().hash; - let mut shredder = Shredder::new(0, 0, 0.0, &Arc::new(Keypair::new()), 0) + let shredder = Shredder::new(0, 0, 0.0, Arc::new(Keypair::new())) .expect("Failed to create entry shredder"); - let last_hash = entries.last().unwrap().hash; - bincode::serialize_into(&mut shredder, &entries) - .expect("Expect to write all entries to shreds"); - shredder.finalize_slot(); - let shreds: Vec = shredder.shreds.drain(..).collect(); + let shreds = shredder.entries_to_shreds(&entries, true, 0).0; + assert!(shreds.last().unwrap().last_in_slot()); blocktree.insert_shreds(shreds, None)?; blocktree.set_roots(&[0])?; @@ -1641,24 +1642,18 @@ pub fn entries_to_test_shreds( parent_slot: u64, is_full_slot: bool, ) -> Vec { - let mut shredder = Shredder::new(slot, parent_slot, 0.0, &Arc::new(Keypair::new()), 0 as u32) + let shredder = Shredder::new(slot, parent_slot, 0.0, Arc::new(Keypair::new())) .expect("Failed to create entry shredder"); - bincode::serialize_into(&mut shredder, &entries) - .expect("Expect to write all entries to shreds"); - if is_full_slot { - shredder.finalize_slot(); - } else { - shredder.finalize_data(); - } - - shredder.shreds.drain(..).collect() + shredder.entries_to_shreds(&entries, is_full_slot, 0).0 } #[cfg(test)] pub mod tests { use super::*; use crate::entry::{create_ticks, Entry}; + use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo}; + use crate::shred::max_ticks_per_n_shreds; use itertools::Itertools; use rand::seq::SliceRandom; use rand::thread_rng; @@ -1667,6 +1662,54 @@ pub mod tests { use std::iter::FromIterator; use std::time::Duration; + #[test] + fn test_create_new_ledger() { + let mint_total = 1_000_000_000_000; + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(mint_total); + let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); + let ledger = Blocktree::open(&ledger_path).unwrap(); + + let ticks = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash()); + let entries = ledger.get_slot_entries(0, 0, None).unwrap(); + + assert_eq!(ticks, entries); + + // Destroying database without closing it first is undefined behavior + drop(ledger); + Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + } + + #[test] + fn test_insert_get_bytes() { + // Create enough entries to ensure there are at least two shreds created + let num_entries = max_ticks_per_n_shreds(1) + 1; + assert!(num_entries > 1); + + let (mut shreds, _) = make_slot_entries(0, 0, num_entries); + + let ledger_path = get_tmp_ledger_path("test_insert_data_shreds_basic"); + let ledger = Blocktree::open(&ledger_path).unwrap(); + + // Insert last shred, test we can retrieve it + let last_shred = shreds.pop().unwrap(); + assert!(last_shred.index() > 0); + ledger + .insert_shreds(vec![last_shred.clone()], None) + .unwrap(); + + let serialized_shred = ledger + .data_shred_cf + .get_bytes((0, last_shred.index() as u64)) + .unwrap() + .unwrap(); + let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap(); + + assert_eq!(last_shred, deserialized_shred); + // Destroying database without closing it first is undefined behavior + drop(ledger); + Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + } + #[test] fn test_write_entries() { solana_logger::setup(); @@ -1877,7 +1920,8 @@ pub mod tests { #[test] fn test_insert_data_shreds_basic() { - let num_entries = 5; + // Create enough entries to ensure there are at least two shreds created + let num_entries = max_ticks_per_n_shreds(1) + 1; assert!(num_entries > 1); let (mut shreds, entries) = make_slot_entries(0, 0, num_entries); @@ -1888,6 +1932,7 @@ pub mod tests { // Insert last shred, we're missing the other shreds, so no consecutive // shreds starting from slot 0, index 0 should exist. + assert!(shreds.len() > 1); let last_shred = shreds.pop().unwrap(); ledger.insert_shreds(vec![last_shred], None).unwrap(); assert!(ledger.get_slot_entries(0, 0, None).unwrap().is_empty()); @@ -2098,21 +2143,28 @@ pub mod tests { let blocktree_path = get_tmp_ledger_path("test_insert_data_shreds_consecutive"); { let blocktree = Blocktree::open(&blocktree_path).unwrap(); + // Create enough entries to ensure there are at least two shreds created + let min_entries = max_ticks_per_n_shreds(1) + 1; for i in 0..4 { let slot = i; let parent_slot = if i == 0 { 0 } else { i - 1 }; // Write entries - let num_entries = 21 as u64 * (i + 1); - let (mut shreds, original_entries) = - make_slot_entries(slot, parent_slot, num_entries); + let num_entries = min_entries * (i + 1); + let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries); let num_shreds = shreds.len() as u64; + assert!(num_shreds > 1); + let mut even_shreds = vec![]; let mut odd_shreds = vec![]; - for i in (0..num_shreds).rev() { - if i % 2 != 0 { - odd_shreds.insert(0, shreds.remove(i as usize)); + + for (i, shred) in shreds.into_iter().enumerate() { + if i % 2 == 0 { + even_shreds.push(shred); + } else { + odd_shreds.push(shred); } } + blocktree.insert_shreds(odd_shreds, None).unwrap(); assert_eq!(blocktree.get_slot_entries(slot, 0, None).unwrap(), vec![]); @@ -2121,7 +2173,7 @@ pub mod tests { if num_shreds % 2 == 0 { assert_eq!(meta.received, num_shreds); } else { - debug!("got here"); + trace!("got here"); assert_eq!(meta.received, num_shreds - 1); } assert_eq!(meta.consumed, 0); @@ -2131,7 +2183,7 @@ pub mod tests { assert_eq!(meta.last_index, std::u64::MAX); } - blocktree.insert_shreds(shreds, None).unwrap(); + blocktree.insert_shreds(even_shreds, None).unwrap(); assert_eq!( blocktree.get_slot_entries(slot, 0, None).unwrap(), @@ -2504,11 +2556,13 @@ pub mod tests { { let blocktree = Blocktree::open(&blocktree_path).unwrap(); let num_slots = 15; - let entries_per_slot = 5; + // Create enough entries to ensure there are at least two shreds created + let entries_per_slot = max_ticks_per_n_shreds(1) + 1; assert!(entries_per_slot > 1); let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); let shreds_per_slot = shreds.len() / num_slots as usize; + assert!(shreds_per_slot > 1); // Write the shreds such that every 3rd slot has a gap in the beginning let mut missing_shreds = vec![]; @@ -2852,13 +2906,15 @@ pub mod tests { // Write entries let gap: u64 = 10; assert!(gap > 3); - let num_entries = 10; + // Create enough entries to ensure there are at least two shreds created + let num_entries = max_ticks_per_n_shreds(1) + 1; let entries = create_ticks(num_entries, Hash::default()); let mut shreds = entries_to_test_shreds(entries, slot, 0, true); let num_shreds = shreds.len(); - for (i, b) in shreds.iter_mut().enumerate() { - b.set_index(i as u32 * gap as u32); - b.set_slot(slot); + assert!(num_shreds > 1); + for (i, s) in shreds.iter_mut().enumerate() { + s.set_index(i as u32 * gap as u32); + s.set_slot(slot); } blocktree.insert_shreds(shreds, None).unwrap(); @@ -2892,7 +2948,8 @@ pub mod tests { vec![1], ); - // Test with end indexes that are greater than the last item in the ledger + // Test with a range that encompasses a shred with index == gap which was + // already inserted. let mut expected: Vec = (1..gap).collect(); expected.push(gap + 1); assert_eq!( @@ -2943,8 +3000,9 @@ pub mod tests { assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty); assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty); - let entries = create_ticks(20, Hash::default()); + let entries = create_ticks(100, Hash::default()); let mut shreds = entries_to_test_shreds(entries, slot, 0, true); + assert!(shreds.len() > 2); shreds.drain(2..); const ONE: u64 = 1; @@ -3117,7 +3175,7 @@ pub mod tests { // Trying to insert the same shred again should fail { let index = index_cf - .get(shred.common_header.coding_header.slot) + .get(shred.coding_header.common_header.slot) .unwrap() .unwrap(); assert!(!Blocktree::should_insert_coding_shred( @@ -3127,13 +3185,13 @@ pub mod tests { )); } - shred.common_header.coding_header.index += 1; + shred.coding_header.common_header.index += 1; // Establish a baseline that works { let coding_shred = Shred::new_empty_from_header(shred.clone()); let index = index_cf - .get(shred.common_header.coding_header.slot) + .get(shred.coding_header.common_header.slot) .unwrap() .unwrap(); assert!(Blocktree::should_insert_coding_shred( @@ -3146,7 +3204,7 @@ pub mod tests { // Trying to insert a shred with index < position should fail { let mut coding_shred = Shred::new_empty_from_header(shred.clone()); - let index = coding_shred.headers.common_header.position - 1; + let index = coding_shred.headers.coding_header.position - 1; coding_shred.set_index(index as u32); let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); @@ -3160,7 +3218,7 @@ pub mod tests { // Trying to insert shred with num_coding == 0 should fail { let mut coding_shred = Shred::new_empty_from_header(shred.clone()); - coding_shred.headers.common_header.num_coding_shreds = 0; + coding_shred.headers.coding_header.num_coding_shreds = 0; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); assert!(!Blocktree::should_insert_coding_shred( &coding_shred, @@ -3172,8 +3230,8 @@ pub mod tests { // Trying to insert shred with pos >= num_coding should fail { let mut coding_shred = Shred::new_empty_from_header(shred.clone()); - coding_shred.headers.common_header.num_coding_shreds = - coding_shred.headers.common_header.position; + coding_shred.headers.coding_header.num_coding_shreds = + coding_shred.headers.coding_header.position; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); assert!(!Blocktree::should_insert_coding_shred( &coding_shred, @@ -3186,9 +3244,9 @@ pub mod tests { // has index > u32::MAX should fail { let mut coding_shred = Shred::new_empty_from_header(shred.clone()); - coding_shred.headers.common_header.num_coding_shreds = 3; - coding_shred.headers.common_header.coding_header.index = std::u32::MAX - 1; - coding_shred.headers.common_header.position = 0; + coding_shred.headers.coding_header.num_coding_shreds = 3; + coding_shred.headers.coding_header.common_header.index = std::u32::MAX - 1; + coding_shred.headers.coding_header.position = 0; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); assert!(!Blocktree::should_insert_coding_shred( &coding_shred, @@ -3197,7 +3255,7 @@ pub mod tests { )); // Decreasing the number of num_coding_shreds will put it within the allowed limit - coding_shred.headers.common_header.num_coding_shreds = 2; + coding_shred.headers.coding_header.num_coding_shreds = 2; assert!(Blocktree::should_insert_coding_shred( &coding_shred, index.coding(), diff --git a/core/src/blocktree/rocks.rs b/core/src/blocktree/rocks.rs index b8aeb420eaf32e..a4d31cea6292c2 100644 --- a/core/src/blocktree/rocks.rs +++ b/core/src/blocktree/rocks.rs @@ -419,11 +419,11 @@ impl std::convert::From for Error { } fn get_cf_options(name: &'static str) -> Options { - use crate::blocktree::db::columns::{ShredCode, ShredData}; + use crate::blocktree::db::columns::{ErasureMeta, Index, ShredCode, ShredData}; let mut options = Options::default(); match name { - ShredCode::NAME | ShredData::NAME => { + ShredCode::NAME | ShredData::NAME | Index::NAME | ErasureMeta::NAME => { // 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM options.set_max_write_buffer_number(8); options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize); diff --git a/core/src/blocktree_processor.rs b/core/src/blocktree_processor.rs index 4771ca7c04d9fb..cf8ec2d1e47749 100644 --- a/core/src/blocktree_processor.rs +++ b/core/src/blocktree_processor.rs @@ -65,13 +65,23 @@ fn execute_batch(batch: &TransactionBatch) -> Result<()> { first_err.unwrap_or(Ok(())) } -fn execute_batches(batches: &[TransactionBatch]) -> Result<()> { +fn execute_batches( + bank: &Arc, + batches: &[TransactionBatch], + entry_callback: Option<&ProcessCallback>, +) -> Result<()> { inc_new_counter_debug!("bank-par_execute_entries-count", batches.len()); let results: Vec> = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { batches .into_par_iter() - .map(|batch| execute_batch(batch)) + .map(|batch| { + let result = execute_batch(batch); + if let Some(entry_callback) = entry_callback { + entry_callback(bank); + } + result + }) .collect() }) }); @@ -84,13 +94,22 @@ fn execute_batches(batches: &[TransactionBatch]) -> Result<()> { /// 2. Process the locked group in parallel /// 3. Register the `Tick` if it's available /// 4. Update the leader scheduler, goto 1 -pub fn process_entries(bank: &Bank, entries: &[Entry], randomize: bool) -> Result<()> { +pub fn process_entries(bank: &Arc, entries: &[Entry], randomize: bool) -> Result<()> { + process_entries_with_callback(bank, entries, randomize, None) +} + +fn process_entries_with_callback( + bank: &Arc, + entries: &[Entry], + randomize: bool, + entry_callback: Option<&ProcessCallback>, +) -> Result<()> { // accumulator for entries that can be processed in parallel let mut batches = vec![]; for entry in entries { if entry.is_tick() { // if its a tick, execute the group and register the tick - execute_batches(&batches)?; + execute_batches(bank, &batches, entry_callback)?; batches.clear(); bank.register_tick(&entry.hash); continue; @@ -136,12 +155,12 @@ pub fn process_entries(bank: &Bank, entries: &[Entry], randomize: bool) -> Resul } else { // else we have an entry that conflicts with a prior entry // execute the current queue and try to process this entry again - execute_batches(&batches)?; + execute_batches(bank, &batches, entry_callback)?; batches.clear(); } } } - execute_batches(&batches)?; + execute_batches(bank, &batches, entry_callback)?; Ok(()) } @@ -155,27 +174,45 @@ pub enum BlocktreeProcessorError { LedgerVerificationFailed, } +/// Callback for accessing bank state while processing the blocktree +pub type ProcessCallback = Arc () + Sync + Send>; + +#[derive(Default)] +pub struct ProcessOptions { + pub verify_ledger: bool, + pub full_leader_cache: bool, + pub dev_halt_at_slot: Option, + pub entry_callback: Option, + pub override_num_threads: Option, +} + pub fn process_blocktree( genesis_block: &GenesisBlock, blocktree: &Blocktree, account_paths: Option, - verify_ledger: bool, - dev_halt_at_slot: Option, + opts: ProcessOptions, ) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlocktreeProcessorError> { - info!("processing ledger from bank 0..."); + if let Some(num_threads) = opts.override_num_threads { + PAR_THREAD_POOL.with(|pool| { + *pool.borrow_mut() = rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .build() + .unwrap() + }); + } // Setup bank for slot 0 let bank0 = Arc::new(Bank::new_with_paths(&genesis_block, account_paths)); - process_bank_0(&bank0, blocktree, verify_ledger)?; - process_blocktree_from_root(blocktree, bank0, verify_ledger, dev_halt_at_slot) + info!("processing ledger from bank 0..."); + process_bank_0(&bank0, blocktree, &opts)?; + process_blocktree_from_root(blocktree, bank0, &opts) } // Process blocktree from a known root bank pub fn process_blocktree_from_root( blocktree: &Blocktree, bank: Arc, - verify_ledger: bool, - dev_halt_at_slot: Option, + opts: &ProcessOptions, ) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlocktreeProcessorError> { info!("processing ledger from root: {}...", bank.slot()); // Starting slot must be a root, and thus has no parents @@ -183,7 +220,6 @@ pub fn process_blocktree_from_root( let start_slot = bank.slot(); let now = Instant::now(); let mut rooted_path = vec![start_slot]; - let dev_halt_at_slot = dev_halt_at_slot.unwrap_or(std::u64::MAX); blocktree .set_roots(&[start_slot]) @@ -196,14 +232,16 @@ pub fn process_blocktree_from_root( if let Some(meta) = meta { let epoch_schedule = bank.epoch_schedule(); let mut leader_schedule_cache = LeaderScheduleCache::new(*epoch_schedule, &bank); + if opts.full_leader_cache { + leader_schedule_cache.set_max_schedules(std::usize::MAX); + } let fork_info = process_pending_slots( &bank, &meta, blocktree, &mut leader_schedule_cache, &mut rooted_path, - verify_ledger, - dev_halt_at_slot, + opts, )?; let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip(); let bank_forks = BankForks::new_from_banks(&banks, rooted_path); @@ -231,35 +269,37 @@ pub fn process_blocktree_from_root( } fn verify_and_process_entries( - bank: &Bank, + bank: &Arc, entries: &[Entry], - verify_ledger: bool, last_entry_hash: Hash, + opts: &ProcessOptions, ) -> result::Result { assert!(!entries.is_empty()); - if verify_ledger && !entries.verify(&last_entry_hash) { + if opts.verify_ledger && !entries.verify(&last_entry_hash) { warn!("Ledger proof of history failed at slot: {}", bank.slot()); return Err(BlocktreeProcessorError::LedgerVerificationFailed); } - process_entries(&bank, &entries, true).map_err(|err| { - warn!( - "Failed to process entries for slot {}: {:?}", - bank.slot(), - err - ); - BlocktreeProcessorError::LedgerVerificationFailed - })?; + process_entries_with_callback(bank, &entries, true, opts.entry_callback.as_ref()).map_err( + |err| { + warn!( + "Failed to process entries for slot {}: {:?}", + bank.slot(), + err + ); + BlocktreeProcessorError::LedgerVerificationFailed + }, + )?; Ok(entries.last().unwrap().hash) } // Special handling required for processing the entries in slot 0 fn process_bank_0( - bank0: &Bank, + bank0: &Arc, blocktree: &Blocktree, - verify_ledger: bool, + opts: &ProcessOptions, ) -> result::Result<(), BlocktreeProcessorError> { assert_eq!(bank0.slot(), 0); @@ -283,7 +323,7 @@ fn process_bank_0( } if !entries.is_empty() { - verify_and_process_entries(bank0, &entries, verify_ledger, entry0.hash)?; + verify_and_process_entries(bank0, &entries, entry0.hash, opts)?; } else { bank0.register_tick(&entry0.hash); } @@ -356,8 +396,7 @@ fn process_pending_slots( blocktree: &Blocktree, leader_schedule_cache: &mut LeaderScheduleCache, rooted_path: &mut Vec, - verify_ledger: bool, - dev_halt_at_slot: Slot, + opts: &ProcessOptions, ) -> result::Result, BankForksInfo)>, BlocktreeProcessorError> { let mut fork_info = vec![]; let mut last_status_report = Instant::now(); @@ -371,6 +410,7 @@ fn process_pending_slots( &mut fork_info, )?; + let dev_halt_at_slot = opts.dev_halt_at_slot.unwrap_or(std::u64::MAX); while !pending_slots.is_empty() { let (slot, meta, bank, last_entry_hash) = pending_slots.pop().unwrap(); @@ -385,7 +425,7 @@ fn process_pending_slots( BlocktreeProcessorError::LedgerVerificationFailed })?; - verify_and_process_entries(&bank, &entries, verify_ledger, last_entry_hash)?; + verify_and_process_entries(&bank, &entries, last_entry_hash, opts)?; bank.freeze(); // all banks handled by this routine are created from complete slots @@ -422,20 +462,22 @@ fn process_pending_slots( #[cfg(test)] pub mod tests { use super::*; - use crate::blocktree::create_new_tmp_ledger; - use crate::entry::{create_ticks, next_entry, next_entry_mut, Entry}; - use crate::genesis_utils::{ - create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo, + use crate::{ + blocktree::create_new_tmp_ledger, + entry::{create_ticks, next_entry, next_entry_mut, Entry}, + genesis_utils::{create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo}, }; use rand::{thread_rng, Rng}; - use solana_runtime::epoch_schedule::EpochSchedule; - use solana_sdk::hash::Hash; - use solana_sdk::instruction::InstructionError; - use solana_sdk::pubkey::Pubkey; - use solana_sdk::signature::{Keypair, KeypairUtil}; - use solana_sdk::system_transaction; - use solana_sdk::transaction::Transaction; - use solana_sdk::transaction::TransactionError; + use solana_sdk::{ + epoch_schedule::EpochSchedule, + hash::Hash, + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, KeypairUtil}, + system_transaction, + transaction::{Transaction, TransactionError}, + }; + use std::sync::RwLock; pub fn fill_blocktree_slot_with_ticks( blocktree: &Blocktree, @@ -456,7 +498,7 @@ pub mod tests { Some(parent_slot), true, &Arc::new(Keypair::new()), - &entries, + entries, ) .unwrap(); @@ -517,8 +559,12 @@ pub mod tests { // slot 2, points at slot 1 fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (mut _bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!( @@ -575,8 +621,12 @@ pub mod tests { blocktree.set_roots(&[0, 1, 4]).unwrap(); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root @@ -645,8 +695,12 @@ pub mod tests { blocktree.set_roots(&[0, 1]).unwrap(); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 2); // There are two forks assert_eq!( @@ -721,8 +775,12 @@ pub mod tests { blocktree.set_roots(&[last_slot + 1]).unwrap(); // Check that we can properly restart the ledger / leader scheduler doesn't fail + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // There is one fork assert_eq!( @@ -783,7 +841,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(2); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair = Keypair::new(); let slot_entries = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash()); let tx = system_transaction::create_user_account( @@ -849,7 +907,7 @@ pub mod tests { // Fill up the rest of slot 1 with ticks entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash)); - + let last_blockhash = entries.last().unwrap().hash; let blocktree = Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); blocktree @@ -861,11 +919,15 @@ pub mod tests { None, true, &Arc::new(Keypair::new()), - &entries, + entries, ) .unwrap(); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks.root(), 0); @@ -877,7 +939,7 @@ pub mod tests { mint - deducted_from_mint ); assert_eq!(bank.tick_height(), 2 * genesis_block.ticks_per_slot - 1); - assert_eq!(bank.last_blockhash(), entries.last().unwrap().hash); + assert_eq!(bank.last_blockhash(), last_blockhash); } #[test] @@ -889,8 +951,12 @@ pub mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); let blocktree = Blocktree::open(&ledger_path).unwrap(); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap(); + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 }); @@ -898,10 +964,106 @@ pub mod tests { assert_eq!(bank.tick_height(), 0); } + #[test] + fn test_process_ledger_options_override_threads() { + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(123); + let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); + + let blocktree = Blocktree::open(&ledger_path).unwrap(); + let opts = ProcessOptions { + override_num_threads: Some(1), + ..ProcessOptions::default() + }; + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); + PAR_THREAD_POOL.with(|pool| { + assert_eq!(pool.borrow().current_num_threads(), 1); + }); + } + + #[test] + fn test_process_ledger_options_full_leader_cache() { + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(123); + let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); + + let blocktree = Blocktree::open(&ledger_path).unwrap(); + let opts = ProcessOptions { + full_leader_cache: true, + ..ProcessOptions::default() + }; + let (_bank_forks, _bank_forks_info, cached_leader_schedule) = + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); + assert_eq!(cached_leader_schedule.max_schedules(), std::usize::MAX); + } + + #[test] + fn test_process_ledger_options_entry_callback() { + let GenesisBlockInfo { + genesis_block, + mint_keypair, + .. + } = create_genesis_block(100); + let (ledger_path, last_entry_hash) = create_new_tmp_ledger!(&genesis_block); + let blocktree = + Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockhash = genesis_block.hash(); + let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()]; + + let tx = system_transaction::create_user_account( + &mint_keypair, + &keypairs[0].pubkey(), + 1, + blockhash, + ); + let entry_1 = next_entry(&last_entry_hash, 1, vec![tx]); + + let tx = system_transaction::create_user_account( + &mint_keypair, + &keypairs[1].pubkey(), + 1, + blockhash, + ); + let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); + + let mut entries = vec![entry_1, entry_2]; + entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash)); + blocktree + .write_entries( + 1, + 0, + 0, + genesis_block.ticks_per_slot, + None, + true, + &Arc::new(Keypair::new()), + entries, + ) + .unwrap(); + + let callback_counter: Arc> = Arc::default(); + let entry_callback = { + let counter = callback_counter.clone(); + let pubkeys: Vec = keypairs.iter().map(|k| k.pubkey()).collect(); + Arc::new(move |bank: &Bank| { + let mut counter = counter.write().unwrap(); + assert_eq!(bank.get_balance(&pubkeys[*counter]), 1); + assert_eq!(bank.get_balance(&pubkeys[*counter + 1]), 0); + *counter += 1; + }) + }; + + let opts = ProcessOptions { + override_num_threads: Some(1), + entry_callback: Some(entry_callback), + ..ProcessOptions::default() + }; + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); + assert_eq!(*callback_counter.write().unwrap(), 2); + } + #[test] fn test_process_entries_tick() { let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); // ensure bank can process a tick assert_eq!(bank.tick_height(), 0); @@ -917,7 +1079,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); @@ -951,7 +1113,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -1008,7 +1170,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -1061,7 +1223,7 @@ pub mod tests { assert!(process_entries( &bank, &[entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()], - false + false, ) .is_err()); @@ -1093,7 +1255,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -1171,7 +1333,7 @@ pub mod tests { entry_2_to_3_and_1_to_mint.clone(), entry_conflict_itself.clone() ], - false + false, ) .is_err()); @@ -1188,7 +1350,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -1239,7 +1401,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1_000_000_000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; @@ -1299,7 +1461,7 @@ pub mod tests { .. } = create_genesis_block((num_accounts + 1) as u64 * initial_lamports); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let mut keypairs: Vec = vec![]; @@ -1366,7 +1528,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -1438,7 +1600,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(11_000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let pubkey = Pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); @@ -1480,7 +1642,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(11_000); - let bank = Bank::new(&genesis_block); + let bank = Arc::new(Bank::new(&genesis_block)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::create_user_account( @@ -1552,15 +1714,19 @@ pub mod tests { // Set up bank1 let bank0 = Arc::new(Bank::new(&genesis_block)); - process_bank_0(&bank0, &blocktree, true).unwrap(); + let opts = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; + process_bank_0(&bank0, &blocktree, &opts).unwrap(); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); bank1.squash(); let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap(); - verify_and_process_entries(&bank1, &slot1_entries, true, bank0.last_blockhash()).unwrap(); + verify_and_process_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts).unwrap(); // Test process_blocktree_from_root() from slot 1 onwards let (bank_forks, bank_forks_info, _) = - process_blocktree_from_root(&blocktree, bank1, true, None).unwrap(); + process_blocktree_from_root(&blocktree, bank1, &opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // One fork assert_eq!( @@ -1596,7 +1762,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_block(1_000_000_000); - let mut bank = Bank::new(&genesis_block); + let mut bank = Arc::new(Bank::new(&genesis_block)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; @@ -1676,19 +1842,17 @@ pub mod tests { ) .expect("process ticks failed"); - let parent = Arc::new(bank); - if i % 16 == 0 { root.map(|old_root| old_root.squash()); - root = Some(parent.clone()); + root = Some(bank.clone()); } i += 1; - bank = Bank::new_from_parent( - &parent, + bank = Arc::new(Bank::new_from_parent( + &bank, &Pubkey::default(), - parent.slot() + thread_rng().gen_range(1, 3), - ); + bank.slot() + thread_rng().gen_range(1, 3), + )); } } diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index f4196ead3574f2..c3a3d5721c4ec1 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -8,7 +8,7 @@ use crate::poh_recorder::WorkingBankEntry; use crate::result::{Error, Result}; use crate::service::Service; use crate::staking_utils; -use solana_metrics::{datapoint_info, inc_new_counter_error, inc_new_counter_info}; +use solana_metrics::{inc_new_counter_error, inc_new_counter_info}; use std::net::UdpSocket; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::{Receiver, RecvTimeoutError}; @@ -45,14 +45,17 @@ impl BroadcastStageType { blocktree: &Arc, ) -> BroadcastStage { match self { - BroadcastStageType::Standard => BroadcastStage::new( - sock, - cluster_info, - receiver, - exit_sender, - blocktree, - StandardBroadcastRun::new(), - ), + BroadcastStageType::Standard => { + let keypair = cluster_info.read().unwrap().keypair.clone(); + BroadcastStage::new( + sock, + cluster_info, + receiver, + exit_sender, + blocktree, + StandardBroadcastRun::new(keypair), + ) + } BroadcastStageType::FailEntryVerification => BroadcastStage::new( sock, @@ -235,6 +238,7 @@ mod test { let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000); let bank = Arc::new(Bank::new(&genesis_block)); + let leader_keypair = cluster_info.read().unwrap().keypair.clone(); // Start up the broadcast stage let broadcast_service = BroadcastStage::new( leader_info.sockets.broadcast, @@ -242,7 +246,7 @@ mod test { entry_receiver, &exit_sender, &blocktree, - StandardBroadcastRun::new(), + StandardBroadcastRun::new(leader_keypair), ); MockBroadcastStage { diff --git a/core/src/broadcast_stage/broadcast_fake_blobs_run.rs b/core/src/broadcast_stage/broadcast_fake_blobs_run.rs index 3cefb53112df4c..0b4d865fe144cd 100644 --- a/core/src/broadcast_stage/broadcast_fake_blobs_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_blobs_run.rs @@ -1,5 +1,6 @@ use super::*; use crate::entry::Entry; +use crate::shred::{Shredder, RECOMMENDED_FEC_RATE}; use solana_sdk::hash::Hash; pub(super) struct BroadcastFakeBlobsRun { @@ -30,22 +31,26 @@ impl BroadcastRun for BroadcastFakeBlobsRun { let last_tick = receive_results.last_tick; let keypair = &cluster_info.read().unwrap().keypair.clone(); - let latest_blob_index = blocktree + let next_shred_index = blocktree .meta(bank.slot()) .expect("Database error") .map(|meta| meta.consumed) - .unwrap_or(0); + .unwrap_or(0) as u32; let num_entries = receive_results.entries.len(); - let (shred_bufs, _) = broadcast_utils::entries_to_shreds( - receive_results.entries, + + let shredder = Shredder::new( bank.slot(), - receive_results.last_tick, - bank.max_tick_height(), - keypair, - latest_blob_index, bank.parent().unwrap().slot(), - None, + RECOMMENDED_FEC_RATE, + keypair.clone(), + ) + .expect("Expected to create a new shredder"); + + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds( + &receive_results.entries, + last_tick == bank.max_tick_height(), + next_shred_index, ); // If the last blockhash is default, a new block is being created @@ -58,15 +63,10 @@ impl BroadcastRun for BroadcastFakeBlobsRun { .map(|_| Entry::new(&self.last_blockhash, 0, vec![])) .collect(); - let (fake_shred_bufs, _) = broadcast_utils::entries_to_shreds( - fake_entries, - receive_results.last_tick, - bank.slot(), - bank.max_tick_height(), - keypair, - latest_blob_index, - bank.parent().unwrap().slot(), - None, + let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds( + &fake_entries, + last_tick == bank.max_tick_height(), + next_shred_index, ); // If it's the last tick, reset the last block hash to default @@ -75,19 +75,27 @@ impl BroadcastRun for BroadcastFakeBlobsRun { self.last_blockhash = Hash::default(); } - blocktree.insert_shreds(shred_bufs.clone(), None)?; + blocktree.insert_shreds(data_shreds.clone(), None)?; + blocktree.insert_shreds(coding_shreds.clone(), None)?; + // 3) Start broadcast step let peers = cluster_info.read().unwrap().tvu_peers(); peers.iter().enumerate().for_each(|(i, peer)| { if i <= self.partition { // Send fake blobs to the first N peers - fake_shred_bufs.iter().for_each(|b| { - sock.send_to(&b.payload, &peer.tvu_forwards).unwrap(); - }); + fake_data_shreds + .iter() + .chain(fake_coding_shreds.iter()) + .for_each(|b| { + sock.send_to(&b.payload, &peer.tvu_forwards).unwrap(); + }); } else { - shred_bufs.iter().for_each(|b| { - sock.send_to(&b.payload, &peer.tvu_forwards).unwrap(); - }); + data_shreds + .iter() + .chain(coding_shreds.iter()) + .for_each(|b| { + sock.send_to(&b.payload, &peer.tvu_forwards).unwrap(); + }); } }); diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs index e4876798db269a..26d0f038b42087 100644 --- a/core/src/broadcast_stage/broadcast_utils.rs +++ b/core/src/broadcast_stage/broadcast_utils.rs @@ -1,9 +1,7 @@ use crate::entry::Entry; use crate::poh_recorder::WorkingBankEntry; use crate::result::Result; -use crate::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE}; use solana_runtime::bank::Bank; -use solana_sdk::signature::Keypair; use std::sync::mpsc::Receiver; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -17,7 +15,7 @@ pub(super) struct ReceiveResults { #[derive(Copy, Clone)] pub struct UnfinishedSlotInfo { - pub next_index: u64, + pub next_shred_index: u32, pub slot: u64, pub parent: u64, } @@ -29,8 +27,8 @@ const RECEIVE_ENTRY_COUNT_THRESHOLD: usize = 8; pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result { let timer = Duration::new(1, 0); - let (mut bank, (entry, mut last_tick)) = receiver.recv_timeout(timer)?; let recv_start = Instant::now(); + let (mut bank, (entry, mut last_tick)) = receiver.recv_timeout(timer)?; let mut entries = vec![entry]; let mut slot = bank.slot(); @@ -43,6 +41,7 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result // If the bank changed, that implies the previous slot was interrupted and we do not have to // broadcast its entries. if try_bank.slot() != slot { + warn!("Broadcast for slot: {} interrupted", bank.slot()); entries.clear(); bank = try_bank; slot = bank.slot(); @@ -71,66 +70,6 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result }) } -pub(super) fn entries_to_shreds( - entries: Vec, - last_tick: u64, - slot: u64, - bank_max_tick: u64, - keypair: &Arc, - latest_shred_index: u64, - parent_slot: u64, - last_unfinished_slot: Option, -) -> (Vec, Option) { - let mut shreds = if let Some(unfinished_slot) = last_unfinished_slot { - if unfinished_slot.slot != slot { - let mut shredder = Shredder::new( - unfinished_slot.slot, - unfinished_slot.parent, - RECOMMENDED_FEC_RATE, - keypair, - unfinished_slot.next_index as u32, - ) - .expect("Expected to create a new shredder"); - shredder.finalize_slot(); - shredder.shreds.drain(..).collect() - } else { - vec![] - } - } else { - vec![] - }; - - let mut shredder = Shredder::new( - slot, - parent_slot, - RECOMMENDED_FEC_RATE, - keypair, - latest_shred_index as u32, - ) - .expect("Expected to create a new shredder"); - - bincode::serialize_into(&mut shredder, &entries) - .expect("Expect to write all entries to shreds"); - - let unfinished_slot = if last_tick == bank_max_tick { - shredder.finalize_slot(); - None - } else { - shredder.finalize_data(); - Some(UnfinishedSlotInfo { - next_index: u64::from(shredder.index), - slot, - parent: parent_slot, - }) - }; - - shreds.append(&mut shredder.shreds); - - trace!("Inserting {:?} shreds in blocktree", shreds.len()); - - (shreds, unfinished_slot) -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 6cf88319a7217b..6c1b37e71d6d76 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -1,4 +1,5 @@ use super::*; +use crate::shred::{Shredder, RECOMMENDED_FEC_RATE}; use solana_sdk::hash::Hash; pub(super) struct FailEntryVerificationBroadcastRun {} @@ -29,38 +30,52 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { last_entry.hash = Hash::default(); } - let keypair = &cluster_info.read().unwrap().keypair.clone(); - let latest_blob_index = blocktree + let keypair = cluster_info.read().unwrap().keypair.clone(); + let next_shred_index = blocktree .meta(bank.slot()) .expect("Database error") .map(|meta| meta.consumed) - .unwrap_or(0); + .unwrap_or(0) as u32; - let (shred_infos, _) = broadcast_utils::entries_to_shreds( - receive_results.entries, - last_tick, + let shredder = Shredder::new( bank.slot(), - bank.max_tick_height(), - keypair, - latest_blob_index, bank.parent().unwrap().slot(), - None, - ); + RECOMMENDED_FEC_RATE, + keypair.clone(), + ) + .expect("Expected to create a new shredder"); - let seeds: Vec<[u8; 32]> = shred_infos.iter().map(|s| s.seed()).collect(); + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds( + &receive_results.entries, + last_tick == bank.max_tick_height(), + next_shred_index, + ); - blocktree.insert_shreds(shred_infos.clone(), None)?; + let all_shreds = data_shreds + .iter() + .cloned() + .chain(coding_shreds.iter().cloned()) + .collect::>(); + let all_seeds: Vec<[u8; 32]> = all_shreds.iter().map(|s| s.seed()).collect(); + blocktree + .insert_shreds(all_shreds, None) + .expect("Failed to insert shreds in blocktree"); // 3) Start broadcast step - let bank_epoch = bank.get_stakers_epoch(bank.slot()); + let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch); - let shred_bufs: Vec> = shred_infos.into_iter().map(|s| s.payload).collect(); - // Broadcast data + erasures + let all_shred_bufs: Vec> = data_shreds + .into_iter() + .chain(coding_shreds.into_iter()) + .map(|s| s.payload) + .collect(); + + // Broadcast data cluster_info.read().unwrap().broadcast_shreds( sock, - &shred_bufs, - &seeds, + all_shred_bufs, + &all_seeds, stakes.as_ref(), )?; diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 818f34c078a112..fd0310fe6d684e 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -1,154 +1,432 @@ -use super::broadcast_utils; +use super::broadcast_utils::{self, ReceiveResults}; use super::*; -use crate::broadcast_stage::broadcast_utils::{entries_to_shreds, UnfinishedSlotInfo}; -use solana_sdk::timing::duration_as_ms; +use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo; +use crate::entry::Entry; +use crate::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE}; +use solana_sdk::signature::Keypair; +use solana_sdk::timing::duration_as_us; +use std::time::Duration; #[derive(Default)] struct BroadcastStats { - num_entries: Vec, - run_elapsed: Vec, - to_blobs_elapsed: Vec, + // Per-slot elapsed time + shredding_elapsed: u64, + insert_shreds_elapsed: u64, + broadcast_elapsed: u64, + receive_elapsed: u64, + clone_and_seed_elapsed: u64, +} + +impl BroadcastStats { + fn reset(&mut self) { + self.insert_shreds_elapsed = 0; + self.shredding_elapsed = 0; + self.broadcast_elapsed = 0; + self.receive_elapsed = 0; + self.clone_and_seed_elapsed = 0; + } } pub(super) struct StandardBroadcastRun { stats: BroadcastStats, unfinished_slot: Option, + current_slot_and_parent: Option<(u64, u64)>, + slot_broadcast_start: Option, + keypair: Arc, } impl StandardBroadcastRun { - pub(super) fn new() -> Self { + pub(super) fn new(keypair: Arc) -> Self { Self { stats: BroadcastStats::default(), unfinished_slot: None, + current_slot_and_parent: None, + slot_broadcast_start: None, + keypair, } } - fn update_broadcast_stats( - &mut self, - receive_entries_elapsed: u64, - shredding_elapsed: u64, - insert_shreds_elapsed: u64, - broadcast_elapsed: u64, - run_elapsed: u64, - num_entries: usize, - num_shreds: usize, - blob_index: u64, - ) { - inc_new_counter_info!("broadcast_service-time_ms", broadcast_elapsed as usize); - - self.stats.num_entries.push(num_entries); - self.stats.to_blobs_elapsed.push(shredding_elapsed); - self.stats.run_elapsed.push(run_elapsed); - if self.stats.num_entries.len() >= 16 { - info!( - "broadcast: entries: {:?} blob times ms: {:?} broadcast times ms: {:?}", - self.stats.num_entries, self.stats.to_blobs_elapsed, self.stats.run_elapsed - ); - self.stats.num_entries.clear(); - self.stats.to_blobs_elapsed.clear(); - self.stats.run_elapsed.clear(); + fn check_for_interrupted_slot(&mut self) -> Option { + let (slot, _) = self.current_slot_and_parent.unwrap(); + let last_unfinished_slot_shred = self + .unfinished_slot + .map(|last_unfinished_slot| { + if last_unfinished_slot.slot != slot { + self.report_and_reset_stats(); + Some(Shred::new_from_data( + last_unfinished_slot.slot, + last_unfinished_slot.next_shred_index, + (last_unfinished_slot.slot - last_unfinished_slot.parent) as u16, + None, + true, + true, + )) + } else { + None + } + }) + .unwrap_or(None); + + // This shred should only be Some if the previous slot was interrupted + if last_unfinished_slot_shred.is_some() { + self.unfinished_slot = None; } - datapoint_info!( - "broadcast-service", - ("num_entries", num_entries as i64, i64), - ("num_shreds", num_shreds as i64, i64), - ("receive_time", receive_entries_elapsed as i64, i64), - ("shredding_time", shredding_elapsed as i64, i64), - ("insert_shred_time", insert_shreds_elapsed as i64, i64), - ("broadcast_time", broadcast_elapsed as i64, i64), - ("transmit-index", blob_index as i64, i64), - ); + last_unfinished_slot_shred } -} -impl BroadcastRun for StandardBroadcastRun { - fn run( + fn coalesce_shreds( + data_shreds: Vec, + coding_shreds: Vec, + last_unfinished_slot_shred: Option, + ) -> Vec { + if let Some(shred) = last_unfinished_slot_shred { + data_shreds + .iter() + .chain(coding_shreds.iter()) + .cloned() + .chain(std::iter::once(shred)) + .collect::>() + } else { + data_shreds + .iter() + .chain(coding_shreds.iter()) + .cloned() + .collect::>() + } + } + + fn entries_to_shreds( + &mut self, + blocktree: &Blocktree, + entries: &[Entry], + is_slot_end: bool, + ) -> (Vec, Vec) { + let (slot, parent_slot) = self.current_slot_and_parent.unwrap(); + let shredder = Shredder::new( + slot, + parent_slot, + RECOMMENDED_FEC_RATE, + self.keypair.clone(), + ) + .expect("Expected to create a new shredder"); + + let next_shred_index = self + .unfinished_slot + .map(|s| s.next_shred_index) + .unwrap_or_else(|| { + blocktree + .meta(slot) + .expect("Database error") + .map(|meta| meta.consumed) + .unwrap_or(0) as u32 + }); + + let (data_shreds, coding_shreds, new_next_shred_index) = + shredder.entries_to_shreds(entries, is_slot_end, next_shred_index); + + self.unfinished_slot = Some(UnfinishedSlotInfo { + next_shred_index: new_next_shred_index, + slot, + parent: parent_slot, + }); + + (data_shreds, coding_shreds) + } + + fn process_receive_results( &mut self, cluster_info: &Arc>, - receiver: &Receiver, sock: &UdpSocket, blocktree: &Arc, + receive_results: ReceiveResults, ) -> Result<()> { - // 1) Pull entries from banking stage - let receive_results = broadcast_utils::recv_slot_entries(receiver)?; - let receive_elapsed = receive_results.time_elapsed; + let mut receive_elapsed = receive_results.time_elapsed; let num_entries = receive_results.entries.len(); let bank = receive_results.bank.clone(); let last_tick = receive_results.last_tick; inc_new_counter_info!("broadcast_service-entries_received", num_entries); - // 2) Convert entries to blobs + generate coding blobs - let keypair = &cluster_info.read().unwrap().keypair.clone(); - let latest_shred_index = blocktree - .meta(bank.slot()) - .expect("Database error") - .map(|meta| meta.consumed) - .unwrap_or(0); + if self.current_slot_and_parent.is_none() + || bank.slot() != self.current_slot_and_parent.unwrap().0 + { + self.slot_broadcast_start = Some(Instant::now()); + let slot = bank.slot(); + let parent_slot = { + if let Some(parent_bank) = bank.parent() { + parent_bank.slot() + } else { + 0 + } + }; - let parent_slot = if let Some(parent_bank) = bank.parent() { - parent_bank.slot() - } else { - 0 - }; + self.current_slot_and_parent = Some((slot, parent_slot)); + receive_elapsed = Duration::new(0, 0); + } let to_shreds_start = Instant::now(); - let (shred_infos, uninished_slot) = entries_to_shreds( - receive_results.entries, - last_tick, - bank.slot(), - bank.max_tick_height(), - keypair, - latest_shred_index, - parent_slot, - self.unfinished_slot, + + // 1) Check if slot was interrupted + let last_unfinished_slot_shred = self.check_for_interrupted_slot(); + + // 2) Convert entries to shreds and coding shreds + let (data_shreds, coding_shreds) = self.entries_to_shreds( + blocktree, + &receive_results.entries, + last_tick == bank.max_tick_height(), ); let to_shreds_elapsed = to_shreds_start.elapsed(); - self.unfinished_slot = uninished_slot; - let all_seeds: Vec<[u8; 32]> = shred_infos.iter().map(|s| s.seed()).collect(); - let num_shreds = shred_infos.len(); + let clone_and_seed_start = Instant::now(); + let all_shreds = + Self::coalesce_shreds(data_shreds, coding_shreds, last_unfinished_slot_shred); + let all_shreds_ = all_shreds.clone(); + let all_seeds: Vec<[u8; 32]> = all_shreds.iter().map(|s| s.seed()).collect(); + let clone_and_seed_elapsed = clone_and_seed_start.elapsed(); + + // 3) Insert shreds into blocktree let insert_shreds_start = Instant::now(); blocktree - .insert_shreds(shred_infos.clone(), None) + .insert_shreds(all_shreds_, None) .expect("Failed to insert shreds in blocktree"); let insert_shreds_elapsed = insert_shreds_start.elapsed(); - // 3) Start broadcast step + // 4) Broadcast the shreds let broadcast_start = Instant::now(); - let bank_epoch = bank.get_stakers_epoch(bank.slot()); + let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch); - let all_shred_bufs: Vec> = shred_infos.into_iter().map(|s| s.payload).collect(); + let all_shred_bufs: Vec> = all_shreds.into_iter().map(|s| s.payload).collect(); trace!("Broadcasting {:?} shreds", all_shred_bufs.len()); + cluster_info.read().unwrap().broadcast_shreds( sock, - &all_shred_bufs, + all_shred_bufs, &all_seeds, stakes.as_ref(), )?; let broadcast_elapsed = broadcast_start.elapsed(); - let latest_shred_index = uninished_slot.map(|s| s.next_index).unwrap_or_else(|| { - blocktree - .meta(bank.slot()) - .expect("Database error") - .map(|meta| meta.consumed) - .unwrap_or(0) - }); + self.update_broadcast_stats( - duration_as_ms(&receive_elapsed), - duration_as_ms(&to_shreds_elapsed), - duration_as_ms(&insert_shreds_elapsed), - duration_as_ms(&broadcast_elapsed), - duration_as_ms( - &(receive_elapsed + to_shreds_elapsed + insert_shreds_elapsed + broadcast_elapsed), - ), - num_entries, - num_shreds, - latest_shred_index, + duration_as_us(&receive_elapsed), + duration_as_us(&to_shreds_elapsed), + duration_as_us(&insert_shreds_elapsed), + duration_as_us(&broadcast_elapsed), + duration_as_us(&clone_and_seed_elapsed), + last_tick == bank.max_tick_height(), ); + if last_tick == bank.max_tick_height() { + self.unfinished_slot = None; + } + Ok(()) } + + #[allow(clippy::too_many_arguments)] + fn update_broadcast_stats( + &mut self, + receive_entries_elapsed: u64, + shredding_elapsed: u64, + insert_shreds_elapsed: u64, + broadcast_elapsed: u64, + clone_and_seed_elapsed: u64, + slot_ended: bool, + ) { + self.stats.receive_elapsed += receive_entries_elapsed; + self.stats.shredding_elapsed += shredding_elapsed; + self.stats.insert_shreds_elapsed += insert_shreds_elapsed; + self.stats.broadcast_elapsed += broadcast_elapsed; + self.stats.clone_and_seed_elapsed += clone_and_seed_elapsed; + + if slot_ended { + self.report_and_reset_stats() + } + } + + fn report_and_reset_stats(&mut self) { + assert!(self.unfinished_slot.is_some()); + datapoint_info!( + "broadcast-bank-stats", + ("slot", self.unfinished_slot.unwrap().slot as i64, i64), + ("shredding_time", self.stats.shredding_elapsed as i64, i64), + ( + "insertion_time", + self.stats.insert_shreds_elapsed as i64, + i64 + ), + ("broadcast_time", self.stats.broadcast_elapsed as i64, i64), + ("receive_time", self.stats.receive_elapsed as i64, i64), + ( + "clone_and_seed", + self.stats.clone_and_seed_elapsed as i64, + i64 + ), + ( + "num_shreds", + i64::from(self.unfinished_slot.unwrap().next_shred_index), + i64 + ), + ( + "slot_broadcast_time", + self.slot_broadcast_start.unwrap().elapsed().as_millis() as i64, + i64 + ), + ); + self.stats.reset(); + } +} + +impl BroadcastRun for StandardBroadcastRun { + fn run( + &mut self, + cluster_info: &Arc>, + receiver: &Receiver, + sock: &UdpSocket, + blocktree: &Arc, + ) -> Result<()> { + let receive_results = broadcast_utils::recv_slot_entries(receiver)?; + self.process_receive_results(cluster_info, sock, blocktree, receive_results) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::blocktree::{get_tmp_ledger_path, Blocktree}; + use crate::cluster_info::{ClusterInfo, Node}; + use crate::entry::create_ticks; + use crate::genesis_utils::create_genesis_block; + use crate::shred::max_ticks_per_n_shreds; + use solana_runtime::bank::Bank; + use solana_sdk::genesis_block::GenesisBlock; + use solana_sdk::signature::{Keypair, KeypairUtil}; + use std::sync::{Arc, RwLock}; + use std::time::Duration; + + fn setup( + num_shreds_per_slot: u64, + ) -> ( + Arc, + GenesisBlock, + Arc>, + Arc, + Arc, + UdpSocket, + ) { + // Setup + let ledger_path = get_tmp_ledger_path!(); + let blocktree = Arc::new( + Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + ); + let leader_keypair = Arc::new(Keypair::new()); + let leader_pubkey = leader_keypair.pubkey(); + let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey); + let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair( + leader_info.info.clone(), + ))); + let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + let mut genesis_block = create_genesis_block(10_000).genesis_block; + genesis_block.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1; + let bank0 = Arc::new(Bank::new(&genesis_block)); + ( + blocktree, + genesis_block, + cluster_info, + bank0, + leader_keypair, + socket, + ) + } + + #[test] + fn test_slot_interrupt() { + // Setup + let num_shreds_per_slot = 2; + let (blocktree, genesis_block, cluster_info, bank0, leader_keypair, socket) = + setup(num_shreds_per_slot); + + // Insert 1 less than the number of ticks needed to finish the slot + let ticks = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash()); + let receive_results = ReceiveResults { + entries: ticks.clone(), + time_elapsed: Duration::new(3, 0), + bank: bank0.clone(), + last_tick: (ticks.len() - 1) as u64, + }; + + // Step 1: Make an incomplete transmission for slot 0 + let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone()); + standard_broadcast_run + .process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .unwrap(); + let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); + assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot); + assert_eq!(unfinished_slot.slot, 0); + assert_eq!(unfinished_slot.parent, 0); + // Make sure the slot is not complete + assert!(!blocktree.is_full(0)); + // Modify the stats, should reset later + standard_broadcast_run.stats.receive_elapsed = 10; + + // Try to fetch ticks from blocktree, nothing should break + assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks); + assert_eq!( + blocktree + .get_slot_entries(0, num_shreds_per_slot, None) + .unwrap(), + vec![], + ); + + // Step 2: Make a transmission for another bank that interrupts the transmission for + // slot 0 + let bank2 = Arc::new(Bank::new_from_parent(&bank0, &leader_keypair.pubkey(), 2)); + + // Interrupting the slot should cause the unfinished_slot and stats to reset + let num_shreds = 1; + assert!(num_shreds < num_shreds_per_slot); + let ticks = create_ticks(max_ticks_per_n_shreds(num_shreds), genesis_block.hash()); + let receive_results = ReceiveResults { + entries: ticks.clone(), + time_elapsed: Duration::new(2, 0), + bank: bank2.clone(), + last_tick: (ticks.len() - 1) as u64, + }; + standard_broadcast_run + .process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .unwrap(); + let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); + + // The shred index should have reset to 0, which makes it possible for the + // index < the previous shred index for slot 0 + assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds); + assert_eq!(unfinished_slot.slot, 2); + assert_eq!(unfinished_slot.parent, 0); + // Check that the stats were reset as well + assert_eq!(standard_broadcast_run.stats.receive_elapsed, 0); + } + + #[test] + fn test_slot_finish() { + // Setup + let num_shreds_per_slot = 2; + let (blocktree, genesis_block, cluster_info, bank0, leader_keypair, socket) = + setup(num_shreds_per_slot); + + // Insert complete slot of ticks needed to finish the slot + let ticks = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash()); + let receive_results = ReceiveResults { + entries: ticks.clone(), + time_elapsed: Duration::new(3, 0), + bank: bank0.clone(), + last_tick: (ticks.len() - 1) as u64, + }; + + let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair); + standard_broadcast_run + .process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .unwrap(); + assert!(standard_broadcast_run.unfinished_slot.is_none()) + } } diff --git a/core/src/chacha.rs b/core/src/chacha.rs index c668d641045eb8..c3147a74854e30 100644 --- a/core/src/chacha.rs +++ b/core/src/chacha.rs @@ -136,7 +136,7 @@ mod tests { None, true, &Arc::new(keypair), - &entries, + entries, ) .unwrap(); @@ -153,7 +153,7 @@ mod tests { hasher.hash(&buf[..size]); // golden needs to be updated if blob stuff changes.... - let golden: Hash = "CLGvEayebjdgnLdttFAweZE9rqVkehXqEStUifG9kiU9" + let golden: Hash = "CGL4L6Q2QwiZQDCMwzshqj3S9riroUQuDjx8bS7ra2PU" .parse() .unwrap(); diff --git a/core/src/chacha_cuda.rs b/core/src/chacha_cuda.rs index aeac34c4de1ecc..50c7a348baadbd 100644 --- a/core/src/chacha_cuda.rs +++ b/core/src/chacha_cuda.rs @@ -146,7 +146,7 @@ mod tests { Some(0), true, &Arc::new(Keypair::new()), - &entries, + entries, ) .unwrap(); @@ -193,10 +193,10 @@ mod tests { return; } - let entries = create_ticks(32, Hash::default()); let ledger_dir = "test_encrypt_file_many_keys_multiple"; let ledger_path = get_tmp_ledger_path(ledger_dir); - let ticks_per_slot = 16; + let ticks_per_slot = 90; + let entries = create_ticks(2 * ticks_per_slot, Hash::default()); let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); blocktree .write_entries( @@ -207,7 +207,7 @@ mod tests { Some(0), true, &Arc::new(Keypair::new()), - &entries, + entries, ) .unwrap(); diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index a1361a1f7cb11e..417e3e66af170e 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -21,7 +21,8 @@ use crate::crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}; use crate::crds_value::{CrdsValue, CrdsValueLabel, EpochSlots, Vote}; use crate::packet::{to_shared_blob, Blob, Packet, SharedBlob}; use crate::repair_service::RepairType; -use crate::result::Result; +use crate::result::{Error, Result}; +use crate::sendmmsg::{multicast, send_mmsg}; use crate::staking_utils; use crate::streamer::{BlobReceiver, BlobSender}; use crate::weighted_shuffle::{weighted_best, weighted_shuffle}; @@ -31,7 +32,6 @@ use itertools::Itertools; use rand::SeedableRng; use rand::{thread_rng, Rng}; use rand_chacha::ChaChaRng; -use rayon::prelude::*; use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error}; use solana_netutil::{ bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range, @@ -52,7 +52,7 @@ use std::sync::{Arc, RwLock}; use std::thread::{sleep, Builder, JoinHandle}; use std::time::{Duration, Instant}; -pub const FULLNODE_PORT_RANGE: PortRange = (8000, 10_000); +pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000); /// The Data plane fanout size, also used as the neighborhood size pub const DATA_PLANE_FANOUT: usize = 200; @@ -456,6 +456,7 @@ impl ClusterInfo { .filter_map(|x| x.value.contact_info()) .filter(|x| x.id != me) .filter(|x| ContactInfo::is_valid_address(&x.tvu)) + .filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards)) .cloned() .collect() } @@ -482,48 +483,17 @@ impl ClusterInfo { && !ContactInfo::is_valid_address(&contact_info.tpu) } - fn stake_weighted_shuffle( - peers: &[ContactInfo], - stakes: Option<&HashMap>, - rng: ChaChaRng, - ) -> Vec<(u64, ContactInfo)> { - let (stake_weights, peers_with_stakes): (Vec<_>, Vec<_>) = peers - .iter() - .map(|c| { - let stake = stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0)); - // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is - // assumed to be missing entry. So let's make sure stake weights are atleast 1 - (cmp::max(1, stake), (stake, c.clone())) - }) - .sorted_by(|(_, (l_stake, l_info)), (_, (r_stake, r_info))| { - if r_stake == l_stake { - r_info.id.cmp(&l_info.id) - } else { - r_stake.cmp(&l_stake) - } - }) - .unzip(); - - let shuffle = weighted_shuffle(stake_weights, rng); - - let mut out: Vec<(u64, ContactInfo)> = shuffle - .iter() - .map(|x| peers_with_stakes[*x].clone()) - .collect(); - - out.dedup(); - out - } - - fn peers_and_stakes( + fn sorted_stakes_with_index( peers: &[ContactInfo], stakes: Option<&HashMap>, ) -> Vec<(u64, usize)> { - let mut stakes_and_index: Vec<_> = peers + let stakes_and_index: Vec<_> = peers .iter() .enumerate() .map(|(i, c)| { - let stake = stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0)); + // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is + // assumed to be missing entry. So let's make sure stake weights are atleast 1 + let stake = 1.max(stakes.map_or(1, |stakes| *stakes.get(&c.id).unwrap_or(&1))); (stake, i) }) .sorted_by(|(l_stake, l_info), (r_stake, r_info)| { @@ -535,36 +505,50 @@ impl ClusterInfo { }) .collect(); - // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is - // assumed to be missing entry. So let's make sure stake weights are atleast 1 stakes_and_index - .iter_mut() - .for_each(|(stake, _)| *stake = cmp::max(1, *stake)); + } - stakes_and_index + fn stake_weighted_shuffle( + stakes_and_index: &[(u64, usize)], + rng: ChaChaRng, + ) -> Vec<(u64, usize)> { + let stake_weights = stakes_and_index.iter().map(|(w, _)| *w).collect(); + + let shuffle = weighted_shuffle(stake_weights, rng); + + shuffle.iter().map(|x| stakes_and_index[*x]).collect() } - /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list - pub fn shuffle_peers_and_index( + // Return sorted_retransmit_peers(including self) and their stakes + pub fn sorted_retransmit_peers_and_stakes( &self, - stakes: Option<&HashMap>, - rng: ChaChaRng, - ) -> (usize, Vec) { + stakes: Option<&HashMap>, + ) -> (Vec, Vec<(u64, usize)>) { let mut peers = self.retransmit_peers(); + // insert "self" into this list for the layer and neighborhood computation peers.push(self.lookup(&self.id()).unwrap().clone()); - let contacts_and_stakes: Vec<_> = ClusterInfo::stake_weighted_shuffle(&peers, stakes, rng); - let mut index = 0; - let peers: Vec<_> = contacts_and_stakes - .into_iter() + let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes); + (peers, stakes_and_index) + } + + /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list + pub fn shuffle_peers_and_index( + id: &Pubkey, + peers: &[ContactInfo], + stakes_and_index: &[(u64, usize)], + rng: ChaChaRng, + ) -> (usize, Vec<(u64, usize)>) { + let shuffled_stakes_and_index = ClusterInfo::stake_weighted_shuffle(stakes_and_index, rng); + let mut self_index = 0; + shuffled_stakes_and_index + .iter() .enumerate() - .map(|(i, (_, peer))| { - if peer.id == self.id() { - index = i; + .for_each(|(i, (_stake, index))| { + if &peers[*index].id == id { + self_index = i; } - peer - }) - .collect(); - (index, peers) + }); + (self_index, shuffled_stakes_and_index) } /// compute broadcast table @@ -716,8 +700,8 @@ impl ClusterInfo { ) -> (Vec, Vec<(u64, usize)>) { let mut peers = self.tvu_peers(); peers.dedup(); - let peers_and_stakes = ClusterInfo::peers_and_stakes(&peers, stakes); - (peers, peers_and_stakes) + let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes); + (peers, stakes_and_index) } /// broadcast messages from the leader to layer 1 nodes @@ -725,28 +709,37 @@ impl ClusterInfo { pub fn broadcast_shreds( &self, s: &UdpSocket, - shreds: &[Vec], + shreds: Vec>, seeds: &[[u8; 32]], stakes: Option<&HashMap>, ) -> Result<()> { - let mut last_err = Ok(()); let (peers, peers_and_stakes) = self.sorted_tvu_peers_and_stakes(stakes); let broadcast_len = peers_and_stakes.len(); if broadcast_len == 0 { - datapoint_info!("cluster_info-num_nodes", ("count", 1, i64)); + datapoint_debug!("cluster_info-num_nodes", ("count", 1, i64)); return Ok(()); } - shreds.iter().zip(seeds).for_each(|(shred, seed)| { - let broadcast_index = weighted_best(&peers_and_stakes, ChaChaRng::from_seed(*seed)); + let mut packets: Vec<_> = shreds + .into_iter() + .zip(seeds) + .map(|(shred, seed)| { + let broadcast_index = weighted_best(&peers_and_stakes, ChaChaRng::from_seed(*seed)); - if let Err(e) = s.send_to(shred, &peers[broadcast_index].tvu) { - trace!("{}: broadcast result {:?}", self.id(), e); - last_err = Err(e); + (shred, &peers[broadcast_index].tvu) + }) + .collect(); + + let mut sent = 0; + while sent < packets.len() { + match send_mmsg(s, &mut packets[sent..]) { + Ok(n) => sent += n, + Err(e) => { + return Err(Error::IO(e)); + } } - }); + } - last_err?; - datapoint_info!("cluster_info-num_nodes", ("count", broadcast_len + 1, i64)); + datapoint_debug!("cluster_info-num_nodes", ("count", broadcast_len + 1, i64)); Ok(()) } @@ -754,34 +747,33 @@ impl ClusterInfo { /// # Remarks /// We need to avoid having obj locked while doing a io, such as the `send_to` pub fn retransmit_to( - obj: &Arc>, - peers: &[ContactInfo], - packet: &Packet, + peers: &[&ContactInfo], + packet: &mut Packet, slot_leader_pubkey: Option, s: &UdpSocket, forwarded: bool, ) -> Result<()> { - let (me, orders): (ContactInfo, &[ContactInfo]) = { - // copy to avoid locking during IO - let s = obj.read().unwrap(); - (s.my_data().clone(), peers) - }; - trace!("retransmit orders {}", orders.len()); - let errs: Vec<_> = orders - .par_iter() + trace!("retransmit orders {}", peers.len()); + let dests: Vec<_> = peers + .iter() .filter(|v| v.id != slot_leader_pubkey.unwrap_or_default()) - .map(|v| { - let dest = if forwarded { &v.tvu_forwards } else { &v.tvu }; - debug!("{}: retransmit packet to {} {}", me.id, v.id, *dest,); - s.send_to(&packet.data, dest) - }) + .map(|v| if forwarded { &v.tvu_forwards } else { &v.tvu }) .collect(); - for e in errs { - if let Err(e) = &e { - inc_new_counter_error!("cluster_info-retransmit-send_to_error", 1, 1); - error!("retransmit result {:?}", e); + + let mut sent = 0; + while sent < dests.len() { + match multicast(s, &mut packet.data[..packet.meta.size], &dests[sent..]) { + Ok(n) => sent += n, + Err(e) => { + inc_new_counter_error!( + "cluster_info-retransmit-send_to_error", + dests.len() - sent, + 1 + ); + error!("retransmit result {:?}", e); + return Err(Error::IO(e)); + } } - e?; } Ok(()) } @@ -843,23 +835,47 @@ impl ClusterInfo { } // If the network entrypoint hasn't been discovered yet, add it to the crds table fn add_entrypoint(&mut self, pulls: &mut Vec<(Pubkey, CrdsFilter, SocketAddr, CrdsValue)>) { - match &self.entrypoint { - Some(entrypoint) => { + let pull_from_entrypoint = if let Some(entrypoint) = &mut self.entrypoint { + if pulls.is_empty() { + // Nobody else to pull from, try the entrypoint + true + } else { + let now = timestamp(); + // Only consider pulling from the entrypoint periodically to avoid spamming it + if timestamp() - entrypoint.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 { + false + } else { + entrypoint.wallclock = now; + let found_entrypoint = self.gossip.crds.table.iter().any(|(_, v)| { + v.value + .contact_info() + .map(|ci| ci.gossip == entrypoint.gossip) + .unwrap_or(false) + }); + !found_entrypoint + } + } + } else { + false + }; + + if pull_from_entrypoint { + if let Some(entrypoint) = &self.entrypoint { let self_info = self .gossip .crds .lookup(&CrdsValueLabel::ContactInfo(self.id())) .unwrap_or_else(|| panic!("self_id invalid {}", self.id())); - self.gossip + return self + .gossip .pull .build_crds_filters(&self.gossip.crds, Self::max_bloom_size()) .into_iter() .for_each(|filter| { pulls.push((entrypoint.id, filter, entrypoint.gossip, self_info.clone())) - }) + }); } - None => (), } } @@ -927,9 +943,7 @@ impl ClusterInfo { }) .flatten() .collect(); - if pulls.is_empty() { - self.add_entrypoint(&mut pulls); - } + self.add_entrypoint(&mut pulls); pulls .into_iter() .map(|(peer, filter, gossip, self_info)| { @@ -1478,7 +1492,7 @@ impl ClusterInfo { /// An alternative to Spy Node that has a valid gossip address and fully participate in Gossip. pub fn gossip_node(id: &Pubkey, gossip_addr: &SocketAddr) -> (ContactInfo, UdpSocket) { - let (port, (gossip_socket, _)) = Node::get_gossip_port(gossip_addr, FULLNODE_PORT_RANGE); + let (port, (gossip_socket, _)) = Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE); let daddr = socketaddr_any!(); let node = ContactInfo::new( @@ -1498,7 +1512,7 @@ impl ClusterInfo { /// A Node with invalid ports to spy on gossip via pull requests pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket) { - let (_, gossip_socket) = bind_in_range(FULLNODE_PORT_RANGE).unwrap(); + let (_, gossip_socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap(); let daddr = socketaddr_any!(); let node = ContactInfo::new( @@ -1524,27 +1538,28 @@ impl ClusterInfo { /// 1.2 - If no, then figure out what layer the node is in and who the neighbors are and only broadcast to them /// 1 - also check if there are nodes in the next layer and repeat the layer 1 to layer 2 logic -/// Returns Neighbor Nodes and Children Nodes `(neighbors, children)` for a given node based on its stake (Bank Balance) +/// Returns Neighbor Nodes and Children Nodes `(neighbors, children)` for a given node based on its stake pub fn compute_retransmit_peers( fanout: usize, my_index: usize, - peers: Vec, -) -> (Vec, Vec) { + stakes_and_index: Vec, +) -> (Vec, Vec) { //calc num_layers and num_neighborhoods using the total number of nodes - let (num_layers, layer_indices) = ClusterInfo::describe_data_plane(peers.len(), fanout); + let (num_layers, layer_indices) = + ClusterInfo::describe_data_plane(stakes_and_index.len(), fanout); if num_layers <= 1 { /* single layer data plane */ - (peers, vec![]) + (stakes_and_index, vec![]) } else { //find my layer let locality = ClusterInfo::localize(&layer_indices, fanout, my_index); - let upper_bound = cmp::min(locality.neighbor_bounds.1, peers.len()); - let neighbors = peers[locality.neighbor_bounds.0..upper_bound].to_vec(); + let upper_bound = cmp::min(locality.neighbor_bounds.1, stakes_and_index.len()); + let neighbors = stakes_and_index[locality.neighbor_bounds.0..upper_bound].to_vec(); let mut children = Vec::new(); for ix in locality.next_layer_peers { - if let Some(peer) = peers.get(ix) { - children.push(peer.clone()); + if let Some(peer) = stakes_and_index.get(ix) { + children.push(*peer); continue; } break; @@ -1563,7 +1578,7 @@ pub struct Sockets { pub tpu_forwards: Vec, pub broadcast: UdpSocket, pub repair: UdpSocket, - pub retransmit: UdpSocket, + pub retransmit_sockets: Vec, pub storage: Option, } @@ -1611,7 +1626,7 @@ impl Node { tpu_forwards: vec![], broadcast, repair, - retransmit, + retransmit_sockets: vec![retransmit], storage: Some(storage), ip_echo: None, }, @@ -1632,7 +1647,7 @@ impl Node { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_pubsub_port); let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap(); - let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap(); + let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let storage = UdpSocket::bind("0.0.0.0:0").unwrap(); let info = ContactInfo::new( pubkey, @@ -1657,7 +1672,7 @@ impl Node { tpu_forwards: vec![tpu_forwards], broadcast, repair, - retransmit, + retransmit_sockets: vec![retransmit_socket], storage: None, }, } @@ -1690,16 +1705,18 @@ impl Node { let (tvu_port, tvu_sockets) = multi_bind_in_range(port_range, 8).expect("tvu multi_bind"); let (tvu_forwards_port, tvu_forwards_sockets) = - multi_bind_in_range(port_range, 8).expect("tpu multi_bind"); + multi_bind_in_range(port_range, 8).expect("tvu_forwards multi_bind"); let (tpu_port, tpu_sockets) = multi_bind_in_range(port_range, 32).expect("tpu multi_bind"); let (tpu_forwards_port, tpu_forwards_sockets) = - multi_bind_in_range(port_range, 8).expect("tpu multi_bind"); + multi_bind_in_range(port_range, 8).expect("tpu_forwards multi_bind"); + + let (_, retransmit_sockets) = + multi_bind_in_range(port_range, 8).expect("retransmit multi_bind"); let (_, repair) = Self::bind(port_range); let (_, broadcast) = Self::bind(port_range); - let (_, retransmit) = Self::bind(port_range); let info = ContactInfo::new( pubkey, @@ -1725,7 +1742,7 @@ impl Node { tpu_forwards: tpu_forwards_sockets, broadcast, repair, - retransmit, + retransmit_sockets, storage: None, ip_echo: Some(ip_echo), }, @@ -1769,9 +1786,10 @@ mod tests { use crate::crds_value::CrdsValueLabel; use crate::repair_service::RepairType; use crate::result::Error; - use crate::shred::{DataShredHeader, Shred}; + use crate::shred::max_ticks_per_n_shreds; + use crate::shred::{Shred, ShredHeader}; use crate::test_tx::test_tx; - use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; + use rayon::prelude::*; use solana_sdk::hash::Hash; use solana_sdk::signature::{Keypair, KeypairUtil}; use std::collections::HashSet; @@ -1929,10 +1947,10 @@ mod tests { 0, ); assert!(rv.is_empty()); - let mut data_shred = DataShredHeader::default(); - data_shred.data_header.slot = 2; - data_shred.parent_offset = 1; - data_shred.data_header.index = 1; + let mut data_shred = ShredHeader::default(); + data_shred.data_header.common_header.slot = 2; + data_shred.data_header.parent_offset = 1; + data_shred.data_header.common_header.index = 1; let shred_info = Shred::new_empty_from_header(data_shred); blocktree @@ -1974,7 +1992,7 @@ mod tests { let _ = fill_blocktree_slot_with_ticks( &blocktree, - DEFAULT_TICKS_PER_SLOT, + max_ticks_per_n_shreds(1) + 1, 2, 1, Hash::default(), @@ -2080,27 +2098,27 @@ mod tests { let node = Node::new_with_external_ip( &Pubkey::new_rand(), &socketaddr!(ip, 0), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); - check_node_sockets(&node, IpAddr::V4(ip), FULLNODE_PORT_RANGE); + check_node_sockets(&node, IpAddr::V4(ip), VALIDATOR_PORT_RANGE); } #[test] fn new_with_external_ip_test_gossip() { let ip = IpAddr::V4(Ipv4Addr::from(0)); let port = { - bind_in_range(FULLNODE_PORT_RANGE) + bind_in_range(VALIDATOR_PORT_RANGE) .expect("Failed to bind") .0 }; let node = Node::new_with_external_ip( &Pubkey::new_rand(), &socketaddr!(0, port), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); - check_node_sockets(&node, ip, FULLNODE_PORT_RANGE); + check_node_sockets(&node, ip, VALIDATOR_PORT_RANGE); assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port); } @@ -2111,15 +2129,15 @@ mod tests { let node = Node::new_replicator_with_external_ip( &Pubkey::new_rand(), &socketaddr!(ip, 0), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); let ip = IpAddr::V4(ip); - check_socket(&node.sockets.storage.unwrap(), ip, FULLNODE_PORT_RANGE); - check_socket(&node.sockets.gossip, ip, FULLNODE_PORT_RANGE); - check_socket(&node.sockets.repair, ip, FULLNODE_PORT_RANGE); + check_socket(&node.sockets.storage.unwrap(), ip, VALIDATOR_PORT_RANGE); + check_socket(&node.sockets.gossip, ip, VALIDATOR_PORT_RANGE); + check_socket(&node.sockets.repair, ip, VALIDATOR_PORT_RANGE); - check_sockets(&node.sockets.tvu, ip, FULLNODE_PORT_RANGE); + check_sockets(&node.sockets.tvu, ip, VALIDATOR_PORT_RANGE); } //test that all cluster_info objects only generate signed messages @@ -2446,4 +2464,45 @@ mod tests { assert_eq!(peers_and_stakes[0].0, 10); assert_eq!(peers_and_stakes[1].0, 1); } + + #[test] + fn test_pull_from_entrypoint_if_not_present() { + let node_keypair = Arc::new(Keypair::new()); + let mut cluster_info = ClusterInfo::new( + ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), + node_keypair, + ); + let entrypoint_pubkey = Pubkey::new_rand(); + let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); + entrypoint.gossip = socketaddr!("127.0.0.2:1234"); + cluster_info.set_entrypoint(entrypoint.clone()); + + let mut stakes = HashMap::new(); + + let other_node_pubkey = Pubkey::new_rand(); + let other_node = ContactInfo::new_localhost(&other_node_pubkey, timestamp()); + assert_ne!(other_node.gossip, entrypoint.gossip); + cluster_info.insert_info(other_node.clone()); + stakes.insert(other_node_pubkey, 10); + + // Pull request 1: `other_node` is present but `entrypoint` was just added (so it has a + // fresh timestamp). There should only be one pull request to `other_node` + let pulls = cluster_info.new_pull_requests(&stakes); + assert_eq!(1, pulls.len() as u64); + assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); + + // Pull request 2: pretend it's been a while since we've pulled from `entrypoint`. There should + // now be two pull requests + cluster_info.entrypoint.as_mut().unwrap().wallclock = 0; + let pulls = cluster_info.new_pull_requests(&stakes); + assert_eq!(2, pulls.len() as u64); + assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); + assert_eq!(pulls.get(1).unwrap().0, entrypoint.gossip); + + // Pull request 3: `other_node` is present and `entrypoint` was just pulled from. There should + // only be one pull request to `other_node` + let pulls = cluster_info.new_pull_requests(&stakes); + assert_eq!(1, pulls.len() as u64); + assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); + } } diff --git a/core/src/cluster_info_repair_listener.rs b/core/src/cluster_info_repair_listener.rs index 2c4feed2651675..8a6761325b0a7e 100644 --- a/core/src/cluster_info_repair_listener.rs +++ b/core/src/cluster_info_repair_listener.rs @@ -8,17 +8,18 @@ use rand::seq::SliceRandom; use rand::SeedableRng; use rand_chacha::ChaChaRng; use solana_metrics::datapoint; -use solana_runtime::epoch_schedule::EpochSchedule; -use solana_sdk::pubkey::Pubkey; -use std::cmp; -use std::collections::HashMap; -use std::mem; -use std::net::SocketAddr; -use std::net::UdpSocket; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; -use std::thread::{self, sleep, Builder, JoinHandle}; -use std::time::Duration; +use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey}; +use std::{ + cmp, + collections::HashMap, + mem, + net::SocketAddr, + net::UdpSocket, + sync::atomic::{AtomicBool, Ordering}, + sync::{Arc, RwLock}, + thread::{self, sleep, Builder, JoinHandle}, + time::Duration, +}; pub const REPAIRMEN_SLEEP_MILLIS: usize = 100; pub const REPAIR_REDUNDANCY: usize = 1; @@ -278,7 +279,7 @@ impl ClusterInfoRepairListener { let mut total_coding_blobs_sent = 0; let mut num_slots_repaired = 0; let max_confirmed_repairee_epoch = - epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root); + epoch_schedule.get_leader_schedule_epoch(repairee_epoch_slots.root); let max_confirmed_repairee_slot = epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch); @@ -655,7 +656,7 @@ mod tests { let eligible_repairmen_refs: Vec<_> = eligible_repairmen.iter().collect(); // Have all the repairman send the repairs - let epoch_schedule = EpochSchedule::new(32, 16, false); + let epoch_schedule = EpochSchedule::custom(32, 16, false); let num_missing_slots = num_slots / 2; for repairman_pubkey in &eligible_repairmen { ClusterInfoRepairListener::serve_repairs_to_repairee( @@ -699,7 +700,7 @@ mod tests { let blocktree = Blocktree::open(&blocktree_path).unwrap(); let stakers_slot_offset = 16; let slots_per_epoch = stakers_slot_offset * 2; - let epoch_schedule = EpochSchedule::new(slots_per_epoch, stakers_slot_offset, false); + let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false); // Create blobs for first two epochs and write them to blocktree let total_slots = slots_per_epoch * 2; diff --git a/core/src/confidence.rs b/core/src/confidence.rs index 50c7b6c702af46..0243e58e440b97 100644 --- a/core/src/confidence.rs +++ b/core/src/confidence.rs @@ -10,7 +10,7 @@ use std::sync::{Arc, RwLock}; use std::thread::{self, Builder, JoinHandle}; use std::time::Duration; -#[derive(Debug, Default, Eq, PartialEq)] +#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct BankConfidence { confidence: [u64; MAX_LOCKOUT_HISTORY], } @@ -25,25 +25,56 @@ impl BankConfidence { assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY); self.confidence[confirmation_count - 1] } + #[cfg(test)] + pub(crate) fn new(confidence: [u64; MAX_LOCKOUT_HISTORY]) -> Self { + Self { confidence } + } } -#[derive(Default)] +#[derive(Debug, Default)] pub struct ForkConfidenceCache { bank_confidence: HashMap, - _total_stake: u64, + total_stake: u64, } impl ForkConfidenceCache { pub fn new(bank_confidence: HashMap, total_stake: u64) -> Self { Self { bank_confidence, - _total_stake: total_stake, + total_stake, } } pub fn get_fork_confidence(&self, fork: u64) -> Option<&BankConfidence> { self.bank_confidence.get(&fork) } + + pub fn total_stake(&self) -> u64 { + self.total_stake + } + + pub fn get_fork_with_depth_confidence( + &self, + minimum_depth: usize, + minimum_stake_percentage: f64, + ) -> Option { + self.bank_confidence + .iter() + .filter(|&(_, bank_confidence)| { + let fork_stake_minimum_depth: u64 = bank_confidence.confidence[minimum_depth..] + .iter() + .cloned() + .sum(); + fork_stake_minimum_depth as f64 / self.total_stake as f64 + >= minimum_stake_percentage + }) + .map(|(slot, _)| *slot) + .max() + } + + pub fn get_rooted_fork_with_confidence(&self, minimum_stake_percentage: f64) -> Option { + self.get_fork_with_depth_confidence(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage) + } } pub struct ConfidenceAggregationData { @@ -223,6 +254,87 @@ mod tests { assert_eq!(cache.get_confirmation_stake(1), 30); } + #[test] + fn test_get_fork_with_depth_confidence() { + // Build ForkConfidenceCache with votes at depths 0 and 1 for 2 slots + let mut cache0 = BankConfidence::default(); + cache0.increase_confirmation_stake(1, 15); + cache0.increase_confirmation_stake(2, 25); + + let mut cache1 = BankConfidence::default(); + cache1.increase_confirmation_stake(1, 10); + cache1.increase_confirmation_stake(2, 20); + + let mut bank_confidence = HashMap::new(); + bank_confidence.entry(0).or_insert(cache0.clone()); + bank_confidence.entry(1).or_insert(cache1.clone()); + let fork_confidence_cache = ForkConfidenceCache::new(bank_confidence, 50); + + // Neither slot has rooted votes + assert_eq!( + fork_confidence_cache.get_rooted_fork_with_confidence(0.1), + None + ); + // Neither slot meets the minimum level of confidence 0.6 at depth 1 + assert_eq!( + fork_confidence_cache.get_fork_with_depth_confidence(1, 0.6), + None + ); + // Only slot 0 meets the minimum level of confidence 0.5 at depth 1 + assert_eq!( + fork_confidence_cache.get_fork_with_depth_confidence(1, 0.5), + Some(0) + ); + // If multiple slots meet the minimum level of confidence, method should return the most recent + assert_eq!( + fork_confidence_cache.get_fork_with_depth_confidence(1, 0.4), + Some(1) + ); + // If multiple slots meet the minimum level of confidence, method should return the most recent + assert_eq!( + fork_confidence_cache.get_fork_with_depth_confidence(0, 0.6), + Some(1) + ); + // Neither slot meets the minimum level of confidence 0.9 at depth 0 + assert_eq!( + fork_confidence_cache.get_fork_with_depth_confidence(0, 0.9), + None + ); + } + + #[test] + fn test_get_rooted_fork_with_confidence() { + // Build ForkConfidenceCache with rooted votes + let mut cache0 = BankConfidence::new([0; MAX_LOCKOUT_HISTORY]); + cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40); + cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10); + let mut cache1 = BankConfidence::new([0; MAX_LOCKOUT_HISTORY]); + cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30); + cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10); + cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10); + + let mut bank_confidence = HashMap::new(); + bank_confidence.entry(0).or_insert(cache0.clone()); + bank_confidence.entry(1).or_insert(cache1.clone()); + let fork_confidence_cache = ForkConfidenceCache::new(bank_confidence, 50); + + // Only slot 0 meets the minimum level of confidence 0.66 at root + assert_eq!( + fork_confidence_cache.get_rooted_fork_with_confidence(0.66), + Some(0) + ); + // If multiple slots meet the minimum level of confidence, method should return the most recent + assert_eq!( + fork_confidence_cache.get_rooted_fork_with_confidence(0.6), + Some(1) + ); + // Neither slot meets the minimum level of confidence 0.9 at root + assert_eq!( + fork_confidence_cache.get_rooted_fork_with_confidence(0.9), + None + ); + } + #[test] fn test_aggregate_confidence_for_vote_account_1() { let ancestors = vec![3, 4, 5, 7, 9, 11]; diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 786efde574020d..aaa5fa82518698 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1,5 +1,5 @@ use crate::bank_forks::BankForks; -use solana_metrics::datapoint_info; +use solana_metrics::datapoint_debug; use solana_runtime::bank::Bank; use solana_sdk::account::Account; use solana_sdk::hash::Hash; @@ -99,7 +99,7 @@ impl Tower { vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64 ); debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64); - datapoint_info!( + datapoint_debug!( "tower-observed", ( "slot", @@ -214,7 +214,7 @@ impl Tower { self.lockouts.process_vote_unchecked(&vote); self.last_vote = vote; - datapoint_info!( + datapoint_debug!( "tower-vote", ("latest", slot, i64), ("root", self.lockouts.root_slot.unwrap_or(0), i64) diff --git a/core/src/cuda_runtime.rs b/core/src/cuda_runtime.rs index 252e81b3eccf39..27c2e38bbc68af 100644 --- a/core/src/cuda_runtime.rs +++ b/core/src/cuda_runtime.rs @@ -5,7 +5,6 @@ // copies from host memory to GPU memory unless the memory is page-pinned and // cannot be paged to disk. The cuda driver provides these interfaces to pin and unpin memory. -#[cfg(feature = "pin_gpu_memory")] use crate::perf_libs; use crate::recycler::Reset; use std::ops::{Deref, DerefMut}; @@ -195,12 +194,10 @@ impl PinnedVec { self.x.len() } - #[cfg(feature = "cuda")] pub fn as_ptr(&self) -> *const T { self.x.as_ptr() } - #[cfg(feature = "cuda")] pub fn as_mut_ptr(&mut self) -> *mut T { self.x.as_mut_ptr() } @@ -230,23 +227,23 @@ impl PinnedVec { } fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) { - #[cfg(feature = "cuda")] + let api = perf_libs::api(); + if api.is_some() + && self.pinnable + && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity) { - if self.pinnable && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity) - { - if self.pinned { - unpin(_old_ptr); - } - - trace!( - "pinning from check_ptr old: {} size: {} from: {}", - _old_capacity, - self.x.capacity(), - _from - ); - pin(&mut self.x); - self.pinned = true; + if self.pinned { + unpin(_old_ptr); } + + trace!( + "pinning from check_ptr old: {} size: {} from: {}", + _old_capacity, + self.x.capacity(), + _from + ); + pin(&mut self.x); + self.pinned = true; } } } diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 7c043248cb85e9..9b70f5a3f0f4ad 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -61,6 +61,10 @@ impl FetchStage { while let Ok(more) = recvr.try_recv() { len += more.packets.len(); batch.push(more); + // Read at most 1K transactions in a loop + if len > 1024 { + break; + } } if poh_recorder.lock().unwrap().would_be_leader( diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index 45a9e08db259fa..c0cb25d1387aaf 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -2,7 +2,7 @@ use crate::bank_forks::BankForks; use crate::blocktree::Blocktree; -use crate::cluster_info::{ClusterInfo, FULLNODE_PORT_RANGE}; +use crate::cluster_info::{ClusterInfo, VALIDATOR_PORT_RANGE}; use crate::contact_info::ContactInfo; use crate::service::Service; use crate::streamer; @@ -119,7 +119,7 @@ pub fn get_clients(nodes: &[ContactInfo]) -> Vec { nodes .iter() .filter_map(ContactInfo::valid_client_facing_addr) - .map(|addrs| create_client(addrs, FULLNODE_PORT_RANGE)) + .map(|addrs| create_client(addrs, VALIDATOR_PORT_RANGE)) .collect() } @@ -130,7 +130,7 @@ pub fn get_client(nodes: &[ContactInfo]) -> ThinClient { .filter_map(ContactInfo::valid_client_facing_addr) .collect(); let select = thread_rng().gen_range(0, nodes.len()); - create_client(nodes[select], FULLNODE_PORT_RANGE) + create_client(nodes[select], VALIDATOR_PORT_RANGE) } pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) { @@ -141,7 +141,7 @@ pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) { .collect(); let rpc_addrs: Vec<_> = addrs.iter().map(|addr| addr.0).collect(); let tpu_addrs: Vec<_> = addrs.iter().map(|addr| addr.1).collect(); - let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap(); + let (_, transactions_socket) = solana_netutil::bind_in_range(VALIDATOR_PORT_RANGE).unwrap(); let num_nodes = tpu_addrs.len(); ( ThinClient::new_from_addrs(rpc_addrs, tpu_addrs, transactions_socket), diff --git a/core/src/leader_schedule_cache.rs b/core/src/leader_schedule_cache.rs index 007d5552b693e4..f211d8db39ea6e 100644 --- a/core/src/leader_schedule_cache.rs +++ b/core/src/leader_schedule_cache.rs @@ -1,22 +1,28 @@ -use crate::blocktree::Blocktree; -use crate::leader_schedule::LeaderSchedule; -use crate::leader_schedule_utils; +use crate::{blocktree::Blocktree, leader_schedule::LeaderSchedule, leader_schedule_utils}; use solana_runtime::bank::Bank; -use solana_runtime::epoch_schedule::EpochSchedule; -use solana_sdk::pubkey::Pubkey; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, VecDeque}; -use std::sync::{Arc, RwLock}; +use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey}; +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + sync::{Arc, RwLock}, +}; type CachedSchedules = (HashMap>, VecDeque); const MAX_SCHEDULES: usize = 10; +struct CacheCapacity(usize); +impl Default for CacheCapacity { + fn default() -> Self { + CacheCapacity(MAX_SCHEDULES) + } +} + #[derive(Default)] pub struct LeaderScheduleCache { // Map from an epoch to a leader schedule for that epoch pub cached_schedules: RwLock, epoch_schedule: EpochSchedule, max_epoch: RwLock, + max_schedules: CacheCapacity, } impl LeaderScheduleCache { @@ -29,13 +35,14 @@ impl LeaderScheduleCache { cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())), epoch_schedule, max_epoch: RwLock::new(0), + max_schedules: CacheCapacity::default(), }; - // This sets the root and calculates the schedule at stakers_epoch(root) + // This sets the root and calculates the schedule at leader_schedule_epoch(root) cache.set_root(root_bank); - // Calculate the schedule for all epochs between 0 and stakers_epoch(root) - let stakers_epoch = epoch_schedule.get_stakers_epoch(root_bank.slot()); + // Calculate the schedule for all epochs between 0 and leader_schedule_epoch(root) + let stakers_epoch = epoch_schedule.get_leader_schedule_epoch(root_bank.slot()); for epoch in 0..stakers_epoch { let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); cache.slot_leader_at(first_slot_in_epoch, Some(root_bank)); @@ -43,8 +50,20 @@ impl LeaderScheduleCache { cache } + pub fn set_max_schedules(&mut self, max_schedules: usize) { + if max_schedules > 0 { + self.max_schedules = CacheCapacity(max_schedules); + } + } + + pub fn max_schedules(&self) -> usize { + self.max_schedules.0 + } + pub fn set_root(&self, root_bank: &Bank) { - let new_max_epoch = self.epoch_schedule.get_stakers_epoch(root_bank.slot()); + let new_max_epoch = self + .epoch_schedule + .get_leader_schedule_epoch(root_bank.slot()); let old_max_epoch = { let mut max_epoch = self.max_epoch.write().unwrap(); let old_max_epoch = *max_epoch; @@ -189,14 +208,18 @@ impl LeaderScheduleCache { if let Entry::Vacant(v) = entry { v.insert(leader_schedule.clone()); order.push_back(epoch); - Self::retain_latest(cached_schedules, order); + Self::retain_latest(cached_schedules, order, self.max_schedules()); } leader_schedule }) } - fn retain_latest(schedules: &mut HashMap>, order: &mut VecDeque) { - if schedules.len() > MAX_SCHEDULES { + fn retain_latest( + schedules: &mut HashMap>, + order: &mut VecDeque, + max_schedules: usize, + ) { + while schedules.len() > max_schedules { let first = order.pop_front().unwrap(); schedules.remove(&first); } @@ -206,19 +229,20 @@ impl LeaderScheduleCache { #[cfg(test)] mod tests { use super::*; - use crate::blocktree::tests::make_slot_entries; - use crate::genesis_utils::create_genesis_block; - use crate::genesis_utils::{ - create_genesis_block_with_leader, GenesisBlockInfo, BOOTSTRAP_LEADER_LAMPORTS, + use crate::{ + blocktree::{get_tmp_ledger_path, tests::make_slot_entries}, + genesis_utils::{ + create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo, + BOOTSTRAP_LEADER_LAMPORTS, + }, + staking_utils::tests::setup_vote_and_stake_accounts, }; - use crate::staking_utils::tests::setup_vote_and_stake_accounts; use solana_runtime::bank::Bank; - use solana_runtime::epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread::Builder; - - use crate::blocktree::get_tmp_ledger_path; + use solana_sdk::epoch_schedule::{ + EpochSchedule, DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, DEFAULT_SLOTS_PER_EPOCH, + MINIMUM_SLOTS_PER_EPOCH, + }; + use std::{sync::mpsc::channel, sync::Arc, thread::Builder}; #[test] fn test_new_cache() { @@ -226,12 +250,13 @@ mod tests { let bank = Bank::new(&genesis_block); let cache = LeaderScheduleCache::new_from_bank(&bank); assert_eq!(bank.slot(), 0); + assert_eq!(cache.max_schedules(), MAX_SCHEDULES); // Epoch schedule for all epochs in the range: // [0, stakers_epoch(bank.slot())] should // be calculated by constructor let epoch_schedule = bank.epoch_schedule(); - let stakers_epoch = bank.get_stakers_epoch(bank.slot()); + let stakers_epoch = bank.get_leader_schedule_epoch(bank.slot()); for epoch in 0..=stakers_epoch { let first_slot_in_stakers_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); let last_slot_in_stakers_epoch = epoch_schedule.get_last_slot_in_epoch(epoch); @@ -263,7 +288,7 @@ mod tests { cached_schedules.insert(i as u64, Arc::new(LeaderSchedule::default())); order.push_back(i as u64); } - LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order); + LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES); assert_eq!(cached_schedules.len(), MAX_SCHEDULES); let mut keys: Vec<_> = cached_schedules.keys().cloned().collect(); keys.sort(); @@ -283,7 +308,7 @@ mod tests { fn run_thread_race() { let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64; - let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true); + let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true); let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2); let bank = Arc::new(Bank::new(&genesis_block)); let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, &bank)); @@ -329,7 +354,11 @@ mod tests { BOOTSTRAP_LEADER_LAMPORTS, ) .genesis_block; - genesis_block.epoch_warmup = false; + genesis_block.epoch_schedule = EpochSchedule::custom( + DEFAULT_SLOTS_PER_EPOCH, + DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, + false, + ); let bank = Bank::new(&genesis_block); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); @@ -349,7 +378,7 @@ mod tests { assert_eq!( cache.next_leader_slot( &pubkey, - 2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2 + 2 * genesis_block.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2 &bank, None ), @@ -376,7 +405,7 @@ mod tests { BOOTSTRAP_LEADER_LAMPORTS, ) .genesis_block; - genesis_block.epoch_warmup = false; + genesis_block.epoch_schedule.warmup = false; let bank = Bank::new(&genesis_block); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); @@ -428,7 +457,7 @@ mod tests { assert_eq!( cache.next_leader_slot( &pubkey, - 2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2 + 2 * genesis_block.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2 &bank, Some(&blocktree) ), @@ -455,7 +484,7 @@ mod tests { mint_keypair, .. } = create_genesis_block(10_000); - genesis_block.epoch_warmup = false; + genesis_block.epoch_schedule.warmup = false; let bank = Bank::new(&genesis_block); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); @@ -474,14 +503,14 @@ mod tests { // Have to wait until the epoch at after the epoch stakes generated at genesis // for the new votes to take effect. let mut target_slot = 1; - let epoch = bank.get_stakers_epoch(0); - while bank.get_stakers_epoch(target_slot) == epoch { + let epoch = bank.get_leader_schedule_epoch(0); + while bank.get_leader_schedule_epoch(target_slot) == epoch { target_slot += 1; } let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot); let mut expected_slot = 0; - let epoch = bank.get_stakers_epoch(target_slot); + let epoch = bank.get_leader_schedule_epoch(target_slot); for i in 0..epoch { expected_slot += bank.get_slots_in_epoch(i); } @@ -490,7 +519,7 @@ mod tests { let mut index = 0; while schedule[index] != node_pubkey { index += 1; - assert_ne!(index, genesis_block.slots_per_epoch); + assert_ne!(index, genesis_block.epoch_schedule.slots_per_epoch); } expected_slot += index; @@ -539,4 +568,18 @@ mod tests { assert_eq!(bank2.get_epoch_and_slot_index(224).0, 3); assert!(cache.slot_leader_at(224, Some(&bank2)).is_none()); } + + #[test] + fn test_set_max_schedules() { + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2); + let bank = Arc::new(Bank::new(&genesis_block)); + let mut cache = LeaderScheduleCache::new_from_bank(&bank); + + // Max schedules must be greater than 0 + cache.set_max_schedules(0); + assert_eq!(cache.max_schedules(), MAX_SCHEDULES); + + cache.set_max_schedules(std::usize::MAX); + assert_eq!(cache.max_schedules(), std::usize::MAX); + } } diff --git a/core/src/leader_schedule_utils.rs b/core/src/leader_schedule_utils.rs index 0bfb9f109fb970..00c10079f30440 100644 --- a/core/src/leader_schedule_utils.rs +++ b/core/src/leader_schedule_utils.rs @@ -69,7 +69,7 @@ mod tests { let leader_schedule = LeaderSchedule::new( &pubkeys_and_stakes, seed, - genesis_block.slots_per_epoch, + genesis_block.epoch_schedule.slots_per_epoch, NUM_CONSECUTIVE_LEADER_SLOTS, ); diff --git a/core/src/lib.rs b/core/src/lib.rs index 3d74112e5684ca..b5be3afc2ce7e6 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -2,12 +2,13 @@ //! It includes a full Rust implementation of the architecture (see //! [Validator](server/struct.Validator.html)) as well as hooks to GPU implementations of its most //! paralellizable components (i.e. [SigVerify](sigverify/index.html)). It also includes -//! command-line tools to spin up fullnodes and a Rust library +//! command-line tools to spin up validators and a Rust library //! pub mod bank_forks; pub mod banking_stage; -pub mod blob_fetch_stage; +#[macro_use] +pub mod blocktree; pub mod broadcast_stage; pub mod chacha; pub mod chacha_cuda; @@ -15,22 +16,21 @@ pub mod cluster_info_vote_listener; pub mod confidence; pub mod perf_libs; pub mod recycler; +pub mod shred_fetch_stage; #[macro_use] pub mod contact_info; -pub mod crds; -pub mod crds_gossip; -pub mod crds_gossip_error; -pub mod crds_gossip_pull; -pub mod crds_gossip_push; -pub mod crds_value; -#[macro_use] -pub mod blocktree; pub mod blockstream; pub mod blockstream_service; pub mod blocktree_processor; pub mod cluster_info; pub mod cluster_info_repair_listener; pub mod consensus; +pub mod crds; +pub mod crds_gossip; +pub mod crds_gossip_error; +pub mod crds_gossip_pull; +pub mod crds_gossip_push; +pub mod crds_value; pub mod cuda_runtime; pub mod entry; pub mod erasure; @@ -58,6 +58,7 @@ pub mod rpc_pubsub; pub mod rpc_pubsub_service; pub mod rpc_service; pub mod rpc_subscriptions; +pub mod sendmmsg; pub mod service; pub mod shred; pub mod sigverify; diff --git a/core/src/local_vote_signer_service.rs b/core/src/local_vote_signer_service.rs index e01b979f768ea1..3d7619b66b2368 100644 --- a/core/src/local_vote_signer_service.rs +++ b/core/src/local_vote_signer_service.rs @@ -1,4 +1,4 @@ -//! The `local_vote_signer_service` can be started locally to sign fullnode votes +//! The `local_vote_signer_service` can be started locally to sign validator votes use crate::service::Service; use solana_netutil::PortRange; diff --git a/core/src/packet.rs b/core/src/packet.rs index b2faac352b3a1d..6dc966f34f4b9f 100644 --- a/core/src/packet.rs +++ b/core/src/packet.rs @@ -41,6 +41,7 @@ pub const PACKETS_BATCH_SIZE: usize = (PACKETS_PER_BATCH * PACKET_DATA_SIZE); pub struct Meta { pub size: usize, pub forward: bool, + pub repair: bool, pub addr: [u16; 8], pub port: u16, pub v6: bool, diff --git a/core/src/poh.rs b/core/src/poh.rs index 8e66bf82e63a09..d47afdb0b154f7 100644 --- a/core/src/poh.rs +++ b/core/src/poh.rs @@ -1,5 +1,7 @@ //! The `Poh` module provides an object for generating a Proof of History. use solana_sdk::hash::{hash, hashv, Hash}; +use std::thread::{Builder, JoinHandle}; +use std::time::{Duration, Instant}; pub struct Poh { pub hash: Hash, @@ -80,6 +82,37 @@ impl Poh { } } +pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u64 { + let num_cpu = sys_info::cpu_num().unwrap(); + // calculate hash rate with the system under maximum load + info!( + "Running {} hashes in parallel on all threads...", + hashes_sample_size + ); + let threads: Vec> = (0..num_cpu) + .map(|_| { + Builder::new() + .name("solana-poh".to_string()) + .spawn(move || { + let mut v = Hash::default(); + let start = Instant::now(); + for _ in 0..hashes_sample_size { + v = hash(&v.as_ref()); + } + start.elapsed().as_millis() as u64 + }) + .unwrap() + }) + .collect(); + + let avg_elapsed = (threads + .into_iter() + .map(|elapsed| elapsed.join().unwrap()) + .sum::()) + / u64::from(num_cpu); + duration.as_millis() as u64 * hashes_sample_size / avg_elapsed +} + #[cfg(test)] mod tests { use crate::poh::{Poh, PohEntry}; diff --git a/core/src/recvmmsg.rs b/core/src/recvmmsg.rs index 99d5dc83eefcda..16b9cab7e9558b 100644 --- a/core/src/recvmmsg.rs +++ b/core/src/recvmmsg.rs @@ -5,7 +5,7 @@ use std::cmp; use std::io; use std::net::UdpSocket; -pub const NUM_RCVMMSGS: usize = 16; +pub const NUM_RCVMMSGS: usize = 128; #[cfg(not(target_os = "linux"))] pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize, usize)> { @@ -92,19 +92,20 @@ mod tests { use crate::recvmmsg::*; use std::time::{Duration, Instant}; + const TEST_NUM_MSGS: usize = 32; #[test] pub fn test_recv_mmsg_one_iter() { let reader = UdpSocket::bind("127.0.0.1:0").expect("bind"); let addr = reader.local_addr().unwrap(); let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr = sender.local_addr().unwrap(); - let sent = NUM_RCVMMSGS - 1; + let sent = TEST_NUM_MSGS - 1; for _ in 0..sent { let data = [0; PACKET_DATA_SIZE]; sender.send_to(&data[..], &addr).unwrap(); } - let mut packets = vec![Packet::default(); NUM_RCVMMSGS]; + let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; assert_eq!(sent, recv); for i in 0..recv { @@ -119,22 +120,22 @@ mod tests { let addr = reader.local_addr().unwrap(); let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr = sender.local_addr().unwrap(); - let sent = NUM_RCVMMSGS + 10; + let sent = TEST_NUM_MSGS + 10; for _ in 0..sent { let data = [0; PACKET_DATA_SIZE]; sender.send_to(&data[..], &addr).unwrap(); } - let mut packets = vec![Packet::default(); NUM_RCVMMSGS * 2]; + let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; - assert_eq!(NUM_RCVMMSGS, recv); + assert_eq!(TEST_NUM_MSGS, recv); for i in 0..recv { assert_eq!(packets[i].meta.size, PACKET_DATA_SIZE); assert_eq!(packets[i].meta.addr(), saddr); } let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; - assert_eq!(sent - NUM_RCVMMSGS, recv); + assert_eq!(sent - TEST_NUM_MSGS, recv); for i in 0..recv { assert_eq!(packets[i].meta.size, PACKET_DATA_SIZE); assert_eq!(packets[i].meta.addr(), saddr); @@ -149,16 +150,16 @@ mod tests { reader.set_nonblocking(false).unwrap(); let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr = sender.local_addr().unwrap(); - let sent = NUM_RCVMMSGS; + let sent = TEST_NUM_MSGS; for _ in 0..sent { let data = [0; PACKET_DATA_SIZE]; sender.send_to(&data[..], &addr).unwrap(); } let start = Instant::now(); - let mut packets = vec![Packet::default(); NUM_RCVMMSGS * 2]; + let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; - assert_eq!(NUM_RCVMMSGS, recv); + assert_eq!(TEST_NUM_MSGS, recv); for i in 0..recv { assert_eq!(packets[i].meta.size, PACKET_DATA_SIZE); assert_eq!(packets[i].meta.addr(), saddr); @@ -176,11 +177,11 @@ mod tests { let sender1 = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr1 = sender1.local_addr().unwrap(); - let sent1 = NUM_RCVMMSGS - 1; + let sent1 = TEST_NUM_MSGS - 1; let sender2 = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr2 = sender2.local_addr().unwrap(); - let sent2 = NUM_RCVMMSGS + 1; + let sent2 = TEST_NUM_MSGS + 1; for _ in 0..sent1 { let data = [0; PACKET_DATA_SIZE]; @@ -192,10 +193,10 @@ mod tests { sender2.send_to(&data[..], &addr).unwrap(); } - let mut packets = vec![Packet::default(); NUM_RCVMMSGS * 2]; + let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; - assert_eq!(NUM_RCVMMSGS, recv); + assert_eq!(TEST_NUM_MSGS, recv); for i in 0..sent1 { assert_eq!(packets[i].meta.size, PACKET_DATA_SIZE); assert_eq!(packets[i].meta.addr(), saddr1); @@ -207,10 +208,55 @@ mod tests { } let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; - assert_eq!(sent1 + sent2 - NUM_RCVMMSGS, recv); + assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv); for i in 0..recv { assert_eq!(packets[i].meta.size, PACKET_DATA_SIZE); assert_eq!(packets[i].meta.addr(), saddr2); } } + + #[cfg(target_os = "linux")] + #[test] + pub fn test_recv_mmsg_batch_size() { + let reader = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr = reader.local_addr().unwrap(); + let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); + + const TEST_BATCH_SIZE: usize = 64; + let sent = TEST_BATCH_SIZE; + + let mut elapsed_in_max_batch = 0; + (0..1000).for_each(|_| { + for _ in 0..sent { + let data = [0; PACKET_DATA_SIZE]; + sender.send_to(&data[..], &addr).unwrap(); + } + let mut packets = vec![Packet::default(); TEST_BATCH_SIZE]; + let now = Instant::now(); + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + elapsed_in_max_batch += now.elapsed().as_nanos(); + assert_eq!(TEST_BATCH_SIZE, recv); + }); + + let mut elapsed_in_small_batch = 0; + (0..1000).for_each(|_| { + for _ in 0..sent { + let data = [0; PACKET_DATA_SIZE]; + sender.send_to(&data[..], &addr).unwrap(); + } + let mut packets = vec![Packet::default(); 4]; + let mut recv = 0; + let now = Instant::now(); + while let Ok(num) = recv_mmsg(&reader, &mut packets[..]) { + recv += num.1; + if recv >= TEST_BATCH_SIZE { + break; + } + } + elapsed_in_small_batch += now.elapsed().as_nanos(); + assert_eq!(TEST_BATCH_SIZE, recv); + }); + + assert!(elapsed_in_max_batch <= elapsed_in_small_batch); + } } diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 44e72bdc18404d..c2804996ca5d4f 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -1,22 +1,24 @@ //! The `repair_service` module implements the tools necessary to generate a thread which //! regularly finds missing blobs in the ledger and sends repair requests for those blobs -use crate::bank_forks::BankForks; -use crate::blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta}; -use crate::cluster_info::ClusterInfo; -use crate::cluster_info_repair_listener::ClusterInfoRepairListener; -use crate::result::Result; -use crate::service::Service; -use solana_metrics::datapoint_info; -use solana_runtime::epoch_schedule::EpochSchedule; -use solana_sdk::pubkey::Pubkey; -use std::collections::BTreeSet; -use std::net::UdpSocket; -use std::ops::Bound::{Excluded, Unbounded}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; -use std::thread::sleep; -use std::thread::{self, Builder, JoinHandle}; -use std::time::Duration; +use crate::{ + bank_forks::BankForks, + blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta}, + cluster_info::ClusterInfo, + cluster_info_repair_listener::ClusterInfoRepairListener, + result::Result, + service::Service, +}; +use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey}; +use std::{ + collections::BTreeSet, + net::UdpSocket, + ops::Bound::{Excluded, Unbounded}, + sync::atomic::{AtomicBool, Ordering}, + sync::{Arc, RwLock}, + thread::sleep, + thread::{self, Builder, JoinHandle}, + time::Duration, +}; pub const MAX_REPAIR_LENGTH: usize = 16; pub const REPAIR_MS: u64 = 100; @@ -170,7 +172,7 @@ impl RepairService { for ((to, req), repair_request) in reqs { if let Ok(local_addr) = repair_socket.local_addr() { - datapoint_info!( + datapoint_debug!( "repair_service", ("repair_request", format!("{:?}", repair_request), String), ("to", to.to_string(), String), @@ -299,7 +301,7 @@ impl RepairService { root: u64, epoch_schedule: &EpochSchedule, ) { - let last_confirmed_epoch = epoch_schedule.get_stakers_epoch(root); + let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root); let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch); let meta_iter = blocktree @@ -404,6 +406,7 @@ mod test { }; use crate::blocktree::{get_tmp_ledger_path, Blocktree}; use crate::cluster_info::Node; + use crate::shred::max_ticks_per_n_shreds; use itertools::Itertools; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; @@ -535,7 +538,7 @@ mod test { let blocktree = Blocktree::open(&blocktree_path).unwrap(); let slots: Vec = vec![1, 3, 5, 7, 8]; - let num_entries_per_slot = 10; + let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1; let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); for (mut slot_shreds, _) in shreds.into_iter() { @@ -651,7 +654,7 @@ mod test { .unwrap(); // Test that only slots > root from fork1 were included - let epoch_schedule = EpochSchedule::new(32, 32, false); + let epoch_schedule = EpochSchedule::custom(32, 32, false); RepairService::get_completed_slots_past_root( &blocktree, @@ -664,7 +667,7 @@ mod test { assert_eq!(full_slots, expected); // Test that slots past the last confirmed epoch boundary don't get included - let last_epoch = epoch_schedule.get_stakers_epoch(root); + let last_epoch = epoch_schedule.get_leader_schedule_epoch(root); let last_slot = epoch_schedule.get_last_slot_in_epoch(last_epoch); let fork3 = vec![last_slot, last_slot + 1]; let fork3_shreds: Vec<_> = make_chaining_slot_entries(&fork3, num_entries_per_slot) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index b80846847bd8b5..a15c2a49abae52 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -174,7 +174,7 @@ impl ReplayStage { if let Some(new_leader) = leader_schedule_cache.slot_leader_at(next_slot, Some(&bank)) { - datapoint_info!( + datapoint_debug!( "replay_stage-new_leader", ("slot", next_slot, i64), ("leader", new_leader.to_string(), String), @@ -330,7 +330,7 @@ impl ReplayStage { return; } - datapoint_info!( + datapoint_debug!( "replay_stage-new_leader", ("slot", poh_slot, i64), ("leader", next_leader.to_string(), String), @@ -364,7 +364,7 @@ impl ReplayStage { // Returns the replay result and the number of replayed transactions fn replay_blocktree_into_bank( - bank: &Bank, + bank: &Arc, blocktree: &Blocktree, progress: &mut HashMap, ) -> (Result<()>, usize) { @@ -675,7 +675,7 @@ impl ReplayStage { } fn replay_entries_into_bank( - bank: &Bank, + bank: &Arc, entries: Vec, progress: &mut HashMap, num: usize, @@ -698,7 +698,7 @@ impl ReplayStage { } pub fn verify_and_process_entries( - bank: &Bank, + bank: &Arc, entries: &[Entry], last_entry: &Hash, shred_index: usize, @@ -1005,7 +1005,7 @@ mod test { create_genesis_block_with_leader(50, &leader_pubkey, leader_lamports); let mut genesis_block = genesis_block_info.genesis_block; let leader_voting_pubkey = genesis_block_info.voting_keypair.pubkey(); - genesis_block.epoch_warmup = false; + genesis_block.epoch_schedule.warmup = false; genesis_block.ticks_per_slot = 4; let bank0 = Bank::new(&genesis_block); for _ in 1..genesis_block.ticks_per_slot { diff --git a/core/src/replicator.rs b/core/src/replicator.rs index 62a7c81e2569ae..5acb6564142aab 100644 --- a/core/src/replicator.rs +++ b/core/src/replicator.rs @@ -1,7 +1,6 @@ -use crate::blob_fetch_stage::BlobFetchStage; use crate::blocktree::Blocktree; use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE}; -use crate::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE}; +use crate::cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}; use crate::contact_info::ContactInfo; use crate::gossip_service::GossipService; use crate::leader_schedule_cache::LeaderScheduleCache; @@ -12,6 +11,7 @@ use crate::repair_service::{RepairService, RepairSlotRange, RepairStrategy}; use crate::result::{Error, Result}; use crate::service::Service; use crate::shred::Shred; +use crate::shred_fetch_stage::ShredFetchStage; use crate::storage_stage::NUM_STORAGE_SAMPLES; use crate::streamer::{receiver, responder, PacketReceiver}; use crate::window_service::WindowService; @@ -35,7 +35,7 @@ use solana_sdk::timing::timestamp; use solana_sdk::transaction::Transaction; use solana_sdk::transport::TransportError; use solana_storage_api::storage_contract::StorageContract; -use solana_storage_api::storage_instruction; +use solana_storage_api::storage_instruction::{self, StorageAccountType}; use std::fs::File; use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom}; use std::mem::size_of; @@ -253,9 +253,8 @@ impl Replicator { }; let repair_socket = Arc::new(node.sockets.repair); - let mut blob_sockets: Vec> = + let blob_sockets: Vec> = node.sockets.tvu.into_iter().map(Arc::new).collect(); - blob_sockets.push(repair_socket.clone()); let blob_forward_sockets: Vec> = node .sockets .tvu_forwards @@ -263,9 +262,10 @@ impl Replicator { .map(Arc::new) .collect(); let (blob_fetch_sender, blob_fetch_receiver) = channel(); - let fetch_stage = BlobFetchStage::new_multi_socket_packet( + let fetch_stage = ShredFetchStage::new( blob_sockets, blob_forward_sockets, + repair_socket.clone(), &blob_fetch_sender, &exit, ); @@ -600,11 +600,12 @@ impl Replicator { } }; - let ix = storage_instruction::create_replicator_storage_account( + let ix = storage_instruction::create_storage_account( &keypair.pubkey(), &keypair.pubkey(), &storage_keypair.pubkey(), 1, + StorageAccountType::Replicator, ); let tx = Transaction::new_signed_instructions(&[keypair], ix, blockhash); let signature = client.async_send_transaction(tx)?; @@ -804,7 +805,7 @@ impl Replicator { let exit = Arc::new(AtomicBool::new(false)); let (s_reader, r_reader) = channel(); - let repair_socket = Arc::new(bind_in_range(FULLNODE_PORT_RANGE).unwrap().1); + let repair_socket = Arc::new(bind_in_range(VALIDATOR_PORT_RANGE).unwrap().1); let t_receiver = receiver( repair_socket.clone(), &exit, @@ -906,7 +907,7 @@ impl Replicator { } fn get_replicator_segment_slot(to: SocketAddr) -> u64 { - let (_port, socket) = bind_in_range(FULLNODE_PORT_RANGE).unwrap(); + let (_port, socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap(); socket .set_read_timeout(Some(Duration::from_secs(5))) .unwrap(); diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index a63b532fb7638a..64414db11d5b80 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -1,65 +1,130 @@ //! The `retransmit_stage` retransmits blobs between validators -use crate::bank_forks::BankForks; -use crate::blocktree::{Blocktree, CompletedSlotsReceiver}; -use crate::cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT}; -use crate::leader_schedule_cache::LeaderScheduleCache; -use crate::repair_service::RepairStrategy; -use crate::result::{Error, Result}; -use crate::service::Service; -use crate::staking_utils; -use crate::streamer::PacketReceiver; -use crate::window_service::{should_retransmit_and_persist, WindowService}; +use crate::{ + bank_forks::BankForks, + blocktree::{Blocktree, CompletedSlotsReceiver}, + cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT}, + leader_schedule_cache::LeaderScheduleCache, + repair_service::RepairStrategy, + result::{Error, Result}, + service::Service, + staking_utils, + streamer::PacketReceiver, + window_service::{should_retransmit_and_persist, WindowService}, +}; use rand::SeedableRng; use rand_chacha::ChaChaRng; -use solana_metrics::{datapoint_info, inc_new_counter_error}; -use solana_runtime::epoch_schedule::EpochSchedule; -use std::cmp; -use std::net::UdpSocket; -use std::sync::atomic::AtomicBool; -use std::sync::mpsc::channel; -use std::sync::mpsc::RecvTimeoutError; -use std::sync::{Arc, RwLock}; -use std::thread::{self, Builder, JoinHandle}; -use std::time::Duration; +use solana_measure::measure::Measure; +use solana_metrics::inc_new_counter_error; +use solana_sdk::epoch_schedule::EpochSchedule; +use std::{ + cmp, + net::UdpSocket, + sync::atomic::AtomicBool, + sync::mpsc::channel, + sync::mpsc::RecvTimeoutError, + sync::Mutex, + sync::{Arc, RwLock}, + thread::{self, Builder, JoinHandle}, + time::Duration, +}; + +// Limit a given thread to consume about this many packets so that +// it doesn't pull up too much work. +const MAX_PACKET_BATCH_SIZE: usize = 100; fn retransmit( bank_forks: &Arc>, leader_schedule_cache: &Arc, cluster_info: &Arc>, - r: &PacketReceiver, + r: &Arc>, sock: &UdpSocket, + id: u32, ) -> Result<()> { let timer = Duration::new(1, 0); - let mut packets = r.recv_timeout(timer)?; - while let Ok(mut nq) = r.try_recv() { - packets.packets.append(&mut nq.packets); + let r_lock = r.lock().unwrap(); + let packets = r_lock.recv_timeout(timer)?; + let mut timer_start = Measure::start("retransmit"); + let mut total_packets = packets.packets.len(); + let mut packet_v = vec![packets]; + while let Ok(nq) = r_lock.try_recv() { + total_packets += nq.packets.len(); + packet_v.push(nq); + if total_packets >= MAX_PACKET_BATCH_SIZE { + break; + } } - - datapoint_info!("retransmit-stage", ("count", packets.packets.len(), i64)); + drop(r_lock); let r_bank = bank_forks.read().unwrap().working_bank(); - let bank_epoch = r_bank.get_stakers_epoch(r_bank.slot()); + let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot()); let mut peers_len = 0; - for packet in &packets.packets { - let (my_index, mut peers) = cluster_info.read().unwrap().shuffle_peers_and_index( - staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch).as_ref(), - ChaChaRng::from_seed(packet.meta.seed), - ); - peers_len = cmp::max(peers_len, peers.len()); - peers.remove(my_index); + let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch); + let (peers, stakes_and_index) = cluster_info + .read() + .unwrap() + .sorted_retransmit_peers_and_stakes(stakes.as_ref()); + let me = cluster_info.read().unwrap().my_data().clone(); + let mut retransmit_total = 0; + let mut compute_turbine_peers_total = 0; + for mut packets in packet_v { + for packet in packets.packets.iter_mut() { + // skip repair packets + if packet.meta.repair { + total_packets -= 1; + continue; + } + let mut compute_turbine_peers = Measure::start("turbine_start"); + let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index( + &me.id, + &peers, + &stakes_and_index, + ChaChaRng::from_seed(packet.meta.seed), + ); + peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len()); + shuffled_stakes_and_index.remove(my_index); + // split off the indexes, we don't need the stakes anymore + let indexes = shuffled_stakes_and_index + .into_iter() + .map(|(_, index)| index) + .collect(); - let (neighbors, children) = compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, peers); + let (neighbors, children) = + compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes); + let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect(); + let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect(); + compute_turbine_peers.stop(); + compute_turbine_peers_total += compute_turbine_peers.as_ms(); - let leader = leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref())); - if !packet.meta.forward { - ClusterInfo::retransmit_to(&cluster_info, &neighbors, packet, leader, sock, true)?; - ClusterInfo::retransmit_to(&cluster_info, &children, packet, leader, sock, false)?; - } else { - ClusterInfo::retransmit_to(&cluster_info, &children, packet, leader, sock, true)?; + let leader = + leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref())); + let mut retransmit_time = Measure::start("retransmit_to"); + if !packet.meta.forward { + ClusterInfo::retransmit_to(&neighbors, packet, leader, sock, true)?; + ClusterInfo::retransmit_to(&children, packet, leader, sock, false)?; + } else { + ClusterInfo::retransmit_to(&children, packet, leader, sock, true)?; + } + retransmit_time.stop(); + retransmit_total += retransmit_time.as_ms(); } } - datapoint_info!("cluster_info-num_nodes", ("count", peers_len, i64)); + timer_start.stop(); + debug!( + "retransmitted {} packets in {}ms retransmit_time: {}ms id: {}", + total_packets, + timer_start.as_ms(), + retransmit_total, + id, + ); + datapoint_debug!("cluster_info-num_nodes", ("count", peers_len, i64)); + datapoint_debug!( + "retransmit-stage", + ("total_time", timer_start.as_ms() as i64, i64), + ("total_packets", total_packets as i64, i64), + ("retransmit_total", retransmit_total as i64, i64), + ("compute_turbine", compute_turbine_peers_total as i64, i64), + ); Ok(()) } @@ -71,39 +136,48 @@ fn retransmit( /// * `cluster_info` - This structure needs to be updated and populated by the bank and via gossip. /// * `recycler` - Blob recycler. /// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes. -fn retransmitter( - sock: Arc, +pub fn retransmitter( + sockets: Arc>, bank_forks: Arc>, leader_schedule_cache: &Arc, cluster_info: Arc>, - r: PacketReceiver, -) -> JoinHandle<()> { - let bank_forks = bank_forks.clone(); - let leader_schedule_cache = leader_schedule_cache.clone(); - Builder::new() - .name("solana-retransmitter".to_string()) - .spawn(move || { - trace!("retransmitter started"); - loop { - if let Err(e) = retransmit( - &bank_forks, - &leader_schedule_cache, - &cluster_info, - &r, - &sock, - ) { - match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, - Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), - _ => { - inc_new_counter_error!("streamer-retransmit-error", 1, 1); + r: Arc>, +) -> Vec> { + (0..sockets.len()) + .map(|s| { + let sockets = sockets.clone(); + let bank_forks = bank_forks.clone(); + let leader_schedule_cache = leader_schedule_cache.clone(); + let r = r.clone(); + let cluster_info = cluster_info.clone(); + + Builder::new() + .name("solana-retransmitter".to_string()) + .spawn(move || { + trace!("retransmitter started"); + loop { + if let Err(e) = retransmit( + &bank_forks, + &leader_schedule_cache, + &cluster_info, + &r, + &sockets[s], + s as u32, + ) { + match e { + Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, + Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), + _ => { + inc_new_counter_error!("streamer-retransmit-error", 1, 1); + } + } } } - } - } - trace!("exiting retransmitter"); + trace!("exiting retransmitter"); + }) + .unwrap() }) - .unwrap() + .collect() } pub struct RetransmitStage { @@ -119,7 +193,7 @@ impl RetransmitStage { leader_schedule_cache: &Arc, blocktree: Arc, cluster_info: &Arc>, - retransmit_socket: Arc, + retransmit_sockets: Arc>, repair_socket: Arc, fetch_stage_receiver: PacketReceiver, exit: &Arc, @@ -128,8 +202,9 @@ impl RetransmitStage { ) -> Self { let (retransmit_sender, retransmit_receiver) = channel(); + let retransmit_receiver = Arc::new(Mutex::new(retransmit_receiver)); let t_retransmit = retransmitter( - retransmit_socket, + retransmit_sockets, bank_forks.clone(), leader_schedule_cache, cluster_info.clone(), @@ -162,7 +237,7 @@ impl RetransmitStage { }, ); - let thread_hdls = vec![t_retransmit]; + let thread_hdls = t_retransmit; Self { thread_hdls, window_service, @@ -181,3 +256,82 @@ impl Service for RetransmitStage { Ok(()) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::blocktree::create_new_tmp_ledger; + use crate::blocktree_processor::{process_blocktree, ProcessOptions}; + use crate::contact_info::ContactInfo; + use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo}; + use crate::packet::{Meta, Packet, Packets}; + use solana_netutil::find_available_port_in_range; + use solana_sdk::pubkey::Pubkey; + + #[test] + fn test_skip_repair() { + let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(123); + let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); + let blocktree = Blocktree::open(&ledger_path).unwrap(); + let opts = ProcessOptions { + full_leader_cache: true, + ..ProcessOptions::default() + }; + let (bank_forks, _, cached_leader_schedule) = + process_blocktree(&genesis_block, &blocktree, None, opts).unwrap(); + let leader_schedule_cache = Arc::new(cached_leader_schedule); + let bank_forks = Arc::new(RwLock::new(bank_forks)); + + let mut me = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); + let port = find_available_port_in_range((8000, 10000)).unwrap(); + let me_retransmit = UdpSocket::bind(format!("127.0.0.1:{}", port)).unwrap(); + // need to make sure tvu and tpu are valid addresses + me.tvu_forwards = me_retransmit.local_addr().unwrap(); + let port = find_available_port_in_range((8000, 10000)).unwrap(); + me.tvu = UdpSocket::bind(format!("127.0.0.1:{}", port)) + .unwrap() + .local_addr() + .unwrap(); + + let other = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); + let mut cluster_info = ClusterInfo::new_with_invalid_keypair(other); + cluster_info.insert_info(me); + + let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]); + let cluster_info = Arc::new(RwLock::new(cluster_info)); + + let (retransmit_sender, retransmit_receiver) = channel(); + let t_retransmit = retransmitter( + retransmit_socket, + bank_forks, + &leader_schedule_cache, + cluster_info, + Arc::new(Mutex::new(retransmit_receiver)), + ); + let _thread_hdls = vec![t_retransmit]; + + let packets = Packets::new(vec![Packet::default()]); + // it should send this over the sockets. + retransmit_sender.send(packets).unwrap(); + let mut packets = Packets::new(vec![]); + packets.recv_from(&me_retransmit).unwrap(); + assert_eq!(packets.packets.len(), 1); + assert_eq!(packets.packets[0].meta.repair, false); + + let repair = Packet { + meta: Meta { + repair: true, + ..Meta::default() + }, + ..Packet::default() + }; + + // send 1 repair and 1 "regular" packet so that we don't block forever on the recv_from + let packets = Packets::new(vec![repair, Packet::default()]); + retransmit_sender.send(packets).unwrap(); + let mut packets = Packets::new(vec![]); + packets.recv_from(&me_retransmit).unwrap(); + assert_eq!(packets.packets.len(), 1); + assert_eq!(packets.packets[0].meta.repair, false); + } +} diff --git a/core/src/rpc.rs b/core/src/rpc.rs index 6b2f432454b46f..c05794479635a6 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -1,41 +1,48 @@ //! The `rpc` module implements the Solana RPC interface. -use crate::bank_forks::BankForks; -use crate::cluster_info::ClusterInfo; -use crate::contact_info::ContactInfo; -use crate::packet::PACKET_DATA_SIZE; -use crate::storage_stage::StorageState; -use crate::validator::ValidatorExit; -use crate::version::VERSION; +use crate::{ + bank_forks::BankForks, + cluster_info::ClusterInfo, + confidence::{BankConfidence, ForkConfidenceCache}, + contact_info::ContactInfo, + packet::PACKET_DATA_SIZE, + storage_stage::StorageState, + validator::ValidatorExit, + version::VERSION, +}; use bincode::{deserialize, serialize}; use jsonrpc_core::{Error, Metadata, Result}; use jsonrpc_derive::rpc; -use solana_client::rpc_request::RpcEpochInfo; +use solana_client::rpc_request::{RpcEpochInfo, RpcVoteAccountInfo, RpcVoteAccountStatus}; use solana_drone::drone::request_airdrop_transaction; use solana_runtime::bank::Bank; -use solana_sdk::account::Account; -use solana_sdk::fee_calculator::FeeCalculator; -use solana_sdk::hash::Hash; -use solana_sdk::inflation::Inflation; -use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::Signature; -use solana_sdk::transaction::{self, Transaction}; +use solana_sdk::{ + account::Account, + fee_calculator::FeeCalculator, + hash::Hash, + inflation::Inflation, + pubkey::Pubkey, + signature::Signature, + transaction::{self, Transaction}, +}; use solana_vote_api::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}; -use std::net::{SocketAddr, UdpSocket}; -use std::sync::{Arc, RwLock}; -use std::thread::sleep; -use std::time::{Duration, Instant}; +use std::{ + net::{SocketAddr, UdpSocket}, + sync::{Arc, RwLock}, + thread::sleep, + time::{Duration, Instant}, +}; #[derive(Debug, Clone)] pub struct JsonRpcConfig { - pub enable_fullnode_exit: bool, // Enable the 'fullnodeExit' command + pub enable_validator_exit: bool, // Enable the 'validatorExit' command pub drone_addr: Option, } impl Default for JsonRpcConfig { fn default() -> Self { Self { - enable_fullnode_exit: false, + enable_validator_exit: false, drone_addr: None, } } @@ -44,6 +51,7 @@ impl Default for JsonRpcConfig { #[derive(Clone)] pub struct JsonRpcRequestProcessor { bank_forks: Arc>, + fork_confidence_cache: Arc>, storage_state: StorageState, config: JsonRpcConfig, validator_exit: Arc>>, @@ -58,10 +66,12 @@ impl JsonRpcRequestProcessor { storage_state: StorageState, config: JsonRpcConfig, bank_forks: Arc>, + fork_confidence_cache: Arc>, validator_exit: &Arc>>, ) -> Self { JsonRpcRequestProcessor { bank_forks, + fork_confidence_cache, storage_state, config, validator_exit: validator_exit.clone(), @@ -100,6 +110,14 @@ impl JsonRpcRequestProcessor { (blockhash.to_string(), fee_calculator) } + fn get_block_confidence(&self, block: u64) -> (Option, u64) { + let r_fork_confidence = self.fork_confidence_cache.read().unwrap(); + ( + r_fork_confidence.get_fork_confidence(block).cloned(), + r_fork_confidence.total_stake(), + ) + } + pub fn get_signature_status(&self, signature: Signature) -> Option> { self.get_signature_confirmation_status(signature) .map(|x| x.1) @@ -159,6 +177,7 @@ impl JsonRpcRequestProcessor { node_pubkey: vote_state.node_pubkey.to_string(), activated_stake: *activated_stake, commission: vote_state.commission, + root_slot: vote_state.root_slot.unwrap_or(0), epoch_vote_account, last_vote, } @@ -197,15 +216,15 @@ impl JsonRpcRequestProcessor { .get_pubkeys_for_slot(slot, &self.bank_forks)) } - pub fn fullnode_exit(&self) -> Result { - if self.config.enable_fullnode_exit { - warn!("fullnode_exit request..."); + pub fn validator_exit(&self) -> Result { + if self.config.enable_validator_exit { + warn!("validator_exit request..."); if let Some(x) = self.validator_exit.write().unwrap().take() { x.exit() } Ok(true) } else { - debug!("fullnode_exit ignored"); + debug!("validator_exit ignored"); Ok(false) } } @@ -243,34 +262,6 @@ pub struct RpcContactInfo { /// JSON RPC port pub rpc: Option, } -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(rename_all = "camelCase")] -pub struct RpcVoteAccountStatus { - pub current: Vec, - pub delinquent: Vec, -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(rename_all = "camelCase")] -pub struct RpcVoteAccountInfo { - /// Vote account pubkey as base-58 encoded string - pub vote_pubkey: String, - - /// The pubkey of the node that votes using this account - pub node_pubkey: String, - - /// The current stake, in lamports, delegated to this vote account - pub activated_stake: u64, - - /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout - pub commission: u8, - - /// Whether this account is staked for the current epoch - pub epoch_vote_account: bool, - - /// Most recent slot voted on by this vote account - pub last_vote: u64, -} #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "kebab-case")] @@ -307,6 +298,13 @@ pub trait RpcSol { #[rpc(meta, name = "getEpochInfo")] fn get_epoch_info(&self, _: Self::Metadata) -> Result; + #[rpc(meta, name = "getBlockConfidence")] + fn get_block_confidence( + &self, + _: Self::Metadata, + _: u64, + ) -> Result<(Option, u64)>; + #[rpc(meta, name = "getGenesisBlockhash")] fn get_genesis_blockhash(&self, _: Self::Metadata) -> Result; @@ -356,8 +354,8 @@ pub trait RpcSol { #[rpc(meta, name = "getStoragePubkeysForSlot")] fn get_storage_pubkeys_for_slot(&self, _: Self::Metadata, _: u64) -> Result>; - #[rpc(meta, name = "fullnodeExit")] - fn fullnode_exit(&self, _: Self::Metadata) -> Result; + #[rpc(meta, name = "validatorExit")] + fn validator_exit(&self, _: Self::Metadata) -> Result; #[rpc(meta, name = "getNumBlocksSinceSignatureConfirmation")] fn get_num_blocks_since_signature_confirmation( @@ -487,6 +485,18 @@ impl RpcSol for RpcSolImpl { }) } + fn get_block_confidence( + &self, + meta: Self::Metadata, + block: u64, + ) -> Result<(Option, u64)> { + Ok(meta + .request_processor + .read() + .unwrap() + .get_block_confidence(block)) + } + fn get_genesis_blockhash(&self, meta: Self::Metadata) -> Result { debug!("get_genesis_blockhash rpc request received"); Ok(meta.genesis_blockhash.to_string()) @@ -689,8 +699,8 @@ impl RpcSol for RpcSolImpl { .get_storage_pubkeys_for_slot(slot) } - fn fullnode_exit(&self, meta: Self::Metadata) -> Result { - meta.request_processor.read().unwrap().fullnode_exit() + fn validator_exit(&self, meta: Self::Metadata) -> Result { + meta.request_processor.read().unwrap().validator_exit() } fn get_version(&self, _: Self::Metadata) -> Result { @@ -708,25 +718,49 @@ impl RpcSol for RpcSolImpl { #[cfg(test)] pub mod tests { use super::*; - use crate::contact_info::ContactInfo; - use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo}; + use crate::{ + contact_info::ContactInfo, + genesis_utils::{create_genesis_block, GenesisBlockInfo}, + }; use jsonrpc_core::{MetaIoHandler, Output, Response, Value}; - use solana_sdk::fee_calculator::DEFAULT_BURN_PERCENT; - use solana_sdk::hash::{hash, Hash}; - use solana_sdk::instruction::InstructionError; - use solana_sdk::signature::{Keypair, KeypairUtil}; - use solana_sdk::system_transaction; - use solana_sdk::transaction::TransactionError; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::thread; + use solana_sdk::{ + fee_calculator::DEFAULT_BURN_PERCENT, + hash::{hash, Hash}, + instruction::InstructionError, + signature::{Keypair, KeypairUtil}, + system_transaction, + transaction::TransactionError, + }; + use std::{ + collections::HashMap, + sync::atomic::{AtomicBool, Ordering}, + thread, + }; const TEST_MINT_LAMPORTS: u64 = 10_000; - fn start_rpc_handler_with_tx( - pubkey: &Pubkey, - ) -> (MetaIoHandler, Meta, Arc, Hash, Keypair, Pubkey) { + struct RpcHandler { + io: MetaIoHandler, + meta: Meta, + bank: Arc, + blockhash: Hash, + alice: Keypair, + leader_pubkey: Pubkey, + fork_confidence_cache: Arc>, + } + + fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler { let (bank_forks, alice) = new_bank_forks(); let bank = bank_forks.read().unwrap().working_bank(); + + let confidence_slot0 = BankConfidence::new([8; MAX_LOCKOUT_HISTORY]); + let confidence_slot1 = BankConfidence::new([9; MAX_LOCKOUT_HISTORY]); + let mut bank_confidence: HashMap = HashMap::new(); + bank_confidence.entry(0).or_insert(confidence_slot0.clone()); + bank_confidence.entry(1).or_insert(confidence_slot1.clone()); + let fork_confidence_cache = + Arc::new(RwLock::new(ForkConfidenceCache::new(bank_confidence, 42))); + let leader_pubkey = *bank.collector_id(); let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); @@ -742,6 +776,7 @@ pub mod tests { StorageState::default(), JsonRpcConfig::default(), bank_forks, + fork_confidence_cache.clone(), &validator_exit, ))); let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair( @@ -764,7 +799,15 @@ pub mod tests { cluster_info, genesis_blockhash: Hash::default(), }; - (io, meta, bank, blockhash, alice, leader_pubkey) + RpcHandler { + io, + meta, + bank, + blockhash, + alice, + leader_pubkey, + fork_confidence_cache, + } } #[test] @@ -774,10 +817,12 @@ pub mod tests { let validator_exit = create_validator_exit(&exit); let (bank_forks, alice) = new_bank_forks(); let bank = bank_forks.read().unwrap().working_bank(); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let request_processor = JsonRpcRequestProcessor::new( StorageState::default(), JsonRpcConfig::default(), bank_forks, + fork_confidence_cache, &validator_exit, ); thread::spawn(move || { @@ -793,8 +838,7 @@ pub mod tests { #[test] fn test_rpc_get_balance() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, @@ -812,8 +856,12 @@ pub mod tests { #[test] fn test_rpc_get_cluster_nodes() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { + io, + meta, + leader_pubkey, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#); let res = io.handle_request_sync(&req, meta); @@ -833,8 +881,12 @@ pub mod tests { #[test] fn test_rpc_get_slot_leader() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { + io, + meta, + leader_pubkey, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#); let res = io.handle_request_sync(&req, meta); @@ -849,8 +901,7 @@ pub mod tests { #[test] fn test_rpc_get_tx_count() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#); let res = io.handle_request_sync(&req, meta); @@ -865,8 +916,7 @@ pub mod tests { #[test] fn test_rpc_get_total_supply() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}}"#); let rep = io.handle_request_sync(&req, meta); @@ -892,8 +942,7 @@ pub mod tests { fn test_rpc_get_minimum_balance_for_rent_exemption() { let bob_pubkey = Pubkey::new_rand(); let data_len = 50; - let (io, meta, bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getMinimumBalanceForRentExemption","params":[{}]}}"#, @@ -924,8 +973,7 @@ pub mod tests { #[test] fn test_rpc_get_inflation() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getInflation"}}"#); let rep = io.handle_request_sync(&req, meta); @@ -946,8 +994,7 @@ pub mod tests { #[test] fn test_rpc_get_account_info() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#, @@ -975,8 +1022,13 @@ pub mod tests { #[test] fn test_rpc_get_program_accounts() { let bob = Keypair::new(); - let (io, meta, bank, blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob.pubkey()); + let RpcHandler { + io, + meta, + bank, + blockhash, + .. + } = start_rpc_handler_with_tx(&bob.pubkey()); let new_program_id = Pubkey::new_rand(); let tx = system_transaction::assign(&bob, blockhash, &new_program_id); @@ -1011,8 +1063,13 @@ pub mod tests { #[test] fn test_rpc_confirm_tx() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, blockhash, alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { + io, + meta, + blockhash, + alice, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash); let req = format!( @@ -1031,8 +1088,13 @@ pub mod tests { #[test] fn test_rpc_get_signature_status() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, blockhash, alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { + io, + meta, + blockhash, + alice, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash); let req = format!( @@ -1096,8 +1158,12 @@ pub mod tests { #[test] fn test_rpc_get_recent_blockhash() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { + io, + meta, + blockhash, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#); let res = io.handle_request_sync(&req, meta); @@ -1123,8 +1189,7 @@ pub mod tests { #[test] fn test_rpc_fail_request_airdrop() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) = - start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); // Expect internal error because no drone is available let req = format!( @@ -1145,6 +1210,7 @@ pub mod tests { fn test_rpc_send_bad_tx() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let mut io = MetaIoHandler::default(); let rpc = RpcSolImpl; @@ -1155,6 +1221,7 @@ pub mod tests { StorageState::default(), JsonRpcConfig::default(), new_bank_forks().0, + fork_confidence_cache, &validator_exit, ); Arc::new(RwLock::new(request_processor)) @@ -1238,39 +1305,43 @@ pub mod tests { } #[test] - fn test_rpc_request_processor_config_default_trait_fullnode_exit_fails() { + fn test_rpc_request_processor_config_default_trait_validator_exit_fails() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let request_processor = JsonRpcRequestProcessor::new( StorageState::default(), JsonRpcConfig::default(), new_bank_forks().0, + fork_confidence_cache, &validator_exit, ); - assert_eq!(request_processor.fullnode_exit(), Ok(false)); + assert_eq!(request_processor.validator_exit(), Ok(false)); assert_eq!(exit.load(Ordering::Relaxed), false); } #[test] - fn test_rpc_request_processor_allow_fullnode_exit_config() { + fn test_rpc_request_processor_allow_validator_exit_config() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let mut config = JsonRpcConfig::default(); - config.enable_fullnode_exit = true; + config.enable_validator_exit = true; let request_processor = JsonRpcRequestProcessor::new( StorageState::default(), config, new_bank_forks().0, + fork_confidence_cache, &validator_exit, ); - assert_eq!(request_processor.fullnode_exit(), Ok(true)); + assert_eq!(request_processor.validator_exit(), Ok(true)); assert_eq!(exit.load(Ordering::Relaxed), true); } #[test] fn test_rpc_get_version() { let bob_pubkey = Pubkey::new_rand(); - let (io, meta, ..) = start_rpc_handler_with_tx(&bob_pubkey); + let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getVersion"}}"#); let res = io.handle_request_sync(&req, meta); @@ -1287,4 +1358,90 @@ pub mod tests { .expect("actual response deserialization"); assert_eq!(expected, result); } + + #[test] + fn test_rpc_processor_get_block_confidence() { + let exit = Arc::new(AtomicBool::new(false)); + let validator_exit = create_validator_exit(&exit); + let confidence_slot0 = BankConfidence::new([8; MAX_LOCKOUT_HISTORY]); + let confidence_slot1 = BankConfidence::new([9; MAX_LOCKOUT_HISTORY]); + let mut bank_confidence: HashMap = HashMap::new(); + bank_confidence.entry(0).or_insert(confidence_slot0.clone()); + bank_confidence.entry(1).or_insert(confidence_slot1.clone()); + let fork_confidence_cache = + Arc::new(RwLock::new(ForkConfidenceCache::new(bank_confidence, 42))); + + let mut config = JsonRpcConfig::default(); + config.enable_validator_exit = true; + let request_processor = JsonRpcRequestProcessor::new( + StorageState::default(), + config, + new_bank_forks().0, + fork_confidence_cache, + &validator_exit, + ); + assert_eq!( + request_processor.get_block_confidence(0), + (Some(confidence_slot0), 42) + ); + assert_eq!( + request_processor.get_block_confidence(1), + (Some(confidence_slot1), 42) + ); + assert_eq!(request_processor.get_block_confidence(2), (None, 42)); + } + + #[test] + fn test_rpc_get_block_confidence() { + let bob_pubkey = Pubkey::new_rand(); + let RpcHandler { + io, + meta, + fork_confidence_cache, + .. + } = start_rpc_handler_with_tx(&bob_pubkey); + + let req = + format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockConfidence","params":[0]}}"#); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let (confidence, total_staked): (Option, u64) = + if let Response::Single(res) = result { + if let Output::Success(res) = res { + serde_json::from_value(res.result).unwrap() + } else { + panic!("Expected success"); + } + } else { + panic!("Expected single response"); + }; + assert_eq!( + confidence, + fork_confidence_cache + .read() + .unwrap() + .get_fork_confidence(0) + .cloned() + ); + assert_eq!(total_staked, 42); + + let req = + format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockConfidence","params":[2]}}"#); + let res = io.handle_request_sync(&req, meta); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let (confidence, total_staked): (Option, u64) = + if let Response::Single(res) = result { + if let Output::Success(res) = res { + serde_json::from_value(res.result).unwrap() + } else { + panic!("Expected success"); + } + } else { + panic!("Expected single response"); + }; + assert_eq!(confidence, None); + assert_eq!(total_staked, 42); + } } diff --git a/core/src/rpc_service.rs b/core/src/rpc_service.rs index d2f40b72631890..69bc6e4ab9faca 100644 --- a/core/src/rpc_service.rs +++ b/core/src/rpc_service.rs @@ -1,23 +1,22 @@ //! The `rpc_service` module implements the Solana JSON RPC service. -use crate::bank_forks::BankForks; -use crate::cluster_info::ClusterInfo; -use crate::rpc::*; -use crate::service::Service; -use crate::storage_stage::StorageState; -use crate::validator::ValidatorExit; +use crate::{ + bank_forks::BankForks, cluster_info::ClusterInfo, confidence::ForkConfidenceCache, rpc::*, + service::Service, storage_stage::StorageState, validator::ValidatorExit, +}; use jsonrpc_core::MetaIoHandler; -use jsonrpc_http_server::CloseHandle; use jsonrpc_http_server::{ - hyper, AccessControlAllowOrigin, DomainsValidation, RequestMiddleware, RequestMiddlewareAction, - ServerBuilder, + hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware, + RequestMiddlewareAction, ServerBuilder, }; use solana_sdk::hash::Hash; -use std::net::SocketAddr; -use std::path::{Path, PathBuf}; -use std::sync::mpsc::channel; -use std::sync::{Arc, RwLock}; -use std::thread::{self, Builder, JoinHandle}; +use std::{ + net::SocketAddr, + path::{Path, PathBuf}, + sync::mpsc::channel, + sync::{Arc, RwLock}, + thread::{self, Builder, JoinHandle}, +}; use tokio::prelude::Future; pub struct JsonRpcService { @@ -91,6 +90,7 @@ impl JsonRpcService { storage_state: StorageState, config: JsonRpcConfig, bank_forks: Arc>, + fork_confidence_cache: Arc>, ledger_path: &Path, genesis_blockhash: Hash, validator_exit: &Arc>>, @@ -101,6 +101,7 @@ impl JsonRpcService { storage_state, config, bank_forks, + fork_confidence_cache, validator_exit, ))); let request_processor_ = request_processor.clone(); @@ -197,12 +198,14 @@ mod tests { solana_netutil::find_available_port_in_range((10000, 65535)).unwrap(), ); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank))); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let mut rpc_service = JsonRpcService::new( &cluster_info, rpc_addr, StorageState::default(), JsonRpcConfig::default(), bank_forks, + fork_confidence_cache, &PathBuf::from("farf"), Hash::default(), &validator_exit, diff --git a/core/src/sendmmsg.rs b/core/src/sendmmsg.rs new file mode 100644 index 00000000000000..e43a4adc1c9c5f --- /dev/null +++ b/core/src/sendmmsg.rs @@ -0,0 +1,248 @@ +//! The `sendmmsg` module provides sendmmsg() API implementation + +use std::io; +use std::net::{SocketAddr, UdpSocket}; + +#[cfg(not(target_os = "linux"))] +pub fn send_mmsg(sock: &UdpSocket, packets: &mut [(Vec, &SocketAddr)]) -> io::Result { + let count = packets.len(); + for (p, a) in packets { + sock.send_to(p, *a)?; + } + + Ok(count) +} + +#[cfg(target_os = "linux")] +use libc::{iovec, mmsghdr, sockaddr_in, sockaddr_in6}; + +#[cfg(target_os = "linux")] +fn mmsghdr_for_packet( + packet: &mut [u8], + dest: &SocketAddr, + index: usize, + addr_in_len: u32, + addr_in6_len: u32, + iovs: &mut Vec, + addr_in: &mut Vec, + addr_in6: &mut Vec, +) -> mmsghdr { + use libc::c_void; + use nix::sys::socket::InetAddr; + use std::mem; + + iovs.push(iovec { + iov_base: packet.as_mut_ptr() as *mut c_void, + iov_len: packet.len(), + }); + + let mut hdr: mmsghdr = unsafe { mem::zeroed() }; + hdr.msg_hdr.msg_iov = &mut iovs[index]; + hdr.msg_hdr.msg_iovlen = 1; + hdr.msg_len = packet.len() as u32; + + match InetAddr::from_std(dest) { + InetAddr::V4(addr) => { + addr_in.insert(index, addr); + hdr.msg_hdr.msg_name = &mut addr_in[index] as *mut _ as *mut _; + hdr.msg_hdr.msg_namelen = addr_in_len; + } + InetAddr::V6(addr) => { + addr_in6.insert(index, addr); + hdr.msg_hdr.msg_name = &mut addr_in6[index] as *mut _ as *mut _; + hdr.msg_hdr.msg_namelen = addr_in6_len; + } + }; + hdr +} + +#[cfg(target_os = "linux")] +pub fn send_mmsg(sock: &UdpSocket, packets: &mut [(Vec, &SocketAddr)]) -> io::Result { + use libc::{sendmmsg, socklen_t}; + use std::mem; + use std::os::unix::io::AsRawFd; + + // The vectors are allocated with capacity, as later code inserts elements + // at specific indices, and uses the address of the vector index in hdrs + let mut iovs: Vec = Vec::with_capacity(packets.len()); + let mut addr_in: Vec = Vec::with_capacity(packets.len()); + let mut addr_in6: Vec = Vec::with_capacity(packets.len()); + + let addr_in_len = mem::size_of_val(&addr_in) as socklen_t; + let addr_in6_len = mem::size_of_val(&addr_in6) as socklen_t; + let sock_fd = sock.as_raw_fd(); + + let mut hdrs: Vec = packets + .iter_mut() + .enumerate() + .map(|(i, (packet, dest))| { + mmsghdr_for_packet( + packet, + dest, + i, + addr_in_len as u32, + addr_in6_len as u32, + &mut iovs, + &mut addr_in, + &mut addr_in6, + ) + }) + .collect(); + + let npkts = match unsafe { sendmmsg(sock_fd, &mut hdrs[0], packets.len() as u32, 0) } { + -1 => return Err(io::Error::last_os_error()), + n => n as usize, + }; + Ok(npkts) +} + +#[cfg(not(target_os = "linux"))] +pub fn multicast(sock: &UdpSocket, packet: &mut [u8], dests: &[&SocketAddr]) -> io::Result { + let count = dests.len(); + for a in dests { + sock.send_to(packet, a)?; + } + + Ok(count) +} + +#[cfg(target_os = "linux")] +pub fn multicast(sock: &UdpSocket, packet: &mut [u8], dests: &[&SocketAddr]) -> io::Result { + use libc::{sendmmsg, socklen_t}; + use std::mem; + use std::os::unix::io::AsRawFd; + + // The vectors are allocated with capacity, as later code inserts elements + // at specific indices, and uses the address of the vector index in hdrs + let mut iovs: Vec = Vec::with_capacity(dests.len()); + let mut addr_in: Vec = Vec::with_capacity(dests.len()); + let mut addr_in6: Vec = Vec::with_capacity(dests.len()); + + let addr_in_len = mem::size_of_val(&addr_in) as socklen_t; + let addr_in6_len = mem::size_of_val(&addr_in6) as socklen_t; + let sock_fd = sock.as_raw_fd(); + + let mut hdrs: Vec = dests + .iter() + .enumerate() + .map(|(i, dest)| { + mmsghdr_for_packet( + packet, + dest, + i, + addr_in_len as u32, + addr_in6_len as u32, + &mut iovs, + &mut addr_in, + &mut addr_in6, + ) + }) + .collect(); + + let npkts = match unsafe { sendmmsg(sock_fd, &mut hdrs[0], dests.len() as u32, 0) } { + -1 => return Err(io::Error::last_os_error()), + n => n as usize, + }; + Ok(npkts) +} + +#[cfg(test)] +mod tests { + use crate::packet::Packet; + use crate::recvmmsg::recv_mmsg; + use crate::sendmmsg::{multicast, send_mmsg}; + use solana_sdk::packet::PACKET_DATA_SIZE; + use std::net::UdpSocket; + + #[test] + pub fn test_send_mmsg_one_dest() { + let reader = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr = reader.local_addr().unwrap(); + let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); + + let mut packets: Vec<_> = (0..32) + .map(|_| (vec![0u8; PACKET_DATA_SIZE], &addr)) + .collect(); + + let sent = send_mmsg(&sender, &mut packets); + assert_matches!(sent, Ok(32)); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + assert_eq!(32, recv); + } + + #[test] + pub fn test_send_mmsg_multi_dest() { + let reader = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr = reader.local_addr().unwrap(); + + let reader2 = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr2 = reader2.local_addr().unwrap(); + + let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); + + let mut packets: Vec<_> = (0..32) + .map(|i| { + if i < 16 { + (vec![0u8; PACKET_DATA_SIZE], &addr) + } else { + (vec![0u8; PACKET_DATA_SIZE], &addr2) + } + }) + .collect(); + + let sent = send_mmsg(&sender, &mut packets); + assert_matches!(sent, Ok(32)); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + assert_eq!(16, recv); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap().1; + assert_eq!(16, recv); + } + + #[test] + pub fn test_multicast_msg() { + let reader = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr = reader.local_addr().unwrap(); + + let reader2 = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr2 = reader2.local_addr().unwrap(); + + let reader3 = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr3 = reader3.local_addr().unwrap(); + + let reader4 = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let addr4 = reader4.local_addr().unwrap(); + + let sender = UdpSocket::bind("127.0.0.1:0").expect("bind"); + + let mut packet = Packet::default(); + + let sent = multicast( + &sender, + &mut packet.data[..packet.meta.size], + &[&addr, &addr2, &addr3, &addr4], + ); + assert_matches!(sent, Ok(4)); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + assert_eq!(1, recv); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap().1; + assert_eq!(1, recv); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader3, &mut packets[..]).unwrap().1; + assert_eq!(1, recv); + + let mut packets = vec![Packet::default(); 32]; + let recv = recv_mmsg(&reader4, &mut packets[..]).unwrap().1; + assert_eq!(1, recv); + } +} diff --git a/core/src/shred.rs b/core/src/shred.rs index 179572da12a55f..35e7d45d9fb6ca 100644 --- a/core/src/shred.rs +++ b/core/src/shred.rs @@ -1,29 +1,38 @@ //! The `shred` module defines data structures and methods to pull MTU sized data frames from the network. +use crate::blocktree::BlocktreeError; +use crate::entry::create_ticks; +use crate::entry::Entry; use crate::erasure::Session; use crate::result; use crate::result::Error; use bincode::serialized_size; use core::cell::RefCell; use lazy_static::lazy_static; -use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator}; +use rayon::iter::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; +use rayon::slice::ParallelSlice; use rayon::ThreadPool; use serde::{Deserialize, Serialize}; use solana_rayon_threadlimit::get_thread_count; +use solana_sdk::hash::Hash; use solana_sdk::packet::PACKET_DATA_SIZE; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use std::io; -use std::io::{Error as IOError, ErrorKind, Write}; +use std::io::{Error as IOError, ErrorKind}; use std::sync::Arc; +use std::time::Instant; lazy_static! { - static ref SIZE_OF_CODING_SHRED_HEADER: usize = + pub static ref SIZE_OF_CODING_SHRED_HEADER: usize = { serialized_size(&CodingShredHeader::default()).unwrap() as usize }; - static ref SIZE_OF_DATA_SHRED_HEADER: usize = + pub static ref SIZE_OF_DATA_SHRED_HEADER: usize = { serialized_size(&DataShredHeader::default()).unwrap() as usize }; + pub static ref SIZE_OF_SHRED_HEADER: usize = + { serialized_size(&ShredHeader::default()).unwrap() as usize }; static ref SIZE_OF_SIGNATURE: usize = { bincode::serialized_size(&Signature::default()).unwrap() as usize }; - pub static ref SIZE_OF_SHRED_TYPE: usize = { bincode::serialized_size(&0u8).unwrap() as usize }; + pub static ref SIZE_OF_SHRED_TYPE: usize = + { bincode::serialized_size(&ShredType(DATA_SHRED)).unwrap() as usize }; } thread_local!(static PAR_THREAD_POOL: RefCell = RefCell::new(rayon::ThreadPoolBuilder::new() @@ -37,7 +46,7 @@ pub const CODING_SHRED: u8 = 0b0101_1010; /// This limit comes from reed solomon library, but unfortunately they don't have /// a public constant defined for it. -const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 16; +pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 16; /// Based on rse benchmarks, the optimal erasure config uses 16 data shreds and 4 coding shreds pub const RECOMMENDED_FEC_RATE: f32 = 0.25; @@ -45,7 +54,10 @@ pub const RECOMMENDED_FEC_RATE: f32 = 0.25; const LAST_SHRED_IN_SLOT: u8 = 0b0000_0001; const DATA_COMPLETE_SHRED: u8 = 0b0000_0010; -/// A common header that is present at start of every shred +#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)] +pub struct ShredType(u8); + +/// A common header that is present in data and code shred headers #[derive(Serialize, Clone, Deserialize, Default, PartialEq, Debug)] pub struct ShredCommonHeader { pub signature: Signature, @@ -53,82 +65,115 @@ pub struct ShredCommonHeader { pub index: u32, } -/// A common header that is present at start of every data shred -#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)] +/// The data shred header has parent offset and flags +#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)] pub struct DataShredHeader { - pub common_header: CodingShredHeader, - pub data_header: ShredCommonHeader, + pub common_header: ShredCommonHeader, pub parent_offset: u16, pub flags: u8, } /// The coding shred header has FEC information -#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)] pub struct CodingShredHeader { - pub shred_type: u8, - pub coding_header: ShredCommonHeader, + pub common_header: ShredCommonHeader, pub num_data_shreds: u16, pub num_coding_shreds: u16, pub position: u16, } -impl Default for DataShredHeader { - fn default() -> Self { - DataShredHeader { - common_header: CodingShredHeader { - shred_type: DATA_SHRED, - ..CodingShredHeader::default() - }, - data_header: ShredCommonHeader::default(), - parent_offset: 0, - flags: 0, - } - } +/// A common header that is present at start of every shred +#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)] +pub struct ShredHeader { + pub shred_type: ShredType, + pub coding_header: CodingShredHeader, + pub data_header: DataShredHeader, } -impl Default for CodingShredHeader { +impl Default for ShredHeader { fn default() -> Self { - CodingShredHeader { - shred_type: CODING_SHRED, - coding_header: ShredCommonHeader::default(), - num_data_shreds: 0, - num_coding_shreds: 0, - position: 0, + ShredHeader { + shred_type: ShredType(DATA_SHRED), + coding_header: CodingShredHeader::default(), + data_header: DataShredHeader::default(), } } } #[derive(Clone, Debug, PartialEq)] pub struct Shred { - pub headers: DataShredHeader, + pub headers: ShredHeader, pub payload: Vec, } impl Shred { - fn new(header: DataShredHeader, shred_buf: Vec) -> Self { + fn new(header: ShredHeader, shred_buf: Vec) -> Self { Shred { headers: header, payload: shred_buf, } } + pub fn new_from_data( + slot: u64, + index: u32, + parent_offset: u16, + data: Option<&[u8]>, + is_last_data: bool, + is_last_in_slot: bool, + ) -> Self { + let mut shred_buf = vec![0; PACKET_DATA_SIZE]; + let mut header = ShredHeader::default(); + header.data_header.common_header.slot = slot; + header.data_header.common_header.index = index; + header.data_header.parent_offset = parent_offset; + header.data_header.flags = 0; + + if is_last_data { + header.data_header.flags |= DATA_COMPLETE_SHRED + } + + if is_last_in_slot { + header.data_header.flags |= LAST_SHRED_IN_SLOT + } + + if let Some(data) = data { + bincode::serialize_into(&mut shred_buf[..*SIZE_OF_SHRED_HEADER], &header) + .expect("Failed to write header into shred buffer"); + shred_buf[*SIZE_OF_SHRED_HEADER..*SIZE_OF_SHRED_HEADER + data.len()] + .clone_from_slice(data); + } + + Self::new(header, shred_buf) + } + pub fn new_from_serialized_shred(shred_buf: Vec) -> result::Result { - let shred_type: u8 = bincode::deserialize(&shred_buf[..*SIZE_OF_SHRED_TYPE])?; - let header = if shred_type == CODING_SHRED { - let end = *SIZE_OF_CODING_SHRED_HEADER; - let mut header = DataShredHeader::default(); - header.common_header = bincode::deserialize(&shred_buf[..end])?; + let shred_type: ShredType = bincode::deserialize(&shred_buf[..*SIZE_OF_SHRED_TYPE])?; + let mut header = if shred_type == ShredType(CODING_SHRED) { + let start = *SIZE_OF_SHRED_TYPE; + let end = start + *SIZE_OF_CODING_SHRED_HEADER; + let mut header = ShredHeader::default(); + header.coding_header = bincode::deserialize(&shred_buf[start..end])?; + header + } else if shred_type == ShredType(DATA_SHRED) { + let start = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE; + let end = start + *SIZE_OF_DATA_SHRED_HEADER; + let mut header = ShredHeader::default(); + header.data_header = bincode::deserialize(&shred_buf[start..end])?; header } else { - let end = *SIZE_OF_DATA_SHRED_HEADER; - bincode::deserialize(&shred_buf[..end])? + return Err(Error::BlocktreeError(BlocktreeError::InvalidShredData( + Box::new(bincode::ErrorKind::Custom("Invalid shred type".to_string())), + ))); }; + header.shred_type = shred_type; + Ok(Self::new(header, shred_buf)) } - pub fn new_empty_from_header(headers: DataShredHeader) -> Self { + pub fn new_empty_from_header(headers: ShredHeader) -> Self { let mut payload = vec![0; PACKET_DATA_SIZE]; - let mut wr = io::Cursor::new(&mut payload[..*SIZE_OF_DATA_SHRED_HEADER]); + let mut wr = io::Cursor::new(&mut payload[..*SIZE_OF_SHRED_HEADER]); bincode::serialize_into(&mut wr, &headers).expect("Failed to serialize shred"); Shred { headers, payload } } @@ -136,23 +181,23 @@ impl Shred { pub fn new_empty_data_shred() -> Self { let mut payload = vec![0; PACKET_DATA_SIZE]; payload[0] = DATA_SHRED; - let headers = DataShredHeader::default(); + let headers = ShredHeader::default(); Shred { headers, payload } } - fn header(&self) -> &ShredCommonHeader { + pub fn header(&self) -> &ShredCommonHeader { if self.is_data() { - &self.headers.data_header + &self.headers.data_header.common_header } else { - &self.headers.common_header.coding_header + &self.headers.coding_header.common_header } } pub fn header_mut(&mut self) -> &mut ShredCommonHeader { if self.is_data() { - &mut self.headers.data_header + &mut self.headers.data_header.common_header } else { - &mut self.headers.common_header.coding_header + &mut self.headers.coding_header.common_header } } @@ -162,7 +207,8 @@ impl Shred { pub fn parent(&self) -> u64 { if self.is_data() { - self.headers.data_header.slot - u64::from(self.headers.parent_offset) + self.headers.data_header.common_header.slot + - u64::from(self.headers.data_header.parent_offset) } else { std::u64::MAX } @@ -197,12 +243,12 @@ impl Shred { } pub fn is_data(&self) -> bool { - self.headers.common_header.shred_type == DATA_SHRED + self.headers.shred_type == ShredType(DATA_SHRED) } pub fn last_in_slot(&self) -> bool { if self.is_data() { - self.headers.flags & LAST_SHRED_IN_SLOT == LAST_SHRED_IN_SLOT + self.headers.data_header.flags & LAST_SHRED_IN_SLOT == LAST_SHRED_IN_SLOT } else { false } @@ -212,13 +258,13 @@ impl Shred { /// Use this only for test code which doesn't care about actual shred pub fn set_last_in_slot(&mut self) { if self.is_data() { - self.headers.flags |= LAST_SHRED_IN_SLOT + self.headers.data_header.flags |= LAST_SHRED_IN_SLOT } } pub fn data_complete(&self) -> bool { if self.is_data() { - self.headers.flags & DATA_COMPLETE_SHRED == DATA_COMPLETE_SHRED + self.headers.data_header.flags & DATA_COMPLETE_SHRED == DATA_COMPLETE_SHRED } else { false } @@ -226,7 +272,7 @@ impl Shred { pub fn coding_params(&self) -> Option<(u16, u16, u16)> { if !self.is_data() { - let header = &self.headers.common_header; + let header = &self.headers.coding_header; Some(( header.num_data_shreds, header.num_coding_shreds, @@ -239,7 +285,7 @@ impl Shred { pub fn verify(&self, pubkey: &Pubkey) -> bool { let signed_payload_offset = if self.is_data() { - *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE } else { *SIZE_OF_SHRED_TYPE } + *SIZE_OF_SIGNATURE; @@ -251,50 +297,18 @@ impl Shred { #[derive(Debug)] pub struct Shredder { slot: u64, - pub index: u32, - fec_set_index: u32, - parent_offset: u16, + parent_slot: u64, fec_rate: f32, - signer: Arc, - pub shreds: Vec, - fec_set_shred_start: usize, - active_shred: Vec, - active_shred_header: DataShredHeader, - active_offset: usize, -} - -impl Write for Shredder { - fn write(&mut self, buf: &[u8]) -> io::Result { - let offset = self.active_offset + *SIZE_OF_DATA_SHRED_HEADER; - let slice_len = std::cmp::min(buf.len(), PACKET_DATA_SIZE - offset); - self.active_shred[offset..offset + slice_len].copy_from_slice(&buf[..slice_len]); - let capacity = PACKET_DATA_SIZE - offset - slice_len; - - if buf.len() > slice_len || capacity == 0 { - self.finalize_data_shred(); - } else { - self.active_offset += slice_len; - } - - if self.index - self.fec_set_index >= MAX_DATA_SHREDS_PER_FEC_BLOCK { - self.sign_unsigned_shreds_and_generate_codes(); - } - - Ok(slice_len) - } - - fn flush(&mut self) -> io::Result<()> { - unimplemented!() - } + keypair: Arc, + pub signing_coding_time: u128, } impl Shredder { pub fn new( slot: u64, - parent: u64, + parent_slot: u64, fec_rate: f32, - signer: &Arc, - index: u32, + keypair: Arc, ) -> result::Result { if fec_rate > 1.0 || fec_rate < 0.0 { Err(Error::IO(IOError::new( @@ -304,96 +318,120 @@ impl Shredder { fec_rate ), ))) - } else if slot < parent || slot - parent > u64::from(std::u16::MAX) { + } else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) { Err(Error::IO(IOError::new( ErrorKind::Other, format!( "Current slot {:?} must be > Parent slot {:?}, but the difference must not be > {:?}", - slot, parent, std::u16::MAX + slot, parent_slot, std::u16::MAX ), ))) } else { - let mut header = DataShredHeader::default(); - header.data_header.slot = slot; - header.data_header.index = index; - header.parent_offset = (slot - parent) as u16; - let active_shred = vec![0; PACKET_DATA_SIZE]; Ok(Shredder { slot, - index, - fec_set_index: index, - parent_offset: (slot - parent) as u16, + parent_slot, fec_rate, - signer: signer.clone(), - shreds: vec![], - fec_set_shred_start: 0, - active_shred, - active_shred_header: header, - active_offset: 0, + keypair, + signing_coding_time: 0, }) } } - pub fn sign_shred(signer: &Arc, shred_info: &mut Shred, signature_offset: usize) { - let data_offset = signature_offset + *SIZE_OF_SIGNATURE; - let signature = signer.sign_message(&shred_info.payload[data_offset..]); - let serialized_signature = - bincode::serialize(&signature).expect("Failed to generate serialized signature"); - shred_info.payload[signature_offset..signature_offset + serialized_signature.len()] - .copy_from_slice(&serialized_signature); - shred_info.header_mut().signature = signature; - } + pub fn entries_to_shreds( + &self, + entries: &[Entry], + is_last_in_slot: bool, + next_shred_index: u32, + ) -> (Vec, Vec, u32) { + let now = Instant::now(); + let serialized_shreds = + bincode::serialize(entries).expect("Expect to serialize all entries"); + let serialize_time = now.elapsed().as_millis(); + + let no_header_size = PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER; + let num_shreds = (serialized_shreds.len() + no_header_size - 1) / no_header_size; + let last_shred_index = next_shred_index + num_shreds as u32 - 1; + + // 1) Generate data shreds + let data_shreds: Vec = PAR_THREAD_POOL.with(|thread_pool| { + thread_pool.borrow().install(|| { + serialized_shreds + .par_chunks(no_header_size) + .enumerate() + .map(|(i, shred_data)| { + let shred_index = next_shred_index + i as u32; + + let (is_last_data, is_last_in_slot) = { + if shred_index == last_shred_index { + (true, is_last_in_slot) + } else { + (false, false) + } + }; + + let mut shred = Shred::new_from_data( + self.slot, + shred_index, + (self.slot - self.parent_slot) as u16, + Some(shred_data), + is_last_data, + is_last_in_slot, + ); + + Shredder::sign_shred( + &self.keypair, + &mut shred, + *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE, + ); + shred + }) + .collect() + }) + }); - fn sign_unsigned_shreds_and_generate_codes(&mut self) { - let signature_offset = *SIZE_OF_CODING_SHRED_HEADER; - let signer = self.signer.clone(); - PAR_THREAD_POOL.with(|thread_pool| { + // 2) Generate coding shreds + let mut coding_shreds: Vec<_> = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { - self.shreds[self.fec_set_shred_start..] - .par_iter_mut() - .for_each(|d| Self::sign_shred(&signer, d, signature_offset)); + data_shreds + .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) + .flat_map(|shred_data_batch| { + Shredder::generate_coding_shreds(self.slot, self.fec_rate, shred_data_batch) + }) + .collect() }) }); - let unsigned_coding_shred_start = self.shreds.len(); - - if self.fec_rate > 0.0 { - self.generate_coding_shreds(); - let signature_offset = *SIZE_OF_SHRED_TYPE; - PAR_THREAD_POOL.with(|thread_pool| { - thread_pool.borrow().install(|| { - self.shreds[unsigned_coding_shred_start..] - .par_iter_mut() - .for_each(|d| Self::sign_shred(&signer, d, signature_offset)); - }) - }); - } else { - self.fec_set_index = self.index; - } - self.fec_set_shred_start = self.shreds.len(); - } - /// Finalize a data shred. Update the shred index for the next shred - fn finalize_data_shred(&mut self) { - self.active_offset = 0; - self.index += 1; + // 3) Sign coding shreds + PAR_THREAD_POOL.with(|thread_pool| { + thread_pool.borrow().install(|| { + coding_shreds.par_iter_mut().for_each(|mut coding_shred| { + Shredder::sign_shred(&self.keypair, &mut coding_shred, *SIZE_OF_SHRED_TYPE); + }) + }) + }); - // Swap header - let mut header = DataShredHeader::default(); - header.data_header.slot = self.slot; - header.data_header.index = self.index; - header.parent_offset = self.parent_offset; - std::mem::swap(&mut header, &mut self.active_shred_header); + let elapsed = now.elapsed().as_millis(); - // Swap shred buffer - let mut shred_buf = vec![0; PACKET_DATA_SIZE]; - std::mem::swap(&mut shred_buf, &mut self.active_shred); + datapoint_debug!( + "shredding-stats", + ("slot", self.slot as i64, i64), + ("num_data_shreds", data_shreds.len() as i64, i64), + ("num_coding_shreds", coding_shreds.len() as i64, i64), + ("signing_coding", (elapsed - serialize_time) as i64, i64), + ("serialzing", serialize_time as i64, i64), + ); - let mut wr = io::Cursor::new(&mut shred_buf[..*SIZE_OF_DATA_SHRED_HEADER]); - bincode::serialize_into(&mut wr, &header) - .expect("Failed to write header into shred buffer"); + (data_shreds, coding_shreds, last_shred_index + 1) + } - let shred = Shred::new(header, shred_buf); - self.shreds.push(shred); + pub fn sign_shred(signer: &Arc, shred_info: &mut Shred, signature_offset: usize) { + let data_offset = signature_offset + *SIZE_OF_SIGNATURE; + let signature = signer.sign_message(&shred_info.payload[data_offset..]); + let serialized_signature = + bincode::serialize(&signature).expect("Failed to generate serialized signature"); + shred_info.payload[signature_offset..signature_offset + serialized_signature.len()] + .copy_from_slice(&serialized_signature); + shred_info.header_mut().signature = signature; } pub fn new_coding_shred_header( @@ -402,30 +440,35 @@ impl Shredder { num_data: usize, num_code: usize, position: usize, - ) -> DataShredHeader { - let mut header = DataShredHeader::default(); - header.common_header.shred_type = CODING_SHRED; - header.common_header.coding_header.index = index; - header.common_header.coding_header.slot = slot; - header.common_header.num_coding_shreds = num_code as u16; - header.common_header.num_data_shreds = num_data as u16; - header.common_header.position = position as u16; + ) -> ShredHeader { + let mut header = ShredHeader::default(); + header.shred_type = ShredType(CODING_SHRED); + header.coding_header.common_header.index = index; + header.coding_header.common_header.slot = slot; + header.coding_header.num_coding_shreds = num_code as u16; + header.coding_header.num_data_shreds = num_data as u16; + header.coding_header.position = position as u16; header } /// Generates coding shreds for the data shreds in the current FEC set - fn generate_coding_shreds(&mut self) { - if self.fec_rate != 0.0 { - let num_data = (self.index - self.fec_set_index) as usize; + pub fn generate_coding_shreds( + slot: u64, + fec_rate: f32, + data_shred_batch: &[Shred], + ) -> Vec { + assert!(!data_shred_batch.is_empty()); + if fec_rate != 0.0 { + let num_data = data_shred_batch.len(); // always generate at least 1 coding shred even if the fec_rate doesn't allow it - let num_coding = 1.max((self.fec_rate * num_data as f32) as usize); + let num_coding = Self::calculate_num_coding_shreds(num_data as f32, fec_rate); let session = Session::new(num_data, num_coding).expect("Failed to create erasure session"); - let start_index = self.index - num_data as u32; + let start_index = data_shred_batch[0].header().index; // All information after coding shred field in a data shred is encoded - let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER; - let data_ptrs: Vec<_> = self.shreds[self.fec_set_shred_start..] + let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE; + let data_ptrs: Vec<_> = data_shred_batch .iter() .map(|data| &data.payload[coding_block_offset..]) .collect(); @@ -434,7 +477,7 @@ impl Shredder { let mut coding_shreds = Vec::with_capacity(num_coding); (0..num_coding).for_each(|i| { let header = Self::new_coding_shred_header( - self.slot, + slot, start_index + i as u32, num_data, num_coding, @@ -456,43 +499,27 @@ impl Shredder { .expect("Failed in erasure encode"); // append to the shred list - coding_shreds.into_iter().enumerate().for_each(|(i, code)| { - let header = Self::new_coding_shred_header( - self.slot, - start_index + i as u32, - num_data, - num_coding, - i, - ); - self.shreds.push(Shred::new(header, code)); - }); - self.fec_set_index = self.index; - } - } - - /// Create the final data shred for the current FEC set or slot - /// If there's an active data shred, morph it into the final shred - /// If the current active data shred is first in slot, finalize it and create a new shred - fn make_final_data_shred(&mut self, last_in_slot: u8) { - if self.active_shred_header.data_header.index == 0 { - self.finalize_data_shred(); - } - self.active_shred_header.flags |= DATA_COMPLETE_SHRED; - if last_in_slot == LAST_SHRED_IN_SLOT { - self.active_shred_header.flags |= LAST_SHRED_IN_SLOT; + coding_shreds + .into_iter() + .enumerate() + .map(|(i, code)| { + let header = Self::new_coding_shred_header( + slot, + start_index + i as u32, + num_data, + num_coding, + i, + ); + Shred::new(header, code) + }) + .collect() + } else { + vec![] } - self.finalize_data_shred(); - self.sign_unsigned_shreds_and_generate_codes(); } - /// Finalize the current FEC block, and generate coding shreds - pub fn finalize_data(&mut self) { - self.make_final_data_shred(0); - } - - /// Finalize the current slot (i.e. add last slot shred) and generate coding shreds - pub fn finalize_slot(&mut self) { - self.make_final_data_shred(LAST_SHRED_IN_SLOT); + fn calculate_num_coding_shreds(num_data_shreds: f32, fec_rate: f32) -> usize { + 1.max((fec_rate * num_data_shreds) as usize) } fn fill_in_missing_shreds( @@ -534,8 +561,9 @@ impl Shredder { ) -> Result, reed_solomon_erasure::Error> { let mut recovered_data = vec![]; let fec_set_size = num_data + num_coding; + if num_coding > 0 && shreds.len() < fec_set_size { - let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER; + let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE; // Let's try recovering missing shreds using erasure let mut present = &mut vec![true; fec_set_size]; @@ -644,7 +672,7 @@ impl Shredder { data_shred_bufs[..num_data] .iter() .flat_map(|data| { - let offset = *SIZE_OF_DATA_SHRED_HEADER; + let offset = *SIZE_OF_SHRED_HEADER; data[offset as usize..].iter() }) .cloned() @@ -652,9 +680,26 @@ impl Shredder { } } +pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 { + let ticks = create_ticks(1, Hash::default()); + max_entries_per_n_shred(&ticks[0], num_shreds) +} + +pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 { + let shred_data_size = (PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER) as u64; + let vec_size = bincode::serialized_size(&vec![entry]).unwrap(); + let entry_size = bincode::serialized_size(entry).unwrap(); + let count_size = vec_size - entry_size; + + (shred_data_size * num_shreds - count_size) / entry_size +} + #[cfg(test)] -mod tests { +pub mod tests { use super::*; + use solana_sdk::system_transaction; + use std::collections::HashSet; + use std::convert::TryInto; fn verify_test_data_shred( shred: &Shred, @@ -663,6 +708,8 @@ mod tests { parent: u64, pk: &Pubkey, verify: bool, + is_last_in_slot: bool, + is_last_in_fec_set: bool, ) { assert_eq!(shred.payload.len(), PACKET_DATA_SIZE); assert!(shred.is_data()); @@ -670,6 +717,16 @@ mod tests { assert_eq!(shred.slot(), slot); assert_eq!(shred.parent(), parent); assert_eq!(verify, shred.verify(pk)); + if is_last_in_slot { + assert!(shred.last_in_slot()); + } else { + assert!(!shred.last_in_slot()); + } + if is_last_in_fec_set { + assert!(shred.data_complete()); + } else { + assert!(!shred.data_complete()); + } } fn verify_test_code_shred(shred: &Shred, index: u32, slot: u64, pk: &Pubkey, verify: bool) { @@ -686,154 +743,111 @@ mod tests { let slot = 0x123456789abcdef0; // Test that parent cannot be > current slot - assert_matches!(Shredder::new(slot, slot + 1, 1.001, &keypair, 0), Err(_)); + assert_matches!( + Shredder::new(slot, slot + 1, 1.001, keypair.clone()), + Err(_) + ); // Test that slot - parent cannot be > u16 MAX assert_matches!( - Shredder::new(slot, slot - 1 - 0xffff, 1.001, &keypair, 0), + Shredder::new(slot, slot - 1 - 0xffff, 1.001, keypair.clone()), Err(_) ); - let mut shredder = - Shredder::new(slot, slot - 5, 0.0, &keypair, 0).expect("Failed in creating shredder"); - - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); - - // Test0: Write some data to shred. Not enough to create a signed shred - let data: Vec = (0..25).collect(); - assert_eq!(shredder.write(&data).unwrap(), data.len()); - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 25); - - // Test1: Write some more data to shred. Not enough to create a signed shred - assert_eq!(shredder.write(&data).unwrap(), data.len()); - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 50); - - // Test2: Write enough data to create a shred (> PACKET_DATA_SIZE) - let data: Vec<_> = (0..PACKET_DATA_SIZE).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let offset = shredder.write(&data).unwrap(); - assert_ne!(offset, data.len()); - // Assert that we have atleast one signed shred - assert!(!shredder.shreds.is_empty()); - // Assert that the new active shred was not populated - assert_eq!(shredder.active_offset, 0); - - // Test3: Assert that the first shred in slot was created (since we gave a parent to shredder) - let shred = &shredder.shreds[0]; - // Test4: assert that it matches the original shred - // The shreds are not signed yet, as the data is not finalized - verify_test_data_shred(&shred, 0, slot, slot - 5, &keypair.pubkey(), false); - - let seed0 = shred.seed(); - // Test that same seed is generated for a given shred - assert_eq!(seed0, shred.seed()); - - // Test5: Write left over data, and assert that a data shred is being created - shredder.write(&data[offset..]).unwrap(); - - // Test6: Let's finalize the FEC block. That should result in the current shred to morph into - // a signed LastInFECBlock shred - shredder.finalize_data(); - - // We should have a new signed shred - assert!(!shredder.shreds.is_empty()); - - // Must be Last in FEC Set - let shred = &shredder.shreds[1]; - verify_test_data_shred(&shred, 1, slot, slot - 5, &keypair.pubkey(), true); - - // Test that same seed is NOT generated for two different shreds - assert_ne!(seed0, shred.seed()); - - // Test7: Let's write some more data to the shredder. - // Now we should get a new FEC block - let data: Vec<_> = (0..PACKET_DATA_SIZE).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let offset = shredder.write(&data).unwrap(); - assert_ne!(offset, data.len()); - - // We should have a new signed shred - assert!(!shredder.shreds.is_empty()); - - let shred = &shredder.shreds[2]; - verify_test_data_shred(&shred, 2, slot, slot - 5, &keypair.pubkey(), false); - - // Test8: Write more data to generate an intermediate data shred - let offset = shredder.write(&data).unwrap(); - assert_ne!(offset, data.len()); - - // We should have a new signed shred - assert!(!shredder.shreds.is_empty()); - - // Must be a Data shred - let shred = &shredder.shreds[3]; - verify_test_data_shred(&shred, 3, slot, slot - 5, &keypair.pubkey(), false); - - // Test9: Write some data to shredder - let data: Vec = (0..25).collect(); - assert_eq!(shredder.write(&data).unwrap(), data.len()); - - // And, finish the slot - shredder.finalize_slot(); - - // We should have a new signed shred - assert!(!shredder.shreds.is_empty()); - - // Must be LastInSlot - let shred = &shredder.shreds[4]; - verify_test_data_shred(&shred, 4, slot, slot - 5, &keypair.pubkey(), true); - } - - #[test] - fn test_small_data_shredder() { - let keypair = Arc::new(Keypair::new()); + let fec_rate = 0.25; + let parent_slot = slot - 5; + let shredder = Shredder::new(slot, parent_slot, fec_rate, keypair.clone()) + .expect("Failed in creating shredder"); - let slot = 0x123456789abcdef0; - let mut shredder = - Shredder::new(slot, slot - 5, 0.0, &keypair, 0).expect("Failed in creating shredder"); + let entries: Vec<_> = (0..5) + .map(|_| { + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = + system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + Entry::new(&Hash::default(), 1, vec![tx0]) + }) + .collect(); - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); + let size = serialized_size(&entries).unwrap(); + let no_header_size = (PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER) as u64; + let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size; + let num_expected_coding_shreds = + Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate); + + let start_index = 0; + let (data_shreds, coding_shreds, next_index) = + shredder.entries_to_shreds(&entries, true, start_index); + assert_eq!(next_index as u64, num_expected_data_shreds); + + let mut data_shred_indexes = HashSet::new(); + let mut coding_shred_indexes = HashSet::new(); + for shred in data_shreds.iter() { + assert_eq!(shred.headers.shred_type, ShredType(DATA_SHRED)); + let index = shred.headers.data_header.common_header.index; + let is_last = index as u64 == num_expected_data_shreds - 1; + verify_test_data_shred( + shred, + index, + slot, + parent_slot, + &keypair.pubkey(), + true, + is_last, + is_last, + ); + assert!(!data_shred_indexes.contains(&index)); + data_shred_indexes.insert(index); + } - let data: Vec<_> = (0..25).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let _ = shredder.write(&data).unwrap(); + for shred in coding_shreds.iter() { + let index = shred.headers.data_header.common_header.index; + assert_eq!(shred.headers.shred_type, ShredType(CODING_SHRED)); + verify_test_code_shred(shred, index, slot, &keypair.pubkey(), true); + assert!(!coding_shred_indexes.contains(&index)); + coding_shred_indexes.insert(index); + } - // We should have 0 shreds now - assert_eq!(shredder.shreds.len(), 0); + for i in start_index..start_index + num_expected_data_shreds as u32 { + assert!(data_shred_indexes.contains(&i)); + } - shredder.finalize_data(); + for i in start_index..start_index + num_expected_coding_shreds as u32 { + assert!(coding_shred_indexes.contains(&i)); + } - // We should have 1 shred now - assert_eq!(shredder.shreds.len(), 2); + assert_eq!(data_shred_indexes.len() as u64, num_expected_data_shreds); + assert_eq!(coding_shred_indexes.len(), num_expected_coding_shreds); - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 0, slot, slot - 5, &keypair.pubkey(), true); + // Test reassembly + let deshred_payload = Shredder::deshred(&data_shreds).unwrap(); + let deshred_entries: Vec = bincode::deserialize(&deshred_payload).unwrap(); + assert_eq!(entries, deshred_entries); + } - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 1, slot, slot - 5, &keypair.pubkey(), true); + #[test] + fn test_deserialize_shred_payload() { + let keypair = Arc::new(Keypair::new()); + let slot = 1; - let mut shredder = Shredder::new(0x123456789abcdef0, slot - 5, 0.0, &keypair, 2) + let parent_slot = 0; + let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone()) .expect("Failed in creating shredder"); - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); - - let data: Vec<_> = (0..25).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let _ = shredder.write(&data).unwrap(); - - // We should have 0 shreds now - assert_eq!(shredder.shreds.len(), 0); + let entries: Vec<_> = (0..5) + .map(|_| { + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = + system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + Entry::new(&Hash::default(), 1, vec![tx0]) + }) + .collect(); - shredder.finalize_data(); + let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0; - // We should have 1 shred now (LastInFECBlock) - assert_eq!(shredder.shreds.len(), 1); - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 2, slot, slot - 5, &keypair.pubkey(), true); + let deserialized_shred = + Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap(); + assert_eq!(deserialized_shred, *data_shreds.last().unwrap()); } #[test] @@ -842,97 +856,92 @@ mod tests { let slot = 0x123456789abcdef0; // Test that FEC rate cannot be > 1.0 - assert_matches!(Shredder::new(slot, slot - 5, 1.001, &keypair, 0), Err(_)); + assert_matches!( + Shredder::new(slot, slot - 5, 1.001, keypair.clone()), + Err(_) + ); - let mut shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, &keypair, 0) + let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone()) .expect("Failed in creating shredder"); - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); - - // Write enough data to create a shred (> PACKET_DATA_SIZE) - let data: Vec<_> = (0..PACKET_DATA_SIZE).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let _ = shredder.write(&data).unwrap(); - let _ = shredder.write(&data).unwrap(); - - // We should have 2 shreds now - assert_eq!(shredder.shreds.len(), 2); - - shredder.finalize_data(); - - // Finalize must have created 1 final data shred and 3 coding shreds - // assert_eq!(shredder.shreds.len(), 6); - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 0, slot, slot - 5, &keypair.pubkey(), true); - - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 1, slot, slot - 5, &keypair.pubkey(), true); - - let shred = shredder.shreds.remove(0); - verify_test_data_shred(&shred, 2, slot, slot - 5, &keypair.pubkey(), true); - - let shred = shredder.shreds.remove(0); - verify_test_code_shred(&shred, 0, slot, &keypair.pubkey(), true); + // Create enough entries to make > 1 shred + let num_entries = max_ticks_per_n_shreds(1) + 1; + let entries: Vec<_> = (0..num_entries) + .map(|_| { + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = + system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + Entry::new(&Hash::default(), 1, vec![tx0]) + }) + .collect(); - let shred = shredder.shreds.remove(0); - verify_test_code_shred(&shred, 1, slot, &keypair.pubkey(), true); + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0); - let shred = shredder.shreds.remove(0); - verify_test_code_shred(&shred, 2, slot, &keypair.pubkey(), true); - } + // Must have created an equal number of coding and data shreds + assert_eq!(data_shreds.len(), coding_shreds.len()); - #[test] - fn test_large_data_shredder() { - let keypair = Arc::new(Keypair::new()); - let mut shredder = - Shredder::new(1, 0, 0.0, &keypair, 0).expect("Failed in creating shredder"); + for (i, s) in data_shreds.iter().enumerate() { + verify_test_data_shred( + s, + s.index(), + slot, + slot - 5, + &keypair.pubkey(), + true, + i == data_shreds.len() - 1, + i == data_shreds.len() - 1, + ); + } - let data = vec![0u8; 1000 * 1000]; - bincode::serialize_into(&mut shredder, &data).unwrap(); - assert!(shredder.shreds.len() > data.len() / PACKET_DATA_SIZE); + for s in coding_shreds { + verify_test_code_shred(&s, s.index(), slot, &keypair.pubkey(), true); + } } #[test] fn test_recovery_and_reassembly() { let keypair = Arc::new(Keypair::new()); let slot = 0x123456789abcdef0; - let mut shredder = - Shredder::new(slot, slot - 5, 1.0, &keypair, 0).expect("Failed in creating shredder"); - - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); - - let data: Vec<_> = (0..4000).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let mut offset = shredder.write(&data).unwrap(); - let approx_shred_payload_size = offset; - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - - // We should have some shreds now - assert_eq!( - shredder.shreds.len(), - data.len() / approx_shred_payload_size - ); - assert_eq!(offset, data.len()); + let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone()) + .expect("Failed in creating shredder"); - shredder.finalize_data(); + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + let entry = Entry::new(&Hash::default(), 1, vec![tx0]); + + let num_data_shreds: usize = 5; + let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64); + let entries: Vec<_> = (0..num_entries) + .map(|_| { + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = + system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + Entry::new(&Hash::default(), 1, vec![tx0]) + }) + .collect(); + + let serialized_entries = bincode::serialize(&entries).unwrap(); + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0); - // We should have 10 shreds now (one additional final shred, and equal number of coding shreds) - let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2; - assert_eq!(shredder.shreds.len(), expected_shred_count); + // We should have 10 shreds now, an equal number of coding shreds + assert_eq!(data_shreds.len(), num_data_shreds); + assert_eq!(coding_shreds.len(), num_data_shreds); - let shred_infos = shredder.shreds.clone(); + let all_shreds = data_shreds + .iter() + .cloned() + .chain(coding_shreds.iter().cloned()) + .collect::>(); // Test0: Try recovery/reassembly with only data shreds, but not all data shreds. Hint: should fail assert_matches!( Shredder::try_recovery( - shred_infos[..3].to_vec(), - expected_shred_count / 2, - expected_shred_count / 2, + data_shreds[..data_shreds.len() - 1].to_vec(), + num_data_shreds, + num_data_shreds, 0, slot ), @@ -941,21 +950,17 @@ mod tests { // Test1: Try recovery/reassembly with only data shreds. Hint: should work let recovered_data = Shredder::try_recovery( - shred_infos[..4].to_vec(), - expected_shred_count / 2, - expected_shred_count / 2, + data_shreds[..].to_vec(), + num_data_shreds, + num_data_shreds, 0, slot, ) .unwrap(); assert!(recovered_data.is_empty()); - let result = Shredder::deshred(&shred_infos[..4]).unwrap(); - assert!(result.len() >= data.len()); - assert_eq!(data[..], result[..data.len()]); // Test2: Try recovery/reassembly with missing data shreds + coding shreds. Hint: should work - let mut shred_info: Vec = shredder - .shreds + let mut shred_info: Vec = all_shreds .iter() .enumerate() .filter_map(|(i, b)| if i % 2 == 0 { Some(b.clone()) } else { None }) @@ -963,8 +968,8 @@ mod tests { let mut recovered_data = Shredder::try_recovery( shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, + num_data_shreds, + num_data_shreds, 0, slot, ) @@ -972,107 +977,80 @@ mod tests { assert_eq!(recovered_data.len(), 2); // Data shreds 1 and 3 were missing let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 1, slot, slot - 5, &keypair.pubkey(), true); + verify_test_data_shred( + &recovered_shred, + 1, + slot, + slot - 5, + &keypair.pubkey(), + true, + false, + false, + ); shred_info.insert(1, recovered_shred); let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 3, slot, slot - 5, &keypair.pubkey(), true); - shred_info.insert(3, recovered_shred); - - let result = Shredder::deshred(&shred_info[..4]).unwrap(); - assert!(result.len() >= data.len()); - assert_eq!(data[..], result[..data.len()]); - - // Test3: Try recovery/reassembly with 3 missing data shreds + 2 coding shreds. Hint: should work - let mut shred_info: Vec = shredder - .shreds - .iter() - .enumerate() - .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None }) - .collect(); - - let mut recovered_data = Shredder::try_recovery( - shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, - 0, + verify_test_data_shred( + &recovered_shred, + 3, slot, - ) - .unwrap(); - - assert_eq!(recovered_data.len(), 2); // Data shreds 0, 2 were missing - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 0, slot, slot - 5, &keypair.pubkey(), true); - shred_info.insert(0, recovered_shred); - - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 2, slot, slot - 5, &keypair.pubkey(), true); - shred_info.insert(2, recovered_shred); - - let result = Shredder::deshred(&shred_info[..4]).unwrap(); - assert!(result.len() >= data.len()); - assert_eq!(data[..], result[..data.len()]); - - // Test4: Try recovery/reassembly full slot with 3 missing data shreds + 2 coding shreds. Hint: should work - let mut shredder = - Shredder::new(slot, slot - 5, 1.0, &keypair, 0).expect("Failed in creating shredder"); - - let mut offset = shredder.write(&data).unwrap(); - let approx_shred_payload_size = offset; - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - - // We should have some shreds now - assert_eq!( - shredder.shreds.len(), - data.len() / approx_shred_payload_size + slot - 5, + &keypair.pubkey(), + true, + false, + false, ); - assert_eq!(offset, data.len()); - - shredder.finalize_slot(); + shred_info.insert(3, recovered_shred); - // We should have 10 shreds now (one additional final shred, and equal number of coding shreds) - let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2; - assert_eq!(shredder.shreds.len(), expected_shred_count); + let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap(); + assert!(result.len() >= serialized_entries.len()); + assert_eq!(serialized_entries[..], result[..serialized_entries.len()]); - let mut shred_info: Vec = shredder - .shreds + // Test3: Try recovery/reassembly with 3 missing data shreds + 2 coding shreds. Hint: should work + let mut shred_info: Vec = all_shreds .iter() .enumerate() .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None }) .collect(); - let mut recovered_data = Shredder::try_recovery( + let recovered_data = Shredder::try_recovery( shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, + num_data_shreds, + num_data_shreds, 0, slot, ) .unwrap(); - assert_eq!(recovered_data.len(), 2); // Data shreds 0, 2 were missing - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 0, slot, slot - 5, &keypair.pubkey(), true); - shred_info.insert(0, recovered_shred); + assert_eq!(recovered_data.len(), 3); // Data shreds 0, 2, 4 were missing + for (i, recovered_shred) in recovered_data.into_iter().enumerate() { + let index = i * 2; + verify_test_data_shred( + &recovered_shred, + index.try_into().unwrap(), + slot, + slot - 5, + &keypair.pubkey(), + true, + recovered_shred.index() as usize == num_data_shreds - 1, + recovered_shred.index() as usize == num_data_shreds - 1, + ); - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred(&recovered_shred, 2, slot, slot - 5, &keypair.pubkey(), true); - shred_info.insert(2, recovered_shred); + shred_info.insert(i * 2, recovered_shred); + } - let result = Shredder::deshred(&shred_info[..4]).unwrap(); - assert!(result.len() >= data.len()); - assert_eq!(data[..], result[..data.len()]); + let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap(); + assert!(result.len() >= serialized_entries.len()); + assert_eq!(serialized_entries[..], result[..serialized_entries.len()]); - // Test5: Try recovery/reassembly with 3 missing data shreds + 3 coding shreds. Hint: should fail - let shreds: Vec = shredder - .shreds + // Test4: Try reassembly with 2 missing data shreds, but keeping the last + // data shred. Hint: should fail + let shreds: Vec = all_shreds[..num_data_shreds] .iter() .enumerate() .filter_map(|(i, s)| { - if (i < 4 && i % 2 != 0) || (i >= 4 && i % 2 == 0) { + if (i < 4 && i % 2 != 0) || i == num_data_shreds - 1 { + // Keep 1, 3, 4 Some(s.clone()) } else { None @@ -1080,111 +1058,89 @@ mod tests { }) .collect(); - assert_eq!(shreds.len(), 4); + assert_eq!(shreds.len(), 3); assert_matches!( Shredder::deshred(&shreds), Err(reed_solomon_erasure::Error::TooFewDataShards) ); - // Test6: Try recovery/reassembly with non zero index full slot with 3 missing data shreds + 2 coding shreds. Hint: should work - let mut shredder = - Shredder::new(slot, slot - 5, 1.0, &keypair, 25).expect("Failed in creating shredder"); - - let mut offset = shredder.write(&data).unwrap(); - let approx_shred_payload_size = offset; - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - offset += shredder.write(&data[offset..]).unwrap(); - - // We should have some shreds now - assert_eq!( - shredder.shreds.len(), - data.len() / approx_shred_payload_size - ); - assert_eq!(offset, data.len()); + // Test5: Try recovery/reassembly with non zero index full slot with 3 missing data shreds + // and 2 missing coding shreds. Hint: should work + let serialized_entries = bincode::serialize(&entries).unwrap(); + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 25); - shredder.finalize_slot(); + // We should have 10 shreds now, an equal number of coding shreds + assert_eq!(data_shreds.len(), num_data_shreds); + assert_eq!(coding_shreds.len(), num_data_shreds); - // We should have 10 shreds now (one additional final shred, and equal number of coding shreds) - let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2; - assert_eq!(shredder.shreds.len(), expected_shred_count); + let all_shreds = data_shreds + .iter() + .cloned() + .chain(coding_shreds.iter().cloned()) + .collect::>(); - let mut shred_info: Vec = shredder - .shreds + let mut shred_info: Vec = all_shreds .iter() .enumerate() .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None }) .collect(); - let mut recovered_data = Shredder::try_recovery( + let recovered_data = Shredder::try_recovery( shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, + num_data_shreds, + num_data_shreds, 25, slot, ) .unwrap(); - assert_eq!(recovered_data.len(), 2); // Data shreds 0, 2 were missing - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred( - &recovered_shred, - 25, - slot, - slot - 5, - &keypair.pubkey(), - true, - ); - shred_info.insert(0, recovered_shred); + assert_eq!(recovered_data.len(), 3); // Data shreds 25, 27, 29 were missing + for (i, recovered_shred) in recovered_data.into_iter().enumerate() { + let index = 25 + (i * 2); + verify_test_data_shred( + &recovered_shred, + index.try_into().unwrap(), + slot, + slot - 5, + &keypair.pubkey(), + true, + index == 25 + num_data_shreds - 1, + index == 25 + num_data_shreds - 1, + ); - let recovered_shred = recovered_data.remove(0); - verify_test_data_shred( - &recovered_shred, - 27, - slot, - slot - 5, - &keypair.pubkey(), - true, - ); - shred_info.insert(2, recovered_shred); + shred_info.insert(i * 2, recovered_shred); + } - let result = Shredder::deshred(&shred_info[..4]).unwrap(); - assert!(result.len() >= data.len()); - assert_eq!(data[..], result[..data.len()]); + let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap(); + assert!(result.len() >= serialized_entries.len()); + assert_eq!(serialized_entries[..], result[..serialized_entries.len()]); - // Test7: Try recovery/reassembly with incorrect slot. Hint: does not recover any shreds + // Test6: Try recovery/reassembly with incorrect slot. Hint: does not recover any shreds let recovered_data = Shredder::try_recovery( shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, + num_data_shreds, + num_data_shreds, 25, slot + 1, ) .unwrap(); assert!(recovered_data.is_empty()); - // Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds + // Test7: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds assert_matches!( Shredder::try_recovery( shred_info.clone(), - expected_shred_count / 2, - expected_shred_count / 2, + num_data_shreds, + num_data_shreds, 15, slot, ), Err(reed_solomon_erasure::Error::TooFewShardsPresent) ); - // Test9: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds + // Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds assert_matches!( - Shredder::try_recovery( - shred_info, - expected_shred_count / 2, - expected_shred_count / 2, - 35, - slot, - ), + Shredder::try_recovery(shred_info, num_data_shreds, num_data_shreds, 35, slot,), Err(reed_solomon_erasure::Error::TooFewShardsPresent) ); } @@ -1193,43 +1149,87 @@ mod tests { fn test_multi_fec_block_coding() { let keypair = Arc::new(Keypair::new()); let slot = 0x123456789abcdef0; - let mut shredder = - Shredder::new(slot, slot - 5, 1.0, &keypair, 0).expect("Failed in creating shredder"); - - assert!(shredder.shreds.is_empty()); - assert_eq!(shredder.active_offset, 0); - - let data: Vec<_> = (0..MAX_DATA_SHREDS_PER_FEC_BLOCK * 1200 * 3).collect(); - let data: Vec = data.iter().map(|x| *x as u8).collect(); - let mut offset = shredder.write(&data).unwrap(); - let approx_shred_payload_size = offset; - while offset < data.len() { - offset += shredder.write(&data[offset..]).unwrap(); - } + let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone()) + .expect("Failed in creating shredder"); - // We should have some shreds now - assert!(shredder.shreds.len() > data.len() / approx_shred_payload_size); - assert_eq!(offset, data.len()); + let num_fec_sets = 100; + let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize; + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + let entry = Entry::new(&Hash::default(), 1, vec![tx0]); + let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64); + + let entries: Vec<_> = (0..num_entries) + .map(|_| { + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let tx0 = + system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); + Entry::new(&Hash::default(), 1, vec![tx0]) + }) + .collect(); - shredder.finalize_data(); - let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2; - assert_eq!(shredder.shreds.len(), expected_shred_count); + let serialized_entries = bincode::serialize(&entries).unwrap(); + let (data_shreds, coding_shreds, next_index) = + shredder.entries_to_shreds(&entries, true, 0); + assert_eq!(next_index as usize, num_data_shreds); + assert_eq!(data_shreds.len(), num_data_shreds); + assert_eq!(coding_shreds.len(), num_data_shreds); - let mut index = 0; + for c in &coding_shreds { + assert!(!c.is_data()); + } - while index < shredder.shreds.len() { - let num_data_shreds = std::cmp::min( - MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, - (shredder.shreds.len() - index) / 2, - ); - let coding_start = index + num_data_shreds; - shredder.shreds[index..coding_start] + let mut all_shreds = vec![]; + for i in 0..num_fec_sets { + let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize; + let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1; + let fec_set_shreds = data_shreds[shred_start_index..=end_index] .iter() - .for_each(|s| assert!(s.is_data())); - index = coding_start + num_data_shreds; - shredder.shreds[coding_start..index] + .cloned() + .chain(coding_shreds[shred_start_index..=end_index].iter().cloned()) + .collect::>(); + + let mut shred_info: Vec = fec_set_shreds .iter() - .for_each(|s| assert!(!s.is_data())); + .enumerate() + .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None }) + .collect(); + + let recovered_data = Shredder::try_recovery( + shred_info.clone(), + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, + shred_start_index, + slot, + ) + .unwrap(); + + for (i, recovered_shred) in recovered_data.into_iter().enumerate() { + let index = shred_start_index + (i * 2); + verify_test_data_shred( + &recovered_shred, + index.try_into().unwrap(), + slot, + slot - 5, + &keypair.pubkey(), + true, + index == end_index, + index == end_index, + ); + + shred_info.insert(i * 2, recovered_shred); + } + + all_shreds.extend( + shred_info + .into_iter() + .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize), + ); } + + let result = Shredder::deshred(&all_shreds[..]).unwrap(); + assert_eq!(serialized_entries[..], result[..serialized_entries.len()]); } } diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs new file mode 100644 index 00000000000000..8c3922878f92e5 --- /dev/null +++ b/core/src/shred_fetch_stage.rs @@ -0,0 +1,117 @@ +//! The `shred_fetch_stage` pulls shreds from UDP sockets and sends it to a channel. + +use crate::cuda_runtime::PinnedVec; +use crate::packet::Packet; +use crate::recycler::Recycler; +use crate::service::Service; +use crate::streamer::{self, PacketReceiver, PacketSender}; +use std::net::UdpSocket; +use std::sync::atomic::AtomicBool; +use std::sync::mpsc::channel; +use std::sync::Arc; +use std::thread::{self, Builder, JoinHandle}; + +pub struct ShredFetchStage { + thread_hdls: Vec>, +} + +impl ShredFetchStage { + // updates packets received on a channel and sends them on another channel + fn modify_packets(recvr: PacketReceiver, sendr: PacketSender, modify: F) + where + F: Fn(&mut Packet), + { + while let Some(mut p) = recvr.iter().next() { + p.packets.iter_mut().for_each(|p| modify(p)); + if sendr.send(p).is_err() { + break; + } + } + } + + fn packet_modifier( + sockets: Vec>, + exit: &Arc, + sender: PacketSender, + recycler: Recycler>, + modify: F, + ) -> (Vec>, JoinHandle<()>) + where + F: Fn(&mut Packet) + Send + 'static, + { + let (packet_sender, packet_receiver) = channel(); + let streamers = sockets + .into_iter() + .map(|s| { + streamer::receiver( + s, + &exit, + packet_sender.clone(), + recycler.clone(), + "packet_modifier", + ) + }) + .collect(); + let sender = sender.clone(); + let modifier_hdl = Builder::new() + .name("solana-tvu-fetch-stage-packet-modifier".to_string()) + .spawn(|| Self::modify_packets(packet_receiver, sender, modify)) + .unwrap(); + (streamers, modifier_hdl) + } + + pub fn new( + sockets: Vec>, + forward_sockets: Vec>, + repair_socket: Arc, + sender: &PacketSender, + exit: &Arc, + ) -> Self { + let recycler = Recycler::default(); + let tvu_threads = sockets.into_iter().map(|socket| { + streamer::receiver( + socket, + &exit, + sender.clone(), + recycler.clone(), + "shred_fetch_stage", + ) + }); + + let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier( + forward_sockets, + &exit, + sender.clone(), + recycler.clone(), + |p| p.meta.forward = true, + ); + + let (repair_receiver, repair_handler) = Self::packet_modifier( + vec![repair_socket], + &exit, + sender.clone(), + recycler.clone(), + |p| p.meta.repair = true, + ); + + let mut thread_hdls: Vec<_> = tvu_threads + .chain(tvu_forwards_threads.into_iter()) + .collect(); + thread_hdls.extend(repair_receiver.into_iter()); + thread_hdls.push(fwd_thread_hdl); + thread_hdls.push(repair_handler); + + Self { thread_hdls } + } +} + +impl Service for ShredFetchStage { + type JoinReturnType = (); + + fn join(self) -> thread::Result<()> { + for thread_hdl in self.thread_hdls { + thread_hdl.join()?; + } + Ok(()) + } +} diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs index b8d737ff98309f..e31eb0737909b2 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -143,7 +143,6 @@ fn do_get_packet_offsets(packet: &Packet, current_offset: u32) -> UnsanitizedPac // Using msg_start_offset which is based on sig_len_untrusted introduces uncertainty. // Ultimately, the actual sigverify will determine the uncertainty. let sig_len_maybe_trusted = packet.data[msg_start_offset] as usize; - let msg_header_size = serialized_size(&MessageHeader::default()).unwrap() as usize; // This directly reads the length of Message.account_keys (serialized with short_vec) @@ -281,7 +280,7 @@ pub fn ed25519_verify( // micro-benchmarks show GPU time for smallest batch around 15-20ms // and CPU speed for 64-128 sigverifies around 10-20ms. 64 is a nice // power-of-two number around that accounting for the fact that the CPU - // may be busy doing other things while being a real fullnode + // may be busy doing other things while being a real validator // TODO: dynamically adjust this crossover if count < 64 { return ed25519_verify_cpu(batches); diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 5918be3f29e0da..79529108db8ad8 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -16,7 +16,7 @@ use crate::sigverify::TxOffset; use crate::streamer::{self, PacketReceiver}; use crossbeam_channel::Sender as CrossbeamSender; use solana_measure::measure::Measure; -use solana_metrics::{datapoint_info, inc_new_counter_info}; +use solana_metrics::{datapoint_debug, inc_new_counter_info}; use solana_sdk::timing; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::sync::{Arc, Mutex}; @@ -110,7 +110,7 @@ impl SigVerifyStage { (len as f32 / verify_batch_time.as_s()) ); - datapoint_info!( + datapoint_debug!( "sigverify_stage-total_verify_time", ("batch_len", batch_len, i64), ("len", len, i64), diff --git a/core/src/snapshot_utils.rs b/core/src/snapshot_utils.rs index ec5858badf9969..1f1ba3217483fe 100644 --- a/core/src/snapshot_utils.rs +++ b/core/src/snapshot_utils.rs @@ -136,6 +136,7 @@ pub fn add_snapshot>(snapshot_path: P, bank: &Bank) -> Result<()> bank.slot(), snapshot_file_path, ); + let snapshot_file = File::create(&snapshot_file_path)?; // snapshot writer let mut snapshot_stream = BufWriter::new(snapshot_file); @@ -194,9 +195,7 @@ pub fn bank_from_archive>( )?; if !bank.verify_hash_internal_state() { - warn!("Invalid snapshot hash value!"); - } else { - info!("Snapshot hash value matches."); + panic!("Snapshot bank failed to verify"); } // Move the unpacked snapshots into `snapshot_config.snapshot_path` diff --git a/core/src/staking_utils.rs b/core/src/staking_utils.rs index bc29d395cbc7ba..7cd47ee325e906 100644 --- a/core/src/staking_utils.rs +++ b/core/src/staking_utils.rs @@ -211,27 +211,27 @@ pub(crate) mod tests { ..Stake::default() }; - let first_stakers_epoch = bank.get_stakers_epoch(bank.slot()); - // find the first slot in the next staker's epoch + let first_leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot()); + // find the first slot in the next leader schedule epoch let mut slot = bank.slot(); loop { slot += 1; - if bank.get_stakers_epoch(slot) != first_stakers_epoch { + if bank.get_leader_schedule_epoch(slot) != first_leader_schedule_epoch { break; } } let bank = new_from_parent(&Arc::new(bank), slot); - let next_stakers_epoch = bank.get_stakers_epoch(slot); + let next_leader_schedule_epoch = bank.get_leader_schedule_epoch(slot); - let result: Vec<_> = epoch_stakes_and_lockouts(&bank, first_stakers_epoch); + let result: Vec<_> = epoch_stakes_and_lockouts(&bank, first_leader_schedule_epoch); assert_eq!( result, - vec![(leader_stake.stake(first_stakers_epoch, None), None)] + vec![(leader_stake.stake(first_leader_schedule_epoch, None), None)] ); // epoch stakes and lockouts are saved off for the future epoch, should // match current bank state - let mut result: Vec<_> = epoch_stakes_and_lockouts(&bank, next_stakers_epoch); + let mut result: Vec<_> = epoch_stakes_and_lockouts(&bank, next_leader_schedule_epoch); result.sort(); let stake_history = StakeHistory::from_account(&bank.get_account(&stake_history::id()).unwrap()).unwrap(); diff --git a/core/src/storage_stage.rs b/core/src/storage_stage.rs index c13ea5fabb1cae..14b30353fa9747 100644 --- a/core/src/storage_stage.rs +++ b/core/src/storage_stage.rs @@ -641,6 +641,7 @@ mod tests { use solana_sdk::hash::{Hash, Hasher}; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::{Keypair, KeypairUtil}; + use solana_storage_api::storage_instruction::StorageAccountType; use std::cmp::{max, min}; use std::fs::remove_dir_all; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -736,14 +737,14 @@ mod tests { let mut last_bank = bank; let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1) .map(|i| { - let bank = Bank::new_from_parent(&last_bank, &keypair.pubkey(), i); + let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i)); blocktree_processor::process_entries( &bank, &entry::create_ticks(64, bank.last_blockhash()), true, ) .expect("failed process entries"); - last_bank = Arc::new(bank); + last_bank = bank; last_bank.clone() }) .collect::>(); @@ -822,11 +823,12 @@ mod tests { // create accounts let bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 1)); - let account_ix = storage_instruction::create_replicator_storage_account( + let account_ix = storage_instruction::create_storage_account( &mint_keypair.pubkey(), &Pubkey::new_rand(), &replicator_keypair.pubkey(), 1, + StorageAccountType::Replicator, ); let account_tx = Transaction::new_signed_instructions( &[&mint_keypair], diff --git a/core/src/streamer.rs b/core/src/streamer.rs index ef773df5bddce7..edb6d6c579de53 100644 --- a/core/src/streamer.rs +++ b/core/src/streamer.rs @@ -2,6 +2,7 @@ //! use crate::packet::{Blob, Packets, PacketsRecycler, SharedBlobs, PACKETS_PER_BATCH}; +use crate::recvmmsg::NUM_RCVMMSGS; use crate::result::{Error, Result}; use solana_sdk::timing::duration_as_ms; use std::net::UdpSocket; @@ -23,6 +24,10 @@ fn recv_loop( recycler: &PacketsRecycler, name: &'static str, ) -> Result<()> { + let mut recv_count = 0; + let mut call_count = 0; + let mut now = Instant::now(); + let mut num_max_received = 0; // Number of times maximum packets were received loop { let mut msgs = Packets::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name); loop { @@ -31,11 +36,29 @@ fn recv_loop( if exit.load(Ordering::Relaxed) { return Ok(()); } - if let Ok(_len) = msgs.recv_from(sock) { + if let Ok(len) = msgs.recv_from(sock) { + if len == NUM_RCVMMSGS { + num_max_received += 1; + } + recv_count += len; + call_count += 1; channel.send(msgs)?; break; } } + if recv_count > 1024 { + datapoint_debug!( + "receiver-stats", + ("received", recv_count as i64, i64), + ("call_count", i64::from(call_count), i64), + ("elapsed", now.elapsed().as_millis() as i64, i64), + ("max_received", i64::from(num_max_received), i64), + ); + recv_count = 0; + call_count = 0; + now = Instant::now(); + num_max_received = 0; + } } } diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 394cf873c1528c..19eb7d3d725c5d 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -13,7 +13,6 @@ //! - Generating the keys used to encrypt the ledger and sample it for storage mining. use crate::bank_forks::BankForks; -use crate::blob_fetch_stage::BlobFetchStage; use crate::blockstream_service::BlockstreamService; use crate::blocktree::{Blocktree, CompletedSlotsReceiver}; use crate::cluster_info::ClusterInfo; @@ -25,6 +24,7 @@ use crate::replay_stage::ReplayStage; use crate::retransmit_stage::RetransmitStage; use crate::rpc_subscriptions::RpcSubscriptions; use crate::service::Service; +use crate::shred_fetch_stage::ShredFetchStage; use crate::snapshot_package::SnapshotPackagerService; use crate::storage_stage::{StorageStage, StorageState}; use solana_sdk::pubkey::Pubkey; @@ -37,7 +37,7 @@ use std::sync::{Arc, Mutex, RwLock}; use std::thread; pub struct Tvu { - fetch_stage: BlobFetchStage, + fetch_stage: ShredFetchStage, retransmit_stage: RetransmitStage, replay_stage: ReplayStage, blockstream_service: Option, @@ -49,7 +49,7 @@ pub struct Tvu { pub struct Sockets { pub fetch: Vec, pub repair: UdpSocket, - pub retransmit: UdpSocket, + pub retransmit: Vec, pub forwards: Vec, } @@ -92,21 +92,20 @@ impl Tvu { let Sockets { repair: repair_socket, fetch: fetch_sockets, - retransmit: retransmit_socket, + retransmit: retransmit_sockets, forwards: tvu_forward_sockets, } = sockets; let (fetch_sender, fetch_receiver) = channel(); let repair_socket = Arc::new(repair_socket); - let mut blob_sockets: Vec> = - fetch_sockets.into_iter().map(Arc::new).collect(); - blob_sockets.push(repair_socket.clone()); - let blob_forward_sockets: Vec> = + let fetch_sockets: Vec> = fetch_sockets.into_iter().map(Arc::new).collect(); + let forward_sockets: Vec> = tvu_forward_sockets.into_iter().map(Arc::new).collect(); - let fetch_stage = BlobFetchStage::new_multi_socket_packet( - blob_sockets, - blob_forward_sockets, + let fetch_stage = ShredFetchStage::new( + fetch_sockets, + forward_sockets, + repair_socket.clone(), &fetch_sender, &exit, ); @@ -119,7 +118,7 @@ impl Tvu { leader_schedule_cache, blocktree.clone(), &cluster_info, - Arc::new(retransmit_socket), + Arc::new(retransmit_sockets), repair_socket, fetch_receiver, &exit, @@ -271,7 +270,7 @@ pub mod tests { { Sockets { repair: target1.sockets.repair, - retransmit: target1.sockets.retransmit, + retransmit: target1.sockets.retransmit_sockets, fetch: target1.sockets.tvu, forwards: target1.sockets.tvu_forwards, } diff --git a/core/src/validator.rs b/core/src/validator.rs index 949f1f13fc04d2..e2ac6d90843ff2 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1,4 +1,4 @@ -//! The `fullnode` module hosts all the fullnode microservices. +//! The `validator` module hosts all the validator microservices. use crate::bank_forks::{BankForks, SnapshotConfig}; use crate::blocktree::{Blocktree, CompletedSlotsReceiver}; @@ -158,6 +158,7 @@ impl Validator { let bank_info = &bank_forks_info[0]; let bank = bank_forks[bank_info.bank_slot].clone(); let bank_forks = Arc::new(RwLock::new(bank_forks)); + let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let mut validator_exit = ValidatorExit::default(); let exit_ = exit.clone(); @@ -185,6 +186,7 @@ impl Validator { storage_state.clone(), config.rpc_config.clone(), bank_forks.clone(), + fork_confidence_cache.clone(), ledger_path, genesis_blockhash, &validator_exit, @@ -274,9 +276,10 @@ impl Validator { .expect("Failed to clone repair socket"), retransmit: node .sockets - .retransmit - .try_clone() - .expect("Failed to clone retransmit socket"), + .retransmit_sockets + .iter() + .map(|s| s.try_clone().expect("Failed to clone retransmit socket")) + .collect(), fetch: node .sockets .tvu @@ -297,7 +300,6 @@ impl Validator { Some(voting_keypair) }; - let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default())); let tvu = Tvu::new( vote_account, voting_keypair, @@ -378,7 +380,7 @@ impl Validator { ); info!( "local retransmit address: {}", - node.sockets.retransmit.local_addr().unwrap() + node.sockets.retransmit_sockets[0].local_addr().unwrap() ); } } @@ -417,8 +419,11 @@ fn get_bank_forks( return blocktree_processor::process_blocktree_from_root( blocktree, Arc::new(deserialized_bank), - verify_ledger, - dev_halt_at_slot, + &blocktree_processor::ProcessOptions { + verify_ledger, + dev_halt_at_slot, + ..blocktree_processor::ProcessOptions::default() + }, ) .expect("processing blocktree after loading snapshot failed"); } else { @@ -433,8 +438,11 @@ fn get_bank_forks( &genesis_block, &blocktree, account_paths, - verify_ledger, - dev_halt_at_slot, + blocktree_processor::ProcessOptions { + verify_ledger, + dev_halt_at_slot, + ..blocktree_processor::ProcessOptions::default() + }, ) .expect("process_blocktree failed") } diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 7383d9ffa14d17..88aa7934995d09 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -296,8 +296,8 @@ mod test { shred::SIZE_OF_SHRED_TYPE, }; use rand::{seq::SliceRandom, thread_rng}; - use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH; use solana_sdk::{ + epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, hash::Hash, signature::{Keypair, KeypairUtil}, }; @@ -311,17 +311,14 @@ mod test { }; fn local_entries_to_shred( - entries: Vec, + entries: &[Entry], slot: u64, parent: u64, keypair: &Arc, ) -> Vec { - let mut shredder = - Shredder::new(slot, parent, 0.0, keypair, 0).expect("Failed to create entry shredder"); - bincode::serialize_into(&mut shredder, &entries) - .expect("Expect to write all entries to shreds"); - shredder.finalize_slot(); - shredder.shreds.drain(..).collect() + let shredder = Shredder::new(slot, parent, 0.0, keypair.clone()) + .expect("Failed to create entry shredder"); + shredder.entries_to_shreds(&entries, true, 0).0 } #[test] @@ -330,8 +327,7 @@ mod test { let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); let num_entries = 10; let original_entries = create_ticks(num_entries, Hash::default()); - let mut shreds = - local_entries_to_shred(original_entries.clone(), 0, 0, &Arc::new(Keypair::new())); + let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new())); shreds.reverse(); blocktree .insert_shreds(shreds, None) @@ -356,7 +352,7 @@ mod test { )); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let mut shreds = local_entries_to_shred(vec![Entry::default()], 0, 0, &leader_keypair); + let mut shreds = local_entries_to_shred(&[Entry::default()], 0, 0, &leader_keypair); // with a Bank for slot 0, blob continues assert_eq!( @@ -408,8 +404,7 @@ mod test { // with a shred where shred.slot() == root, blob gets thrown out let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3; - let shreds = - local_entries_to_shred(vec![Entry::default()], slot, slot - 1, &leader_keypair); + let shreds = local_entries_to_shred(&[Entry::default()], slot, slot - 1, &leader_keypair); assert_eq!( should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot), false @@ -418,7 +413,7 @@ mod test { // with a shred where shred.parent() < root, blob gets thrown out let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3; let shreds = - local_entries_to_shred(vec![Entry::default()], slot + 1, slot - 1, &leader_keypair); + local_entries_to_shred(&[Entry::default()], slot + 1, slot - 1, &leader_keypair); assert_eq!( should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot), false diff --git a/core/tests/cluster_info.rs b/core/tests/cluster_info.rs index 1cd9407bb235cd..34c93f3f1de3a3 100644 --- a/core/tests/cluster_info.rs +++ b/core/tests/cluster_info.rs @@ -50,15 +50,16 @@ fn retransmit( } }); seed[0..4].copy_from_slice(&blob.to_le_bytes()); - let (neighbors, children) = compute_retransmit_peers(fanout, my_index, shuffled_nodes); - children.iter().for_each(|p| { - let s = senders.get(&p.id).unwrap(); + let shuffled_indices = (0..shuffled_nodes.len()).collect(); + let (neighbors, children) = compute_retransmit_peers(fanout, my_index, shuffled_indices); + children.into_iter().for_each(|i| { + let s = senders.get(&shuffled_nodes[i].id).unwrap(); let _ = s.send((blob, retransmit)); }); if retransmit { - neighbors.iter().for_each(|p| { - let s = senders.get(&p.id).unwrap(); + neighbors.into_iter().for_each(|i| { + let s = senders.get(&shuffled_nodes[i].id).unwrap(); let _ = s.send((blob, false)); }); } @@ -113,8 +114,18 @@ fn run_simulation(stakes: &[u64], fanout: usize) { .map(|i| { let mut seed = [0; 32]; seed[0..4].copy_from_slice(&i.to_le_bytes()); - let (_, peers) = cluster_info - .shuffle_peers_and_index(Some(&staked_nodes), ChaChaRng::from_seed(seed)); + let (peers, stakes_and_index) = + cluster_info.sorted_retransmit_peers_and_stakes(Some(&staked_nodes)); + let (_, shuffled_stakes_and_indexes) = ClusterInfo::shuffle_peers_and_index( + &cluster_info.id(), + &peers, + &stakes_and_index, + ChaChaRng::from_seed(seed), + ); + let peers = shuffled_stakes_and_indexes + .into_iter() + .map(|(_, i)| peers[i].clone()) + .collect(); peers }) .collect(); diff --git a/core/tests/gossip.rs b/core/tests/gossip.rs index 1fbf7b1cde04a5..c4a53b577bbaa3 100644 --- a/core/tests/gossip.rs +++ b/core/tests/gossip.rs @@ -177,7 +177,8 @@ pub fn cluster_info_retransmit() -> result::Result<()> { let mut p = Packet::default(); p.meta.size = 10; let peers = c1.read().unwrap().retransmit_peers(); - ClusterInfo::retransmit_to(&c1, &peers, &p, None, &tn1, false)?; + let retransmit_peers: Vec<_> = peers.iter().collect(); + ClusterInfo::retransmit_to(&retransmit_peers, &mut p, None, &tn1, false)?; let res: Vec<_> = [tn1, tn2, tn3] .into_par_iter() .map(|s| { diff --git a/drone/src/bin/drone.rs b/drone/src/bin/drone.rs index 968cb06f80eed2..2bfc98945496f3 100644 --- a/drone/src/bin/drone.rs +++ b/drone/src/bin/drone.rs @@ -1,7 +1,7 @@ use clap::{crate_description, crate_name, crate_version, App, Arg}; use solana_drone::drone::{run_drone, Drone, DRONE_PORT}; use solana_drone::socketaddr; -use solana_sdk::signature::read_keypair; +use solana_sdk::signature::read_keypair_file; use std::error; use std::net::{Ipv4Addr, SocketAddr}; use std::sync::{Arc, Mutex}; @@ -38,8 +38,8 @@ fn main() -> Result<(), Box> { ) .get_matches(); - let mint_keypair = - read_keypair(matches.value_of("keypair").unwrap()).expect("failed to read client keypair"); + let mint_keypair = read_keypair_file(matches.value_of("keypair").unwrap()) + .expect("failed to read client keypair"); let time_slice: Option; if let Some(secs) = matches.value_of("slice") { diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index c7cd3f8b710189..ebda097c1f2a08 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -10,12 +10,11 @@ homepage = "https://solana.com/" [dependencies] base64 = "0.10.1" -bincode = "1.2.0" clap = "2.33.0" serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" solana-core = { path = "../core", version = "0.20.0" } solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.0" } diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index 794faba5e5b460..cae7466fdc6e47 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -1,7 +1,8 @@ use serde::{Deserialize, Serialize}; +/// An account where the data is encoded as a Base64 string. #[derive(Serialize, Deserialize, Debug)] -pub struct PrimordialAccountDetails { +pub struct Base64Account { pub balance: u64, pub owner: String, pub data: String, diff --git a/genesis/src/main.rs b/genesis/src/main.rs index 01b64ba0d14be0..406a52789ca5d3 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -1,30 +1,25 @@ //! A command-line executable for generating the chain's genesis block. -use base64; use clap::{crate_description, crate_name, crate_version, value_t_or_exit, App, Arg}; use solana_core::blocktree::create_new_ledger; -use solana_genesis::PrimordialAccountDetails; -use solana_sdk::account::Account; -use solana_sdk::clock; -use solana_sdk::fee_calculator::FeeCalculator; -use solana_sdk::genesis_block::Builder; -use solana_sdk::hash::{hash, Hash}; -use solana_sdk::poh_config::PohConfig; -use solana_sdk::pubkey::Pubkey; -use solana_sdk::rent_calculator::RentCalculator; -use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil}; -use solana_sdk::system_program; -use solana_sdk::timing; +use solana_core::poh::compute_hashes_per_tick; +use solana_genesis::Base64Account; +use solana_sdk::{ + account::Account, + clock, + epoch_schedule::EpochSchedule, + fee_calculator::FeeCalculator, + genesis_block::GenesisBlock, + poh_config::PohConfig, + pubkey::Pubkey, + rent_calculator::RentCalculator, + signature::{read_keypair_file, Keypair, KeypairUtil}, + system_program, timing, +}; use solana_stake_api::stake_state; use solana_storage_api::storage_contract; use solana_vote_api::vote_state; -use std::collections::HashMap; -use std::error; -use std::fs::File; -use std::io; -use std::path::PathBuf; -use std::str::FromStr; -use std::time::{Duration, Instant}; +use std::{collections::HashMap, error, fs::File, io, path::PathBuf, str::FromStr, time::Duration}; pub const BOOTSTRAP_LEADER_LAMPORTS: u64 = 42; @@ -33,18 +28,18 @@ pub enum AccountFileFormat { Keypair, } -pub fn append_primordial_accounts( +pub fn add_genesis_accounts( file: &str, file_format: AccountFileFormat, - mut builder: Builder, -) -> io::Result<(Builder)> { + genesis_block: &mut GenesisBlock, +) -> io::Result<()> { let accounts_file = File::open(file.to_string())?; - let primordial_accounts: HashMap = + let genesis_accounts: HashMap = serde_yaml::from_reader(accounts_file) .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?; - for (account, account_details) in primordial_accounts { + for (account, account_details) in genesis_accounts { let pubkey = match file_format { AccountFileFormat::Pubkey => Pubkey::from_str(account.as_str()).map_err(|err| { io::Error::new( @@ -76,10 +71,10 @@ pub fn append_primordial_accounts( } account.executable = account_details.executable; - builder = builder.account(pubkey, account); + genesis_block.add_account(pubkey, account); } - Ok(builder) + Ok(()) } fn main() -> Result<(), Box> { @@ -87,18 +82,18 @@ fn main() -> Result<(), Box> { let default_target_lamports_per_signature = &FeeCalculator::default() .target_lamports_per_signature .to_string(); + let default_target_signatures_per_slot = &FeeCalculator::default() + .target_signatures_per_slot + .to_string(); let default_lamports_per_byte_year = &RentCalculator::default().lamports_per_byte_year.to_string(); let default_rent_exemption_threshold = &RentCalculator::default().exemption_threshold.to_string(); let default_rent_burn_percentage = &RentCalculator::default().burn_percent.to_string(); - let default_target_signatures_per_slot = &FeeCalculator::default() - .target_signatures_per_slot - .to_string(); let default_target_tick_duration = &timing::duration_as_ms(&PohConfig::default().target_tick_duration).to_string(); let default_ticks_per_slot = &clock::DEFAULT_TICKS_PER_SLOT.to_string(); - let default_slots_per_epoch = &clock::DEFAULT_SLOTS_PER_EPOCH.to_string(); + let default_slots_per_epoch = &EpochSchedule::default().slots_per_epoch.to_string(); let matches = App::new(crate_name!()) .about(crate_description!()) @@ -302,11 +297,11 @@ fn main() -> Result<(), Box> { let bootstrap_leader_stake_lamports = value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64); - let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?; - let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?; - let bootstrap_stake_keypair = read_keypair(bootstrap_stake_keypair_file)?; - let bootstrap_storage_keypair = read_keypair(bootstrap_storage_keypair_file)?; - let mint_keypair = read_keypair(mint_keypair_file)?; + let bootstrap_leader_keypair = read_keypair_file(bootstrap_leader_keypair_file)?; + let bootstrap_vote_keypair = read_keypair_file(bootstrap_vote_keypair_file)?; + let bootstrap_stake_keypair = read_keypair_file(bootstrap_stake_keypair_file)?; + let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?; + let mint_keypair = read_keypair_file(mint_keypair_file)?; let vote_account = vote_state::create_account( &bootstrap_vote_keypair.pubkey(), @@ -321,47 +316,44 @@ fn main() -> Result<(), Box> { bootstrap_leader_stake_lamports, ); - let mut builder = Builder::new() - .accounts(&[ - // the mint - ( - mint_keypair.pubkey(), - Account::new(lamports, 0, &system_program::id()), - ), - // node needs an account to issue votes from - ( + let accounts = vec![ + // the mint + ( + mint_keypair.pubkey(), + Account::new(lamports, 0, &system_program::id()), + ), + // node needs an account to issue votes from + ( + bootstrap_leader_keypair.pubkey(), + Account::new(bootstrap_leader_lamports, 0, &system_program::id()), + ), + // where votes go to + (bootstrap_vote_keypair.pubkey(), vote_account), + // passive bootstrap leader stake + (bootstrap_stake_keypair.pubkey(), stake_account), + ( + bootstrap_storage_keypair.pubkey(), + storage_contract::create_validator_storage_account( bootstrap_leader_keypair.pubkey(), - Account::new(bootstrap_leader_lamports, 0, &system_program::id()), + 1, ), - // where votes go to - (bootstrap_vote_keypair.pubkey(), vote_account), - // passive bootstrap leader stake - (bootstrap_stake_keypair.pubkey(), stake_account), - ( - bootstrap_storage_keypair.pubkey(), - storage_contract::create_validator_storage_account( - bootstrap_leader_keypair.pubkey(), - 1, - ), - ), - ]) - .native_instruction_processors(&solana_genesis_programs::get()) - .ticks_per_slot(value_t_or_exit!(matches, "ticks_per_slot", u64)) - .slots_per_epoch(value_t_or_exit!(matches, "slots_per_epoch", u64)); - - let mut fee_calculator = FeeCalculator::default(); - fee_calculator.target_lamports_per_signature = - value_t_or_exit!(matches, "target_lamports_per_signature", u64); - fee_calculator.target_signatures_per_slot = - value_t_or_exit!(matches, "target_signatures_per_slot", usize); - builder = builder.fee_calculator(FeeCalculator::new_derived(&fee_calculator, 0)); + ), + ]; + + let ticks_per_slot = value_t_or_exit!(matches, "ticks_per_slot", u64); + let slots_per_epoch = value_t_or_exit!(matches, "slots_per_epoch", u64); + let epoch_schedule = EpochSchedule::new(slots_per_epoch); + + let fee_calculator = FeeCalculator::new( + value_t_or_exit!(matches, "target_lamports_per_signature", u64), + value_t_or_exit!(matches, "target_signatures_per_slot", usize), + ); let rent_calculator = RentCalculator { lamports_per_byte_year: value_t_or_exit!(matches, "lamports_per_byte_year", u64), exemption_threshold: value_t_or_exit!(matches, "rent_exemption_threshold", f64), burn_percent: value_t_or_exit!(matches, "rent_burn_percentage", u8), }; - builder = builder.rent_calculator(rent_calculator); let mut poh_config = PohConfig::default(); poh_config.target_tick_duration = @@ -369,17 +361,8 @@ fn main() -> Result<(), Box> { match matches.value_of("hashes_per_tick").unwrap() { "auto" => { - let mut v = Hash::default(); - println!("Running 1 million hashes..."); - let start = Instant::now(); - for _ in 0..1_000_000 { - v = hash(&v.as_ref()); - } - let end = Instant::now(); - let elapsed = end.duration_since(start).as_millis(); - let hashes_per_tick = - (poh_config.target_tick_duration.as_millis() * 1_000_000 / elapsed) as u64; + compute_hashes_per_tick(poh_config.target_tick_duration, 1_000_000); println!("Hashes per tick: {}", hashes_per_tick); poh_config.hashes_per_tick = Some(hashes_per_tick); } @@ -390,28 +373,38 @@ fn main() -> Result<(), Box> { poh_config.hashes_per_tick = Some(value_t_or_exit!(matches, "hashes_per_tick", u64)); } } - builder = builder.poh_config(poh_config); + + let mut genesis_block = GenesisBlock { + accounts, + native_instruction_processors: solana_genesis_programs::get(), + ticks_per_slot, + epoch_schedule, + fee_calculator, + rent_calculator, + poh_config, + ..GenesisBlock::default() + }; if let Some(file) = matches.value_of("primordial_accounts_file") { - builder = append_primordial_accounts(file, AccountFileFormat::Pubkey, builder)?; + add_genesis_accounts(file, AccountFileFormat::Pubkey, &mut genesis_block)?; } if let Some(file) = matches.value_of("primordial_keypairs_file") { - builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?; + add_genesis_accounts(file, AccountFileFormat::Keypair, &mut genesis_block)?; } // add genesis stuff from storage and stake - builder = solana_storage_api::rewards_pools::genesis(builder); - builder = solana_stake_api::genesis(builder); + solana_storage_api::rewards_pools::add_genesis_accounts(&mut genesis_block); + solana_stake_api::add_genesis_accounts(&mut genesis_block); - create_new_ledger(&ledger_path, &builder.build())?; + create_new_ledger(&ledger_path, &genesis_block)?; Ok(()) } #[cfg(test)] mod tests { use super::*; - use solana_sdk::genesis_block::Builder; + use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::pubkey::Pubkey; use std::collections::HashMap; use std::fs::remove_file; @@ -422,37 +415,37 @@ mod tests { #[test] fn test_append_primordial_accounts_to_genesis() { // Test invalid file returns error - assert!(append_primordial_accounts( + assert!(add_genesis_accounts( "unknownfile", AccountFileFormat::Pubkey, - Builder::new() + &mut GenesisBlock::default() ) .is_err()); - let mut builder = Builder::new(); + let mut genesis_block = GenesisBlock::default(); - let mut primordial_accounts = HashMap::new(); - primordial_accounts.insert( + let mut genesis_accounts = HashMap::new(); + genesis_accounts.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 2 as u64, executable: false, data: String::from("aGVsbG8="), }, ); - primordial_accounts.insert( + genesis_accounts.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 1 as u64, executable: true, data: String::from("aGVsbG8gd29ybGQ="), }, ); - primordial_accounts.insert( + genesis_accounts.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 3 as u64, executable: true, @@ -460,15 +453,15 @@ mod tests { }, ); - let serialized = serde_yaml::to_string(&primordial_accounts).unwrap(); + let serialized = serde_yaml::to_string(&genesis_accounts).unwrap(); let path = Path::new("test_append_primordial_accounts_to_genesis.yml"); let mut file = File::create(path).unwrap(); file.write_all(&serialized.into_bytes()).unwrap(); - builder = append_primordial_accounts( + add_genesis_accounts( "test_append_primordial_accounts_to_genesis.yml", AccountFileFormat::Pubkey, - builder, + &mut genesis_block, ) .expect("test_append_primordial_accounts_to_genesis.yml"); // Test valid file returns ok @@ -476,57 +469,56 @@ mod tests { remove_file(path).unwrap(); { - let genesis_block = builder.clone().build(); // Test all accounts were added - assert_eq!(genesis_block.accounts.len(), primordial_accounts.len()); + assert_eq!(genesis_block.accounts.len(), genesis_accounts.len()); // Test account data matches - (0..primordial_accounts.len()).for_each(|i| { + (0..genesis_accounts.len()).for_each(|i| { assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].owner, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].owner, genesis_block.accounts[i].1.owner.to_string() ); assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].balance, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].balance, genesis_block.accounts[i].1.lamports ); assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].executable, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].executable, genesis_block.accounts[i].1.executable ); assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].data, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].data, base64::encode(&genesis_block.accounts[i].1.data) ); }); } // Test more accounts can be appended - let mut primordial_accounts1 = HashMap::new(); - primordial_accounts1.insert( + let mut genesis_accounts1 = HashMap::new(); + genesis_accounts1.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 6 as u64, executable: true, data: String::from("eW91IGFyZQ=="), }, ); - primordial_accounts1.insert( + genesis_accounts1.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 5 as u64, executable: false, data: String::from("bWV0YSBzdHJpbmc="), }, ); - primordial_accounts1.insert( + genesis_accounts1.insert( Pubkey::new_rand().to_string(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 10 as u64, executable: false, @@ -534,101 +526,100 @@ mod tests { }, ); - let serialized = serde_yaml::to_string(&primordial_accounts1).unwrap(); + let serialized = serde_yaml::to_string(&genesis_accounts1).unwrap(); let path = Path::new("test_append_primordial_accounts_to_genesis.yml"); let mut file = File::create(path).unwrap(); file.write_all(&serialized.into_bytes()).unwrap(); - builder = append_primordial_accounts( + add_genesis_accounts( "test_append_primordial_accounts_to_genesis.yml", AccountFileFormat::Pubkey, - builder, + &mut genesis_block, ) .expect("test_append_primordial_accounts_to_genesis.yml"); remove_file(path).unwrap(); - let genesis_block = builder.clone().build(); // Test total number of accounts is correct assert_eq!( genesis_block.accounts.len(), - primordial_accounts.len() + primordial_accounts1.len() + genesis_accounts.len() + genesis_accounts1.len() ); // Test old accounts are still there - (0..primordial_accounts.len()).for_each(|i| { + (0..genesis_accounts.len()).for_each(|i| { assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].balance, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].balance, genesis_block.accounts[i].1.lamports, ); }); // Test new account data matches - (0..primordial_accounts1.len()).for_each(|i| { + (0..genesis_accounts1.len()).for_each(|i| { assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .owner, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .owner .to_string(), ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .balance, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .lamports, ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .executable, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .executable, ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .data, - base64::encode(&genesis_block.accounts[primordial_accounts.len() + i].1.data), + base64::encode(&genesis_block.accounts[genesis_accounts.len() + i].1.data), ); }); // Test accounts from keypairs can be appended let account_keypairs: Vec<_> = (0..3).map(|_| Keypair::new()).collect(); - let mut primordial_accounts2 = HashMap::new(); - primordial_accounts2.insert( + let mut genesis_accounts2 = HashMap::new(); + genesis_accounts2.insert( serde_json::to_string(&account_keypairs[0].to_bytes().to_vec()).unwrap(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 20 as u64, executable: true, data: String::from("Y2F0IGRvZw=="), }, ); - primordial_accounts2.insert( + genesis_accounts2.insert( serde_json::to_string(&account_keypairs[1].to_bytes().to_vec()).unwrap(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 15 as u64, executable: false, data: String::from("bW9ua2V5IGVsZXBoYW50"), }, ); - primordial_accounts2.insert( + genesis_accounts2.insert( serde_json::to_string(&account_keypairs[2].to_bytes().to_vec()).unwrap(), - PrimordialAccountDetails { + Base64Account { owner: Pubkey::new_rand().to_string(), balance: 30 as u64, executable: true, @@ -636,80 +627,79 @@ mod tests { }, ); - let serialized = serde_yaml::to_string(&primordial_accounts2).unwrap(); + let serialized = serde_yaml::to_string(&genesis_accounts2).unwrap(); let path = Path::new("test_append_primordial_accounts_to_genesis.yml"); let mut file = File::create(path).unwrap(); file.write_all(&serialized.into_bytes()).unwrap(); - builder = append_primordial_accounts( + add_genesis_accounts( "test_append_primordial_accounts_to_genesis.yml", AccountFileFormat::Keypair, - builder, + &mut genesis_block, ) - .expect("builder"); + .expect("genesis"); - builder = solana_storage_api::rewards_pools::genesis(builder); + solana_storage_api::rewards_pools::add_genesis_accounts(&mut genesis_block); remove_file(path).unwrap(); - let genesis_block = builder.clone().build(); // Test total number of accounts is correct assert_eq!( genesis_block.accounts.len(), - primordial_accounts.len() + primordial_accounts1.len() + primordial_accounts2.len() + genesis_accounts.len() + genesis_accounts1.len() + genesis_accounts2.len() ); // Test old accounts are still there - (0..primordial_accounts.len()).for_each(|i| { + (0..genesis_accounts.len()).for_each(|i| { assert_eq!( - primordial_accounts[&genesis_block.accounts[i].0.to_string()].balance, + genesis_accounts[&genesis_block.accounts[i].0.to_string()].balance, genesis_block.accounts[i].1.lamports, ); }); // Test new account data matches - (0..primordial_accounts1.len()).for_each(|i| { + (0..genesis_accounts1.len()).for_each(|i| { assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .owner, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .owner .to_string(), ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .balance, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .lamports, ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .executable, - genesis_block.accounts[primordial_accounts.len() + i] + genesis_block.accounts[genesis_accounts.len() + i] .1 .executable, ); assert_eq!( - primordial_accounts1[&genesis_block.accounts[primordial_accounts.len() + i] + genesis_accounts1[&genesis_block.accounts[genesis_accounts.len() + i] .0 .to_string()] .data, - base64::encode(&genesis_block.accounts[primordial_accounts.len() + i].1.data), + base64::encode(&genesis_block.accounts[genesis_accounts.len() + i].1.data), ); }); - let offset = primordial_accounts.len() + primordial_accounts1.len(); + let offset = genesis_accounts.len() + genesis_accounts1.len(); // Test account data for keypairs matches account_keypairs.iter().for_each(|keypair| { let mut i = 0; @@ -722,25 +712,25 @@ mod tests { assert_ne!(i, 0); assert_eq!( - primordial_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] + genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] .owner, genesis_block.accounts[i].1.owner.to_string(), ); assert_eq!( - primordial_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] + genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] .balance, genesis_block.accounts[i].1.lamports, ); assert_eq!( - primordial_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] + genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] .executable, genesis_block.accounts[i].1.executable, ); assert_eq!( - primordial_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] + genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()] .data, base64::encode(&genesis_block.accounts[i].1.data), ); @@ -748,7 +738,7 @@ mod tests { } #[test] - fn test_primordial_account_struct_compatibility() { + fn test_genesis_account_struct_compatibility() { let yaml_string_pubkey = "--- 98frSc8R8toHoS3tQ1xWSvHCvGEADRM9hAm5qmUKjSDX: balance: 4 @@ -776,13 +766,15 @@ mod tests { let mut file = File::create(path).unwrap(); file.write_all(yaml_string_pubkey.as_bytes()).unwrap(); - let builder = Builder::new(); - let builder = - append_primordial_accounts(path.to_str().unwrap(), AccountFileFormat::Pubkey, builder) - .expect("builder"); + let mut genesis_block = GenesisBlock::default(); + add_genesis_accounts( + path.to_str().unwrap(), + AccountFileFormat::Pubkey, + &mut genesis_block, + ) + .expect("genesis"); remove_file(path).unwrap(); - let genesis_block = builder.clone().build(); assert_eq!(genesis_block.accounts.len(), 4); let yaml_string_keypair = "--- @@ -807,13 +799,15 @@ mod tests { let mut file = File::create(path).unwrap(); file.write_all(yaml_string_keypair.as_bytes()).unwrap(); - let builder = Builder::new(); - let builder = - append_primordial_accounts(path.to_str().unwrap(), AccountFileFormat::Keypair, builder) - .expect("builder"); + let mut genesis_block = GenesisBlock::default(); + add_genesis_accounts( + path.to_str().unwrap(), + AccountFileFormat::Keypair, + &mut genesis_block, + ) + .expect("genesis"); remove_file(path).unwrap(); - let genesis_block = builder.clone().build(); assert_eq!(genesis_block.accounts.len(), 3); } } diff --git a/gossip/src/main.rs b/gossip/src/main.rs index 99d7855dd1a44d..9ea3bd9f9c86ce 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -229,7 +229,7 @@ fn main() -> Result<(), Box> { } println!("\nSending stop request to node {:?}", pubkey); - let result = RpcClient::new_socket(node.rpc).fullnode_exit()?; + let result = RpcClient::new_socket(node.rpc).validator_exit()?; if result { println!("Stop signal accepted"); } else { diff --git a/install/Cargo.toml b/install/Cargo.toml index c699431b85d657..b64eb3227209db 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -7,6 +7,7 @@ version = "0.20.0" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" +default-run = "solana-install" [dependencies] atty = "0.2.11" @@ -22,11 +23,11 @@ indicatif = "0.12.0" lazy_static = "1.4.0" log = "0.4.8" nix = "0.15.0" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } semver = "0.9.0" serde = "1.0.101" serde_derive = "1.0.101" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" sha2 = "0.8.0" solana-client = { path = "../client", version = "0.20.0" } solana-config-api = { path = "../programs/config_api", version = "0.20.0" } diff --git a/install/src/command.rs b/install/src/command.rs index b42337721c8a1e..fdfec06242f082 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -9,7 +9,7 @@ use solana_client::rpc_client::RpcClient; use solana_config_api::{config_instruction, get_config_data}; use solana_sdk::message::Message; use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil, Signable}; +use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil, Signable}; use solana_sdk::transaction::Transaction; use std::fs::{self, File}; use std::io::{self, BufReader, Read}; @@ -626,9 +626,9 @@ pub fn deploy( download_url: &str, update_manifest_keypair_file: &str, ) -> Result<(), String> { - let from_keypair = read_keypair(from_keypair_file) + let from_keypair = read_keypair_file(from_keypair_file) .map_err(|err| format!("Unable to read {}: {}", from_keypair_file, err))?; - let update_manifest_keypair = read_keypair(update_manifest_keypair_file) + let update_manifest_keypair = read_keypair_file(update_manifest_keypair_file) .map_err(|err| format!("Unable to read {}: {}", update_manifest_keypair_file, err))?; println_name_value("JSON RPC URL:", json_rpc_url); diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 8ed5646f43aef8..914bf14955d368 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -3,7 +3,10 @@ use clap::{ crate_description, crate_name, crate_version, App, AppSettings, Arg, ArgMatches, SubCommand, }; use solana_sdk::pubkey::write_pubkey; -use solana_sdk::signature::{keypair_from_seed, read_keypair, write_keypair, KeypairUtil}; +use solana_sdk::signature::{ + keypair_from_seed, read_keypair, read_keypair_file, write_keypair, write_keypair_file, Keypair, + KeypairUtil, +}; use std::error; use std::path::Path; use std::process::exit; @@ -18,6 +21,21 @@ fn check_for_overwrite(outfile: &str, matches: &ArgMatches) { } } +fn output_keypair( + keypair: &Keypair, + outfile: &str, + source: &str, +) -> Result<(), Box> { + if outfile == "-" { + let mut stdout = std::io::stdout(); + write_keypair(&keypair, &mut stdout)?; + } else { + write_keypair_file(&keypair, outfile)?; + eprintln!("Wrote {} keypair to {}", source, outfile); + } + Ok(()) +} + fn main() -> Result<(), Box> { let matches = App::new(crate_name!()) .about(crate_description!()) @@ -45,7 +63,7 @@ fn main() -> Result<(), Box> { Arg::with_name("silent") .short("s") .long("silent") - .help("Do not display mnemonic phrase"), + .help("Do not display mnemonic phrase. Useful when piping output to other programs that prompt for user input, like gpg"), ), ) .subcommand( @@ -104,7 +122,12 @@ fn main() -> Result<(), Box> { path.extend(&[".config", "solana", "id.json"]); path.to_str().unwrap() }; - let keypair = read_keypair(infile)?; + let keypair = if infile == "-" { + let mut stdin = std::io::stdin(); + read_keypair(&mut stdin)? + } else { + read_keypair_file(infile)? + }; if matches.is_present("outfile") { let outfile = matches.value_of("outfile").unwrap(); @@ -132,17 +155,12 @@ fn main() -> Result<(), Box> { let seed = Seed::new(&mnemonic, NO_PASSPHRASE); let keypair = keypair_from_seed(seed.as_bytes())?; - let serialized_keypair = write_keypair(&keypair, outfile)?; - if outfile == "-" { - println!("{}", serialized_keypair); - } else { - println!("Wrote new keypair to {}", outfile); - } + output_keypair(&keypair, &outfile, "new")?; let silent = matches.is_present("silent"); if !silent { let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); - println!( + eprintln!( "{}\nSave this mnemonic phrase to recover your new keypair:\n{}\n{}", ÷r, phrase, ÷r ); @@ -161,17 +179,12 @@ fn main() -> Result<(), Box> { check_for_overwrite(&outfile, &matches); } - let phrase = rpassword::prompt_password_stdout("Mnemonic recovery phrase: ").unwrap(); + let phrase = rpassword::prompt_password_stderr("Mnemonic recovery phrase: ").unwrap(); let mnemonic = Mnemonic::from_phrase(phrase.trim(), Language::English)?; let seed = Seed::new(&mnemonic, NO_PASSPHRASE); let keypair = keypair_from_seed(seed.as_bytes())?; - let serialized_keypair = write_keypair(&keypair, outfile)?; - if outfile == "-" { - println!("{}", serialized_keypair); - } else { - println!("Wrote recovered keypair to {}", outfile); - } + output_keypair(&keypair, &outfile, "recovered")?; } _ => unreachable!(), } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 2a7ae374b0132e..f7941da4f2dd96 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -14,7 +14,7 @@ clap = "2.33.0" serde = "1.0.101" serde_derive = "1.0.101" serde_json = "1.0.41" -serde_yaml = "0.8.9" +serde_yaml = "0.8.11" solana-core = { path = "../core", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.0" } solana-runtime = { path = "../runtime", version = "0.20.0" } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index f8b840d796a78d..fbb50420f99910 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1,6 +1,6 @@ use clap::{crate_description, crate_name, crate_version, value_t_or_exit, App, Arg, SubCommand}; use solana_core::blocktree::Blocktree; -use solana_core::blocktree_processor::process_blocktree; +use solana_core::blocktree_processor::{process_blocktree, ProcessOptions}; use solana_sdk::clock::Slot; use solana_sdk::genesis_block::GenesisBlock; use std::collections::BTreeMap; @@ -168,7 +168,11 @@ fn main() { } ("verify", _) => { println!("Verifying ledger..."); - match process_blocktree(&genesis_block, &blocktree, None, true, None) { + let options = ProcessOptions { + verify_ledger: true, + ..ProcessOptions::default() + }; + match process_blocktree(&genesis_block, &blocktree, None, options) { Ok((_bank_forks, bank_forks_info, _)) => { println!("{:?}", bank_forks_info); } diff --git a/local_cluster/Cargo.toml b/local_cluster/Cargo.toml index 74bfbd5ccc34ee..6548b470c65f19 100644 --- a/local_cluster/Cargo.toml +++ b/local_cluster/Cargo.toml @@ -36,4 +36,4 @@ serial_test = "0.2.0" serial_test_derive = "0.2.0" [features] -move = ["solana-move-loader-api", "solana-move-loader-program"] +move = ["solana-bench-tps/move", "solana-move-loader-api", "solana-move-loader-program"] diff --git a/local_cluster/src/cluster_tests.rs b/local_cluster/src/cluster_tests.rs index d04c971326c90c..d81639f81931fc 100644 --- a/local_cluster/src/cluster_tests.rs +++ b/local_cluster/src/cluster_tests.rs @@ -6,16 +6,16 @@ use solana_client::thin_client::create_client; /// discover the rest of the network. use solana_core::{ blocktree::Blocktree, - cluster_info::FULLNODE_PORT_RANGE, + cluster_info::VALIDATOR_PORT_RANGE, consensus::VOTE_THRESHOLD_DEPTH, contact_info::ContactInfo, entry::{Entry, EntrySlice}, gossip_service::discover_cluster, }; -use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH; use solana_sdk::{ client::SyncClient, clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS}, + epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, hash::Hash, poh_config::PohConfig, pubkey::Pubkey, @@ -47,7 +47,7 @@ pub fn spend_and_verify_all_nodes( continue; } let random_keypair = Keypair::new(); - let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(ingress_node.client_facing_addr(), VALIDATOR_PORT_RANGE); let bal = client .poll_get_balance(&funding_keypair.pubkey()) .expect("balance in source"); @@ -63,7 +63,7 @@ pub fn spend_and_verify_all_nodes( if ignore_nodes.contains(&validator.id) { continue; } - let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE); client.poll_for_signature_confirmation(&sig, confs).unwrap(); } } @@ -73,7 +73,7 @@ pub fn verify_balances( expected_balances: HashMap, node: &ContactInfo, ) { - let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); for (pk, b) in expected_balances { let bal = client.poll_get_balance(&pk).expect("balance in source"); assert_eq!(bal, b); @@ -86,7 +86,7 @@ pub fn send_many_transactions( max_tokens_per_transfer: u64, num_txs: u64, ) -> HashMap { - let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); let mut expected_balances = HashMap::new(); for _ in 0..num_txs { let random_keypair = Keypair::new(); @@ -114,17 +114,17 @@ pub fn send_many_transactions( expected_balances } -pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) { +pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) { let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); for node in &cluster_nodes { - let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE); - assert!(client.fullnode_exit().unwrap()); + let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); + assert!(client.validator_exit().unwrap()); } sleep(Duration::from_millis(DEFAULT_SLOT_MILLIS)); for node in &cluster_nodes { - let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE); - assert!(client.fullnode_exit().is_err()); + let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); + assert!(client.validator_exit().is_err()); } } @@ -183,7 +183,7 @@ pub fn kill_entry_and_spend_and_verify_rest( solana_logger::setup(); let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); - let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE); let first_two_epoch_slots = MINIMUM_SLOTS_PER_EPOCH * 3; for ingress_node in &cluster_nodes { @@ -198,7 +198,7 @@ pub fn kill_entry_and_spend_and_verify_rest( )); info!("done sleeping for first 2 warmup epochs"); info!("killing entry point: {}", entry_point_info.id); - assert!(client.fullnode_exit().unwrap()); + assert!(client.validator_exit().unwrap()); info!("sleeping for some time"); sleep(Duration::from_millis( slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS, @@ -210,7 +210,7 @@ pub fn kill_entry_and_spend_and_verify_rest( continue; } - let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(ingress_node.client_facing_addr(), VALIDATOR_PORT_RANGE); let balance = client .poll_get_balance(&funding_keypair.pubkey()) .expect("balance in source"); @@ -275,7 +275,7 @@ fn poll_all_nodes_for_signature( if validator.id == entry_point_info.id { continue; } - let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE); + let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE); client.poll_for_signature_confirmation(&sig, confs)?; } diff --git a/local_cluster/src/local_cluster.rs b/local_cluster/src/local_cluster.rs index d24a3115bfce35..ea9c8d665e2c80 100644 --- a/local_cluster/src/local_cluster.rs +++ b/local_cluster/src/local_cluster.rs @@ -2,7 +2,7 @@ use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo}; use solana_client::thin_client::{create_client, ThinClient}; use solana_core::{ blocktree::create_new_tmp_ledger, - cluster_info::{Node, FULLNODE_PORT_RANGE}, + cluster_info::{Node, VALIDATOR_PORT_RANGE}, contact_info::ContactInfo, genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo}, gossip_service::discover_cluster, @@ -13,6 +13,7 @@ use solana_core::{ use solana_sdk::{ client::SyncClient, clock::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_TICKS_PER_SLOT}, + epoch_schedule::EpochSchedule, genesis_block::GenesisBlock, message::Message, poh_config::PohConfig, @@ -25,7 +26,10 @@ use solana_stake_api::{ config as stake_config, stake_instruction, stake_state::{Authorized as StakeAuthorized, StakeState}, }; -use solana_storage_api::{storage_contract, storage_instruction}; +use solana_storage_api::{ + storage_contract, + storage_instruction::{self, StorageAccountType}, +}; use solana_vote_api::{ vote_instruction, vote_state::{VoteInit, VoteState}, @@ -54,7 +58,7 @@ impl ReplicatorInfo { #[derive(Clone, Debug)] pub struct ClusterConfig { - /// The fullnode config that should be applied to every node in the cluster + /// The validator config that should be applied to every node in the cluster pub validator_configs: Vec, /// Number of replicators in the cluster /// Note- replicators will timeout if ticks_per_slot is much larger than the default 8 @@ -96,9 +100,9 @@ pub struct LocalCluster { pub funding_keypair: Keypair, /// Entry point from which the rest of the network can be discovered pub entry_point_info: ContactInfo, - pub fullnode_infos: HashMap, + pub validator_infos: HashMap, pub listener_infos: HashMap, - fullnodes: HashMap, + validators: HashMap, pub genesis_block: GenesisBlock, replicators: Vec, pub replicator_infos: HashMap, @@ -135,9 +139,9 @@ impl LocalCluster { config.node_stakes[0], ); genesis_block.ticks_per_slot = config.ticks_per_slot; - genesis_block.slots_per_epoch = config.slots_per_epoch; genesis_block.slots_per_segment = config.slots_per_segment; - genesis_block.stakers_slot_offset = config.stakers_slot_offset; + genesis_block.epoch_schedule = + EpochSchedule::custom(config.slots_per_epoch, config.stakers_slot_offset, true); genesis_block.poh_config = config.poh_config.clone(); genesis_block .native_instruction_processors @@ -152,7 +156,12 @@ impl LocalCluster { storage_contract::create_validator_storage_account(leader_pubkey, 1), )); - // override staking config + // Replace staking config + genesis_block.accounts = genesis_block + .accounts + .into_iter() + .filter(|(pubkey, _)| *pubkey != stake_config::id()) + .collect(); genesis_block.accounts.push(( stake_config::id(), stake_config::create_account( @@ -181,9 +190,9 @@ impl LocalCluster { &config.validator_configs[0], ); - let mut fullnodes = HashMap::new(); - let mut fullnode_infos = HashMap::new(); - fullnodes.insert(leader_pubkey, leader_server); + let mut validators = HashMap::new(); + let mut validator_infos = HashMap::new(); + validators.insert(leader_pubkey, leader_server); let leader_info = ValidatorInfo { keypair: leader_keypair, voting_keypair: leader_voting_keypair, @@ -195,15 +204,15 @@ impl LocalCluster { let cluster_leader = ClusterValidatorInfo::new(leader_info, config.validator_configs[0].clone()); - fullnode_infos.insert(leader_pubkey, cluster_leader); + validator_infos.insert(leader_pubkey, cluster_leader); let mut cluster = Self { funding_keypair: mint_keypair, entry_point_info: leader_contact_info, - fullnodes, + validators, replicators: vec![], genesis_block, - fullnode_infos, + validator_infos, replicator_infos: HashMap::new(), listener_infos: HashMap::new(), }; @@ -241,14 +250,14 @@ impl LocalCluster { } pub fn exit(&mut self) { - for node in self.fullnodes.values_mut() { + for node in self.validators.values_mut() { node.exit(); } } pub fn close_preserve_ledgers(&mut self) { self.exit(); - for (_, node) in self.fullnodes.drain() { + for (_, node) in self.validators.drain() { node.join().unwrap(); } @@ -260,7 +269,7 @@ impl LocalCluster { pub fn add_validator(&mut self, validator_config: &ValidatorConfig, stake: u64) { let client = create_client( self.entry_point_info.client_facing_addr(), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); // Must have enough tokens to fund vote account and set delegate @@ -313,7 +322,7 @@ impl LocalCluster { &validator_config, ); - self.fullnodes + self.validators .insert(validator_keypair.pubkey(), validator_server); let validator_pubkey = validator_keypair.pubkey(); let validator_info = ClusterValidatorInfo::new( @@ -330,7 +339,8 @@ impl LocalCluster { if validator_config.voting_disabled { self.listener_infos.insert(validator_pubkey, validator_info); } else { - self.fullnode_infos.insert(validator_pubkey, validator_info); + self.validator_infos + .insert(validator_pubkey, validator_info); } } @@ -341,7 +351,7 @@ impl LocalCluster { let storage_pubkey = storage_keypair.pubkey(); let client = create_client( self.entry_point_info.client_facing_addr(), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); // Give the replicator some lamports to setup its storage accounts @@ -375,7 +385,7 @@ impl LocalCluster { fn close(&mut self) { self.close_preserve_ledgers(); for ledger_path in self - .fullnode_infos + .validator_infos .values() .map(|f| &f.info.ledger_path) .chain(self.replicator_infos.values().map(|info| &info.ledger_path)) @@ -388,7 +398,7 @@ impl LocalCluster { pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 { let client = create_client( self.entry_point_info.client_facing_addr(), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports) } @@ -532,22 +542,19 @@ impl LocalCluster { from_keypair: &Arc, replicator: bool, ) -> Result<()> { + let storage_account_type = if replicator { + StorageAccountType::Replicator + } else { + StorageAccountType::Validator + }; let message = Message::new_with_payer( - if replicator { - storage_instruction::create_replicator_storage_account( - &from_keypair.pubkey(), - &from_keypair.pubkey(), - &storage_keypair.pubkey(), - 1, - ) - } else { - storage_instruction::create_validator_storage_account( - &from_keypair.pubkey(), - &from_keypair.pubkey(), - &storage_keypair.pubkey(), - 1, - ) - }, + storage_instruction::create_storage_account( + &from_keypair.pubkey(), + &from_keypair.pubkey(), + &storage_keypair.pubkey(), + 1, + storage_account_type, + ), Some(&from_keypair.pubkey()), ); let signer_keys = vec![from_keypair.as_ref()]; @@ -561,26 +568,26 @@ impl LocalCluster { impl Cluster for LocalCluster { fn get_node_pubkeys(&self) -> Vec { - self.fullnodes.keys().cloned().collect() + self.validators.keys().cloned().collect() } fn get_validator_client(&self, pubkey: &Pubkey) -> Option { - self.fullnode_infos.get(pubkey).map(|f| { + self.validator_infos.get(pubkey).map(|f| { create_client( f.info.contact_info.client_facing_addr(), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ) }) } fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo { - let mut node = self.fullnodes.remove(&pubkey).unwrap(); + let mut node = self.validators.remove(&pubkey).unwrap(); - // Shut down the fullnode + // Shut down the validator node.exit(); node.join().unwrap(); - self.fullnode_infos.remove(&pubkey).unwrap() + self.validator_infos.remove(&pubkey).unwrap() } fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) { @@ -598,22 +605,22 @@ impl Cluster for LocalCluster { }; // Restart the node - let fullnode_info = &cluster_validator_info.info; + let validator_info = &cluster_validator_info.info; let restarted_node = Validator::new( node, - &fullnode_info.keypair, - &fullnode_info.ledger_path, - &fullnode_info.voting_keypair.pubkey(), - &fullnode_info.voting_keypair, - &fullnode_info.storage_keypair, + &validator_info.keypair, + &validator_info.ledger_path, + &validator_info.voting_keypair.pubkey(), + &validator_info.voting_keypair, + &validator_info.storage_keypair, entry_point_info, true, &cluster_validator_info.config, ); - self.fullnodes.insert(*pubkey, restarted_node); - self.fullnode_infos.insert(*pubkey, cluster_validator_info); + self.validators.insert(*pubkey, restarted_node); + self.validator_infos.insert(*pubkey, cluster_validator_info); } fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) { @@ -633,14 +640,14 @@ impl Drop for LocalCluster { mod test { use super::*; use solana_core::storage_stage::SLOTS_PER_TURN_TEST; - use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH; + use solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH; #[test] fn test_local_cluster_start_and_exit() { solana_logger::setup(); let num_nodes = 1; let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3); - assert_eq!(cluster.fullnodes.len(), num_nodes); + assert_eq!(cluster.validators.len(), num_nodes); assert_eq!(cluster.replicators.len(), 0); } @@ -648,7 +655,7 @@ mod test { fn test_local_cluster_start_and_exit_with_config() { solana_logger::setup(); let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST; const NUM_NODES: usize = 1; let num_replicators = 1; @@ -662,7 +669,7 @@ mod test { ..ClusterConfig::default() }; let cluster = LocalCluster::new(&config); - assert_eq!(cluster.fullnodes.len(), NUM_NODES); + assert_eq!(cluster.validators.len(), NUM_NODES); assert_eq!(cluster.replicators.len(), num_replicators); } } diff --git a/local_cluster/src/tests/bench_tps.rs b/local_cluster/src/tests/bench_tps.rs index 2e9298b2e02d29..10cec9ca76a3f9 100644 --- a/local_cluster/src/tests/bench_tps.rs +++ b/local_cluster/src/tests/bench_tps.rs @@ -3,7 +3,7 @@ use serial_test_derive::serial; use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs}; use solana_bench_tps::cli::Config; use solana_client::thin_client::create_client; -use solana_core::cluster_info::FULLNODE_PORT_RANGE; +use solana_core::cluster_info::VALIDATOR_PORT_RANGE; use solana_core::validator::ValidatorConfig; use solana_drone::drone::run_local_drone; #[cfg(feature = "move")] @@ -38,7 +38,7 @@ fn test_bench_tps_local_cluster(config: Config) { let client = create_client( (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); let (addr_sender, addr_receiver) = channel(); diff --git a/local_cluster/src/tests/local_cluster.rs b/local_cluster/src/tests/local_cluster.rs index bbd82f027ffce7..c0b737787aa65d 100644 --- a/local_cluster/src/tests/local_cluster.rs +++ b/local_cluster/src/tests/local_cluster.rs @@ -9,11 +9,13 @@ use solana_core::{ bank_forks::SnapshotConfig, blocktree::Blocktree, broadcast_stage::BroadcastStageType, gossip_service::discover_cluster, snapshot_utils, validator::ValidatorConfig, }; -use solana_runtime::{ - accounts_db::AccountsDB, +use solana_runtime::accounts_db::AccountsDB; +use solana_sdk::{ + client::SyncClient, + clock, epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, + poh_config::PohConfig, }; -use solana_sdk::{client::SyncClient, clock, poh_config::PohConfig}; use std::path::{Path, PathBuf}; use std::{ collections::{HashMap, HashSet}, @@ -50,7 +52,7 @@ fn test_ledger_cleanup_service() { ); cluster.close_preserve_ledgers(); //check everyone's ledgers and make sure only ~100 slots are stored - for (_, info) in &cluster.fullnode_infos { + for (_, info) in &cluster.validator_infos { let mut slots = 0; let blocktree = Blocktree::open(&info.info.ledger_path).unwrap(); blocktree @@ -128,22 +130,22 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() { #[allow(unused_attributes)] #[test] #[should_panic] -fn test_fullnode_exit_default_config_should_panic() { +fn test_validator_exit_default_config_should_panic() { solana_logger::setup(); - error!("test_fullnode_exit_default_config_should_panic"); + error!("test_validator_exit_default_config_should_panic"); let num_nodes = 2; let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100); - cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes); + cluster_tests::validator_exit(&local.entry_point_info, num_nodes); } #[test] #[serial] -fn test_fullnode_exit_2() { +fn test_validator_exit_2() { solana_logger::setup(); - error!("test_fullnode_exit_2"); + error!("test_validator_exit_2"); let num_nodes = 2; let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; let config = ClusterConfig { cluster_lamports: 10_000, node_stakes: vec![100; num_nodes], @@ -151,7 +153,7 @@ fn test_fullnode_exit_2() { ..ClusterConfig::default() }; let local = LocalCluster::new(&config); - cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes); + cluster_tests::validator_exit(&local.entry_point_info, num_nodes); } // Cluster needs a supermajority to remain, so the minimum size for this test is 4 @@ -162,7 +164,7 @@ fn test_leader_failure_4() { error!("test_leader_failure_4"); let num_nodes = 4; let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; let config = ClusterConfig { cluster_lamports: 10_000, node_stakes: vec![100; 4], @@ -187,7 +189,7 @@ fn test_two_unbalanced_stakes() { let num_ticks_per_slot = 10; let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64; - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; let mut cluster = LocalCluster::new(&ClusterConfig { node_stakes: vec![999_990, 3], cluster_lamports: 1_000_000, @@ -206,7 +208,7 @@ fn test_two_unbalanced_stakes() { ); cluster.close_preserve_ledgers(); let leader_pubkey = cluster.entry_point_info.id; - let leader_ledger = cluster.fullnode_infos[&leader_pubkey] + let leader_ledger = cluster.validator_infos[&leader_pubkey] .info .ledger_path .clone(); @@ -428,7 +430,7 @@ fn test_snapshots_blocktree_floor() { // Check the validator ledger doesn't contain any slots < slot_floor cluster.close_preserve_ledgers(); - let validator_ledger_path = &cluster.fullnode_infos[&validator_id]; + let validator_ledger_path = &cluster.validator_infos[&validator_id]; let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap(); // Skip the zeroth slot in blocktree that the ledger is initialized with @@ -488,7 +490,7 @@ fn test_snapshots_restart_validity() { let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path); wait_for_next_snapshot(&cluster, &tar); - // Create new account paths since fullnode exit is not guaranteed to cleanup RPC threads, + // Create new account paths since validator exit is not guaranteed to cleanup RPC threads, // which may delete the old accounts on exit at any point let (new_account_storage_dirs, new_account_storage_paths) = generate_account_paths(num_account_paths); @@ -548,12 +550,12 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) { }; let cluster = LocalCluster::new(&cluster_config); - let epoch_schedule = EpochSchedule::new( + let epoch_schedule = EpochSchedule::custom( cluster_config.slots_per_epoch, cluster_config.stakers_slot_offset, true, ); - let num_warmup_epochs = epoch_schedule.get_stakers_epoch(0) + 1; + let num_warmup_epochs = epoch_schedule.get_leader_schedule_epoch(0) + 1; // Wait for the corrupted leader to be scheduled afer the warmup epochs expire cluster_tests::sleep_n_epochs( @@ -564,7 +566,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) { ); let corrupt_node = cluster - .fullnode_infos + .validator_infos .iter() .find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type) .unwrap() @@ -614,7 +616,7 @@ fn run_repairman_catchup(num_repairmen: u64) { // their root could actually be much less than 31. This is why we give a num_root_buffer_slots buffer. let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots; - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; let lamports_per_repairman = 1000; @@ -636,8 +638,8 @@ fn run_repairman_catchup(num_repairmen: u64) { }); let repairman_pubkeys: HashSet<_> = cluster.get_node_pubkeys().into_iter().collect(); - let epoch_schedule = EpochSchedule::new(num_slots_per_epoch, stakers_slot_offset, true); - let num_warmup_epochs = epoch_schedule.get_stakers_epoch(0) + 1; + let epoch_schedule = EpochSchedule::custom(num_slots_per_epoch, stakers_slot_offset, true); + let num_warmup_epochs = epoch_schedule.get_leader_schedule_epoch(0) + 1; // Sleep for longer than the first N warmup epochs, with a one epoch buffer for timing issues cluster_tests::sleep_n_epochs( @@ -738,7 +740,7 @@ fn setup_snapshot_validator_config( // Create the validator config let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_fullnode_exit = true; + validator_config.rpc_config.enable_validator_exit = true; validator_config.snapshot_config = Some(snapshot_config); validator_config.account_paths = Some(account_storage_paths); diff --git a/local_cluster/src/tests/replicator.rs b/local_cluster/src/tests/replicator.rs index d1a6a90d6b1ee8..0bce9ea6d1753a 100644 --- a/local_cluster/src/tests/replicator.rs +++ b/local_cluster/src/tests/replicator.rs @@ -2,7 +2,7 @@ use crate::local_cluster::{ClusterConfig, LocalCluster}; use serial_test_derive::serial; use solana_client::thin_client::create_client; use solana_core::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path, Blocktree}; -use solana_core::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE}; +use solana_core::cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}; use solana_core::contact_info::ContactInfo; use solana_core::gossip_service::discover_cluster; use solana_core::replicator::Replicator; @@ -171,7 +171,7 @@ fn test_account_setup() { // now check that the cluster actually has accounts for the replicator. let client = create_client( cluster.entry_point_info.client_facing_addr(), - FULLNODE_PORT_RANGE, + VALIDATOR_PORT_RANGE, ); cluster.replicator_infos.iter().for_each(|(_, value)| { assert_eq!( diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 2e6a123636595b..8ab15edd4f0b87 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" env_logger = "0.7.0" lazy_static = "1.4.0" log = "0.4.8" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } solana-sdk = { path = "../sdk", version = "0.20.0" } sys-info = "0.5.8" diff --git a/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json b/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json index a469813426d4e1..b66301b52cdf13 100644 --- a/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json +++ b/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json @@ -4815,7 +4815,7 @@ }, "id": 41, "panels": [], - "title": "Fullnode Streamer", + "title": "Validator Streamer", "type": "row" }, { diff --git a/metrics/src/datapoint.rs b/metrics/src/datapoint.rs index b6e85bc48fc882..72a1a1d797cd26 100644 --- a/metrics/src/datapoint.rs +++ b/metrics/src/datapoint.rs @@ -91,56 +91,40 @@ macro_rules! datapoint { #[macro_export] macro_rules! datapoint_error { ($name:expr) => { - if log::log_enabled!(log::Level::Error) { - $crate::submit($crate::datapoint!(@point $name), log::Level::Error); - } + $crate::submit($crate::datapoint!(@point $name), log::Level::Error); }; ($name:expr, $($fields:tt)+) => { - if log::log_enabled!(log::Level::Error) { - $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Error); - } + $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Error); }; } #[macro_export] macro_rules! datapoint_warn { ($name:expr) => { - if log::log_enabled!(log::Level::Warn) { - $crate::submit($crate::datapoint!(@point $name), log::Level::Warn); - } + $crate::submit($crate::datapoint!(@point $name), log::Level::Warn); }; ($name:expr, $($fields:tt)+) => { - if log::log_enabled!(log::Level::Warn) { - $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Warn); - } + $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Warn); }; } #[macro_export] macro_rules! datapoint_info { ($name:expr) => { - if log::log_enabled!(log::Level::Info) { - $crate::submit($crate::datapoint!(@point $name), log::Level::Info); - } + $crate::submit($crate::datapoint!(@point $name), log::Level::Info); }; ($name:expr, $($fields:tt)+) => { - if log::log_enabled!(log::Level::Info) { - $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Info); - } + $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Info); }; } #[macro_export] macro_rules! datapoint_debug { ($name:expr) => { - if log::log_enabled!(log::Level::Debug) { - $crate::submit($crate::datapoint!(@point $name), log::Level::Debug); - } + $crate::submit($crate::datapoint!(@point $name), log::Level::Debug); }; ($name:expr, $($fields:tt)+) => { - if log::log_enabled!(log::Level::Debug) { - $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Debug); - } + $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Debug); }; } diff --git a/multinode-demo/bench-tps.sh b/multinode-demo/bench-tps.sh index 659b29bd487008..45f4b35d407020 100755 --- a/multinode-demo/bench-tps.sh +++ b/multinode-demo/bench-tps.sh @@ -25,6 +25,7 @@ if [[ -z $1 ]]; then # default behavior --drone 127.0.0.1:9900 \ --duration 90 \ --tx_count 50000 \ + --thread-batch-sleep-ms 0 \ else $solana_bench_tps "$@" diff --git a/multinode-demo/bootstrap-leader.sh b/multinode-demo/bootstrap-leader.sh index 1410bdde78e9e6..dbbdcbf5473901 100755 --- a/multinode-demo/bootstrap-leader.sh +++ b/multinode-demo/bootstrap-leader.sh @@ -8,6 +8,11 @@ here=$(dirname "$0") # shellcheck source=multinode-demo/common.sh source "$here"/common.sh +if [[ "$SOLANA_GPU_MISSING" -eq 1 ]]; then + echo "Testnet requires GPUs, but none were found! Aborting..." + exit 1 +fi + if [[ -n $SOLANA_CUDA ]]; then program=$solana_validator_cuda else diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 95cbe59dc31c60..f06e07ac924bc9 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -152,6 +152,11 @@ while [[ -n $1 ]]; do fi done +if [[ "$SOLANA_GPU_MISSING" -eq 1 ]]; then + echo "Testnet requires GPUs, but none were found! Aborting..." + exit 1 +fi + if [[ ${#positional_args[@]} -gt 1 ]]; then usage "$@" fi diff --git a/net/gce.sh b/net/gce.sh index 651190343e9046..0879b91c129877 100755 --- a/net/gce.sh +++ b/net/gce.sh @@ -69,7 +69,7 @@ esac prefix=testnet-dev-${USER//[^A-Za-z0-9]/} additionalFullNodeCount=2 -clientNodeCount=1 +clientNodeCount=0 replicatorNodeCount=0 blockstreamer=false fullNodeBootDiskSizeInGb=500 @@ -78,6 +78,7 @@ replicatorBootDiskSizeInGb=500 fullNodeAdditionalDiskSizeInGb= externalNodes=false failOnValidatorBootupFailure=true +preemptible=true publicNetwork=false letsEncryptDomainName= @@ -119,7 +120,9 @@ Manage testnet instances zone -x - append to the existing configuration instead of creating a new configuration - -f - Discard validator nodes that didn't bootup successfully + --allow-boot-failures + - Discard from config validator nodes that didn't bootup + successfully create-specific options: -n [number] - Number of additional fullnodes (default: $additionalFullNodeCount) @@ -144,6 +147,11 @@ Manage testnet instances - Add an additional [number] GB SSD to all fullnodes to store the config directory. If not set, config will be written to the boot disk by default. Only supported on GCE. + --dedicated - Use dedicated instances for additional full nodes + (by default preemptible instances are used to reduce + cost). Note that the bootstrap leader, replicator, + blockstreamer and client nodes are always dedicated. + config-specific options: -P - Use public network IP addresses (default: $publicNetwork) @@ -175,6 +183,12 @@ while [[ -n $1 ]]; do elif [[ $1 == --machine-type* || $1 == --custom-cpu* ]]; then # Bypass quoted long args for GPUs shortArgs+=("$1") shift + elif [[ $1 == --allow-boot-failures ]]; then + failOnValidatorBootupFailure=false + shift + elif [[ $1 == --dedicated ]]; then + preemptible=false + shift else usage "Unknown long option: $1" fi @@ -232,9 +246,6 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do x) externalNodes=true ;; - f) - failOnValidatorBootupFailure=false - ;; *) usage "unhandled option: $opt" ;; @@ -376,6 +387,8 @@ EOF buildSshOptions + cloud_RestartPreemptedInstances "$prefix" + fetchPrivateKey() { declare nodeName declare nodeIp @@ -485,10 +498,24 @@ EOF fi if [[ $additionalFullNodeCount -gt 0 ]]; then - for zone in "${zones[@]}"; do + numZones=${#zones[@]} + if [[ $additionalFullNodeCount -gt $numZones ]]; then + numNodesPerZone=$((additionalFullNodeCount / numZones)) + numLeftOverNodes=$((additionalFullNodeCount % numZones)) + else + numNodesPerZone=1 + numLeftOverNodes=0 + fi + + for ((i=((numZones - 1)); i >= 0; i--)); do + zone=${zones[i]} + if [[ $i -eq 0 ]]; then + numNodesPerZone=$((numNodesPerZone + numLeftOverNodes)) + fi echo "Looking for additional fullnode instances in $zone ..." cloud_FindInstances "$prefix-$zone-fullnode" - if [[ ${#instances[@]} -gt 0 ]]; then + declare numInstances=${#instances[@]} + if [[ $numInstances -eq $numNodesPerZone || ( ! $failOnValidatorBootupFailure && $numInstances -gt 0 ) ]]; then cloud_ForEachInstance recordInstanceIp "$failOnValidatorBootupFailure" fullnodeIpList else echo "Unable to find additional fullnodes" @@ -559,6 +586,14 @@ delete() { $metricsWriteDatapoint "testnet-deploy net-delete-complete=1" } +create_error_cleanup() { + declare RC=$? + if [[ "$RC" -ne 0 ]]; then + delete + fi + exit $RC +} + case $command in delete) delete @@ -571,6 +606,10 @@ create) $metricsWriteDatapoint "testnet-deploy net-create-begin=1" + if $failOnValidatorBootupFailure; then + trap create_error_cleanup EXIT + fi + rm -rf "$sshPrivateKey"{,.pub} # Note: using rsa because |aws ec2 import-key-pair| seems to fail for ecdsa @@ -697,7 +736,7 @@ EOF cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \ "$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \ "$startupScript" "$bootstrapLeaderAddress" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb" \ - "$sshPrivateKey" + "never preemptible" "$sshPrivateKey" fi if [[ $additionalFullNodeCount -gt 0 ]]; then @@ -718,7 +757,7 @@ EOF cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \ "$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \ "$startupScript" "" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb" \ - "$sshPrivateKey" & + "$preemptible" "$sshPrivateKey" & done wait @@ -727,7 +766,7 @@ EOF if [[ $clientNodeCount -gt 0 ]]; then cloud_CreateInstances "$prefix" "$prefix-client" "$clientNodeCount" \ "$enableGpu" "$clientMachineType" "${zones[0]}" "$clientBootDiskSizeInGb" \ - "$startupScript" "" "$bootDiskType" "" "$sshPrivateKey" + "$startupScript" "" "$bootDiskType" "" "never preemptible" "$sshPrivateKey" fi if $blockstreamer; then @@ -739,7 +778,7 @@ EOF if [[ $replicatorNodeCount -gt 0 ]]; then cloud_CreateInstances "$prefix" "$prefix-replicator" "$replicatorNodeCount" \ false "$replicatorMachineType" "${zones[0]}" "$replicatorBootDiskSizeInGb" \ - "$startupScript" "" "" "" "$sshPrivateKey" + "$startupScript" "" "" "" "never preemptible" "$sshPrivateKey" fi $metricsWriteDatapoint "testnet-deploy net-create-complete=1" diff --git a/net/net.sh b/net/net.sh index a360ee8228cab9..7a997dfa6a5b30 100755 --- a/net/net.sh +++ b/net/net.sh @@ -49,7 +49,13 @@ Operate a configured testnet This will start 2 bench-tps clients, and supply "--tx_count 25000" to the bench-tps client. -n NUM_FULL_NODES - Number of fullnodes to apply command to. - + --gpu-mode GPU_MODE - Specify GPU mode to launch validators with (default: $gpuMode). + MODE must be one of + on - GPU *required*, any vendor * + off - No GPU, CPU-only + auto - Use GPU if available, any vendor * + cuda - GPU *required*, Nvidia CUDA only + * Currently, Nvidia CUDA is the only supported GPU vendor --hashes-per-tick NUM_HASHES|sleep|auto - Override the default --hashes-per-tick for the cluster --no-airdrop @@ -130,6 +136,7 @@ maybeSkipLedgerVerify="" maybeDisableAirdrops="" debugBuild=false doBuild=true +gpuMode=auto command=$1 [[ -n $command ]] || usage @@ -187,6 +194,17 @@ while [[ -n $1 ]]; do elif [[ $1 = --debug ]]; then debugBuild=true shift 1 + elif [[ $1 = --gpu-mode ]]; then + gpuMode=$2 + case "$gpuMode" in + on|off|auto|cuda) + ;; + *) + echo "Unexpected GPU mode: \"$gpuMode\"" + exit 1 + ;; + esac + shift 2 else usage "Unknown long option: $1" fi @@ -424,6 +442,7 @@ startBootstrapLeader() { $numBenchExchangeClients \"$benchExchangeExtraArgs\" \ \"$genesisOptions\" \ \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \ + \"$gpuMode\" \ " ) >> "$logFile" 2>&1 || { cat "$logFile" @@ -488,6 +507,7 @@ startNode() { $numBenchExchangeClients \"$benchExchangeExtraArgs\" \ \"$genesisOptions\" \ \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \ + \"$gpuMode\" \ " ) >> "$logFile" 2>&1 & declare pid=$! @@ -820,6 +840,32 @@ stop() { echo "Stopping nodes took $SECONDS seconds" } + +checkPremptibleInstances() { + # The fullnodeIpList nodes may be preemptible instances that can disappear at + # any time. Try to detect when a fullnode has been preempted to help the user + # out. + # + # Of course this isn't airtight as an instance could always disappear + # immediately after its successfully pinged. + for ipAddress in "${fullnodeIpList[@]}"; do + ( + set -x + timeout 5s ping -c 1 "$ipAddress" + ) || { + cat < ~/solana/on-reboot <> ~/solana/on-reboot < net-stats.log 2>&1 & echo \$! > net-stats.pid - if [[ -e /dev/nvidia0 ]]; then + if ${GPU_CUDA_OK} && [[ -e /dev/nvidia0 ]]; then echo Selecting solana-validator-cuda export SOLANA_CUDA=1 + elif ${GPU_FAIL_IF_NONE} ; then + echo "Expected GPU, found none!" + export SOLANA_GPU_MISSING=1 fi EOF @@ -196,7 +225,7 @@ EOF args+=($extraNodeArgs) cat >> ~/solana/on-reboot < fullnode.log 2>&1 & + nohup ./multinode-demo/bootstrap-leader.sh ${args[@]} > fullnode.log.\$now 2>&1 & pid=\$! oom_score_adj "\$pid" 1000 disown @@ -304,7 +333,7 @@ EOF # shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs args+=($extraNodeArgs) cat >> ~/solana/on-reboot < fullnode.log 2>&1 & + nohup multinode-demo/validator.sh ${args[@]} > fullnode.log.\$now 2>&1 & pid=\$! oom_score_adj "\$pid" 1000 disown @@ -351,7 +380,7 @@ EOF fi cat >> ~/solana/on-reboot < fullnode.log 2>&1 & + nohup multinode-demo/replicator.sh ${args[@]} > fullnode.log.\$now 2>&1 & pid=\$! oom_score_adj "\$pid" 1000 disown diff --git a/net/remote/remote-sanity.sh b/net/remote/remote-sanity.sh index f4356d0c5da84a..cb41d056f29e60 100755 --- a/net/remote/remote-sanity.sh +++ b/net/remote/remote-sanity.sh @@ -67,8 +67,10 @@ case $deployMethod in local|tar|skip) PATH="$HOME"/.cargo/bin:"$PATH" export USE_INSTALL=1 + solana_cli=solana solana_gossip=solana-gossip solana_install=solana-install + solana_keygen=solana-keygen ;; *) echo "Unknown deployment method: $deployMethod" @@ -85,6 +87,15 @@ else fi fi +echo "+++ $sanityTargetIp: validators" +( + # Ensure solana-cli has a keypair even though it doesn't really need one... + # TODO: Remove when https://github.com/solana-labs/solana/issues/6375 is fixed + $solana_keygen new --force -o temp-id.json + set -x + $solana_cli --keypair temp-id.json --url http://"$sanityTargetIp":8899 show-validators +) + echo "+++ $sanityTargetIp: node count ($numSanityNodes expected)" ( set -x diff --git a/net/scripts/azure-provider.sh b/net/scripts/azure-provider.sh index ab20dd0d6335b7..f49c7604f93d80 100755 --- a/net/scripts/azure-provider.sh +++ b/net/scripts/azure-provider.sh @@ -8,6 +8,10 @@ cloud_DefaultZone() { echo "westus" } +cloud_RestartPreemptedInstances() { + : # Not implemented +} + # # __cloud_GetConfigValueFromInstanceName # Return a piece of configuration information about an instance diff --git a/net/scripts/colo-node-onacquire-sh b/net/scripts/colo-node-onacquire-sh new file mode 100644 index 00000000000000..b3a7115cb7340a --- /dev/null +++ b/net/scripts/colo-node-onacquire-sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# XXX: This file isn't *quite* a script. It is intended to be passed via stdin +# to a node to requisition logic up node creation. Currently this is done in +# colo_node_requisition using the eval-cat trick. While this gets us what we +# want, care must be taken to ensure variable expansion happens at the right +# time. Any unescaped variable references ($X) in this file will be expanded by +# eval in colo_node_requisition. Escaped variable references (\$X) will be +# expanded upon execution on the remote node. + +if [ ! -f "$SOLANA_LOCK_FILE" ]; then + exec 9>>"$SOLANA_LOCK_FILE" + flock -x -n 9 || exit 1 + [ -n "\$SOLANA_USER" ] && { + echo "export SOLANA_LOCK_USER=\$SOLANA_USER" + echo "export SOLANA_LOCK_INSTANCENAME=$INSTANCE_NAME" + echo "[ -v SSH_TTY -a -f \"\${HOME}/.solana-motd\" ] && cat \"\${HOME}/.solana-motd\" 1>&2" + } >&9 || ( rm "$SOLANA_LOCK_FILE" && false ) + 9>&- + cat > /solana-scratch/id_ecdsa < /solana-scratch/id_ecdsa.pub < /solana-scratch/authorized_keys < /dev/null) +$(cat "${SSH_PRIVATE_KEY}.pub") +EOAK + cp /solana-scratch/id_ecdsa "\${HOME}/.ssh/id_ecdsa" + cp /solana-scratch/id_ecdsa.pub "\${HOME}/.ssh/id_ecdsa.pub" + cp /solana-scratch/authorized_keys "\${HOME}/.ssh/authorized_keys" + cat > "\${HOME}/.solana-motd" <"$SOLANA_LOCK_FILE" + flock -x -n 9 || exit 1 + . "$SOLANA_LOCK_FILE" + if [ "\$SOLANA_LOCK_USER" = "\$SOLANA_USER" ]; then + # Begin running process cleanup + CLEANUP_PID=\$$ + CLEANUP_PIDS=() + CLEANUP_PPIDS=() + get_pids() { + CLEANUP_PIDS=() + CLEANUP_PPIDS=() + declare line maybe_ppid maybe_pid + while read line; do + read maybe_ppid maybe_pid _ _ _ _ _ _ _ _ <<<"\$line" + CLEANUP_PIDS+=( \$maybe_pid ) + CLEANUP_PPIDS+=( \$maybe_ppid ) + done < <(ps jxh | sort -rn -k2,2) + } + + CLEANUP_PROC_CHAINS=() + resolve_chains() { + CLEANUP_PROC_CHAINS=() + declare i pid ppid handled n + for i in "\${!CLEANUP_PIDS[@]}"; do + pid=\${CLEANUP_PIDS[\$i]} + ppid=\${CLEANUP_PPIDS[\$i]} + handled=false + + for j in "\${!CLEANUP_PROC_CHAINS[@]}"; do + if grep -q "^\${ppid}\\\\b" <<<"\${CLEANUP_PROC_CHAINS[\$j]}"; then + CLEANUP_PROC_CHAINS[\$j]="\$pid \${CLEANUP_PROC_CHAINS[\$j]}" + handled=true + break + elif grep -q "\\\\b\${pid}\\$" <<<"\${CLEANUP_PROC_CHAINS[\$j]}"; then + CLEANUP_PROC_CHAINS[\$j]+=" \$ppid" + handled=true + # Don't break, we may be the parent of may proc chains + fi + done + if ! \$handled; then + n=\${#CLEANUP_PROC_CHAINS[@]} + CLEANUP_PROC_CHAINS[\$n]="\$pid \$ppid" + fi + done + } + + # Kill screen sessions + while read SID; do + screen -S "\$SID" -X quit + done < <(screen -wipe 2>&1 | sed -e 's/^\s\+\([^[:space:]]\+\)\s.*/\1/;t;d') + + # Kill tmux sessions + tmux kill-server &> /dev/null + + # Kill other processes + for SIG in INT TERM KILL; do + get_pids + if [[ \${#CLEANUP_PIDS[@]} -eq 0 ]]; then + break + else + resolve_chains + for p in "\${CLEANUP_PROC_CHAINS[@]}"; do + if ! grep -q "\b\$CLEANUP_PID\b" <<<"\$p"; then + read -a TO_KILL <<<"\$p" + N=\${#TO_KILL[@]} + ROOT_PPID="\${TO_KILL[\$((N-1))]}" + if [[ 1 -ne \$ROOT_PPID ]]; then + LAST_PID_IDX=\$((N-2)) + for I in \$(seq 0 \$LAST_PID_IDX); do + pid="\${TO_KILL[\$I]}" + kill -\$SIG \$pid &>/dev/null + done + fi + fi + done + get_pids + if [[ \${#CLEANUP_PIDS[@]} -gt 0 ]]; then + sleep 5 + fi + fi + done + # End running process cleanup + + # Begin filesystem cleanup + git clean -qxdff + rm -f /solana-scratch/* /solana-scratch/.[^.]* + cat > "\${HOME}/.ssh/authorized_keys" < /dev/null) +EOAK + # End filesystem cleanup + RC=true + fi + 9>&- +fi +\$RC + diff --git a/net/scripts/colo-provider.sh b/net/scripts/colo-provider.sh index c1558e5946c20c..a72bd19b8a7f07 100755 --- a/net/scripts/colo-provider.sh +++ b/net/scripts/colo-provider.sh @@ -16,6 +16,10 @@ cloud_DefaultZone() { echo "Denver" } +cloud_RestartPreemptedInstances() { + : # Not implemented +} + # # __cloud_FindInstances # @@ -44,7 +48,6 @@ __cloud_FindInstances() { for AVAIL in "${COLO_RES_AVAILABILITY[@]}"; do IFS=$'\v' read -r HOST_NAME IP PRIV_IP STATUS ZONE LOCK_USER INSTNAME <<<"$AVAIL" if [[ $INSTNAME =~ $filter ]]; then - IP=$PRIV_IP # Colo public IPs are firewalled to only allow UDP(8000-10000). Reuse private IP as public and require VPN printf "%-40s | publicIp=%-16s privateIp=%s zone=%s\n" "$INSTNAME" "$IP" "$PRIV_IP" "$ZONE" 1>&2 echo -e "${INSTNAME}:${IP}:${PRIV_IP}:$ZONE" fi @@ -135,6 +138,7 @@ cloud_Initialize() { # has been provisioned in the GCE region that is hosting `$zone` # bootDiskType - Optional specify SSD or HDD boot disk # additionalDiskSize - Optional specify size of additional storage volume +# preemptible - Optionally request a preemptible instance ("true") # # Tip: use cloud_FindInstances to locate the instances once this function # returns @@ -150,7 +154,8 @@ cloud_CreateInstances() { #declare optionalAddress="$9" # unused #declare optionalBootDiskType="${10}" # unused #declare optionalAdditionalDiskSize="${11}" # unused - declare sshPrivateKey="${12}" + #declare optionalPreemptible="${12}" # unused + declare sshPrivateKey="${13}" declare -a nodes if [[ $numNodes = 1 ]]; then @@ -205,7 +210,7 @@ cloud_CreateInstances() { while [[ $NI -lt $numNodes && $RI -lt $COLO_RES_N ]]; do node="${nodes[$NI]}" RES_MACH="${COLO_RES_MACHINE[$RI]}" - IP="${COLO_RES_IP_PRIV[$RI]}" + IP="${COLO_RES_IP[$RI]}" if colo_machine_types_compatible "$RES_MACH" "$machineType"; then if colo_node_requisition "$IP" "$node" "$sshPrivateKey" >/dev/null; then NI=$((NI+1)) @@ -236,10 +241,10 @@ cloud_DeleteInstances() { # cloud_WaitForInstanceReady() { #declare instanceName="$1" # unused - declare instanceIp="$2" - declare timeout="$4" + #declare instanceIp="$2" # unused + #declare timeout="$4" # unused - timeout "${timeout}"s bash -c "set -o pipefail; until ping -c 3 $instanceIp | tr - _; do echo .; done" + true } # diff --git a/net/scripts/colo-utils.sh b/net/scripts/colo-utils.sh index a0d45fd9b6c007..20e677b5e84c73 100644 --- a/net/scripts/colo-utils.sh +++ b/net/scripts/colo-utils.sh @@ -47,9 +47,9 @@ colo_load_availability() { COLO_RES_AVAILABILITY=() COLO_RES_REQUISITIONED=() while read -r LINE; do - IFS=$'\v' read -r PRIV_IP STATUS LOCK_USER INSTNAME <<< "$LINE" - I=$(colo_res_index_from_ip "$PRIV_IP") - IP="${COLO_RES_IP[$I]}" + IFS=$'\v' read -r IP STATUS LOCK_USER INSTNAME <<< "$LINE" + I=$(colo_res_index_from_ip "$IP") + PRIV_IP="${COLO_RES_IP_PRIV[$I]}" HOST_NAME="${COLO_RES_HOSTNAME[$I]}" ZONE="${COLO_RES_ZONE[$I]}" COLO_RES_AVAILABILITY+=( "$(echo -e "$HOST_NAME\v$IP\v$PRIV_IP\v$STATUS\v$ZONE\v$LOCK_USER\v$INSTNAME")" ) @@ -61,7 +61,7 @@ colo_load_availability() { colo_res_index_from_ip() { declare IP="$1" for i in "${!COLO_RES_IP_PRIV[@]}"; do - if [ "$IP" = "${COLO_RES_IP_PRIV[$i]}" ]; then + if [[ "$IP" = "${COLO_RES_IP[$i]}" || "$IP" = "${COLO_RES_IP_PRIV[$i]}" ]]; then echo "$i" return 0 fi @@ -89,7 +89,7 @@ colo_instance_run_foreach() { CMD="$1" declare IPS=() for I in $(seq 0 $((COLO_RES_N-1))); do - IPS+=( "${COLO_RES_IP_PRIV[$I]}" ) + IPS+=( "${COLO_RES_IP[$I]}" ) done set "${IPS[@]}" "$CMD" fi @@ -175,52 +175,19 @@ colo_node_status_all() { export COLO_RES_REQUISITIONED=() colo_node_requisition() { declare IP=$1 + # shellcheck disable=SC2034 declare INSTANCE_NAME=$2 + # shellcheck disable=SC2034 declare SSH_PRIVATE_KEY="$3" declare INDEX INDEX=$(colo_res_index_from_ip "$IP") declare RC=false - colo_instance_run "$IP" "$( -cat <>"$SOLANA_LOCK_FILE" - flock -x -n 9 || exit 1 - [ -n "\$SOLANA_USER" ] && { - echo "export SOLANA_LOCK_USER=\$SOLANA_USER" - echo "export SOLANA_LOCK_INSTANCENAME=$INSTANCE_NAME" - echo "[ -v SSH_TTY -a -f \"\${HOME}/.solana-motd\" ] && cat \"\${HOME}/.solana-motd\" 1>&2" - } >&9 || ( rm "$SOLANA_LOCK_FILE" && false ) - 9>&- - cat > /solana-scratch/id_ecdsa < /solana-scratch/id_ecdsa.pub < /solana-scratch/authorized_keys < /dev/null) -$(cat "${SSH_PRIVATE_KEY}.pub") -EOAK - cp /solana-scratch/id_ecdsa "\${HOME}/.ssh/id_ecdsa" - cp /solana-scratch/id_ecdsa.pub "\${HOME}/.ssh/id_ecdsa.pub" - cp /solana-scratch/authorized_keys "\${HOME}/.ssh/authorized_keys" - cat > "\${HOME}/.solana-motd" <"$SOLANA_LOCK_FILE" - flock -x -n 9 || exit 1 - . "$SOLANA_LOCK_FILE" - if [ "\$SOLANA_LOCK_USER" = "\$SOLANA_USER" ]; then - git clean -qxdff - rm -f /solana-scratch/* /solana-scratch/.[^.]* - cat > "\${HOME}/.ssh/authorized_keys" < /dev/null) -EOAK - RC=true - fi - 9>&- - fi - \$RC + colo_instance_run "$IP" "$(eval "cat < u32 { // Clock - let clock = Clock::from_account_info(&accounts[2]).unwrap(); + let clock = Clock::from_account_info(&accounts[2]).expect("clock"); assert_eq!(clock.slot, DEFAULT_SLOTS_PER_EPOCH + 1); assert_eq!( clock.segment, @@ -25,20 +25,20 @@ fn process_instruction(_program_id: &Pubkey, accounts: &mut [AccountInfo], _data ); // Fees - let fees = Fees::from_account_info(&accounts[3]).unwrap(); + let fees = Fees::from_account_info(&accounts[3]).expect("fees"); let burn = fees.fee_calculator.burn(42); assert_eq!(burn, (21, 21)); // Rewards - let _ = Rewards::from_account_info(&accounts[4]).unwrap(); + let _rewards = Rewards::from_account_info(&accounts[4]).expect("rewards"); // Slot Hashes - let slot_hashes = SlotHashes::from_account_info(&accounts[5]).unwrap(); - assert_eq!(slot_hashes.len(), 1); + let slot_hashes = SlotHashes::from_account_info(&accounts[5]).expect("slot_hashes"); + assert!(slot_hashes.len() >= 1); // Stake History - let stake_history = StakeHistory::from_account_info(&accounts[6]).unwrap(); - assert_eq!(stake_history.len(), 1); + let stake_history = StakeHistory::from_account_info(&accounts[6]).expect("stake_history"); + assert!(stake_history.len() >= 1); let rent = Rent::from_account_info(&accounts[7]).unwrap(); assert_eq!( diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 7ac88f01352e44..6f3f927081f14c 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -111,15 +111,16 @@ mod bpf { file.read_to_end(&mut elf).unwrap(); let GenesisBlockInfo { - mut genesis_block, + genesis_block, mint_keypair, .. } = create_genesis_block(50); - genesis_block.epoch_warmup = false; let bank = Arc::new(Bank::new(&genesis_block)); // Create bank with specific slot, used by solana_bpf_rust_sysvar test + dbg!(bank.epoch()); let bank = Bank::new_from_parent(&bank, &Pubkey::default(), DEFAULT_SLOTS_PER_EPOCH + 1); + dbg!(bank.epoch()); let bank_client = BankClient::new(bank); // Call user program diff --git a/programs/bpf_loader_api/Cargo.toml b/programs/bpf_loader_api/Cargo.toml index 88baf0a3a0e0cc..67970ccff6850a 100644 --- a/programs/bpf_loader_api/Cargo.toml +++ b/programs/bpf_loader_api/Cargo.toml @@ -11,7 +11,7 @@ edition = "2018" [dependencies] bincode = "1.2.0" byteorder = "1.3.2" -libc = "0.2.62" +libc = "0.2.64" log = "0.4.8" serde = "1.0.101" solana-logger = { path = "../../logger", version = "0.20.0" } diff --git a/programs/btc_spv_bin/Cargo.toml b/programs/btc_spv_bin/Cargo.toml index fd9d3d5363eb13..66c7492348279e 100644 --- a/programs/btc_spv_bin/Cargo.toml +++ b/programs/btc_spv_bin/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" [dependencies] clap="2.33.0" -reqwest = { version = "0.9.21", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } serde="1.0.101" serde_derive="1.0.101" serde_json = "1.0.41" diff --git a/programs/config_api/src/config_processor.rs b/programs/config_api/src/config_processor.rs index 553bb8c544cf1a..f016d2dd0bce76 100644 --- a/programs/config_api/src/config_processor.rs +++ b/programs/config_api/src/config_processor.rs @@ -5,6 +5,7 @@ use bincode::deserialize; use log::*; use solana_sdk::account::KeyedAccount; use solana_sdk::instruction::InstructionError; +use solana_sdk::instruction_processor_utils::next_keyed_account; use solana_sdk::pubkey::Pubkey; pub fn process_instruction( @@ -17,10 +18,13 @@ pub fn process_instruction( InstructionError::InvalidInstructionData })?; - let current_data: ConfigKeys = deserialize(&keyed_accounts[0].account.data).map_err(|err| { - error!("Invalid data in account[0]: {:?} {:?}", data, err); - InstructionError::InvalidAccountData - })?; + let keyed_accounts_iter = &mut keyed_accounts.iter_mut(); + let config_keyed_account = &mut next_keyed_account(keyed_accounts_iter)?; + let current_data: ConfigKeys = + deserialize(&config_keyed_account.account.data).map_err(|err| { + error!("Invalid data in account[0]: {:?} {:?}", data, err); + InstructionError::InvalidAccountData + })?; let current_signer_keys: Vec = current_data .keys .iter() @@ -31,23 +35,17 @@ pub fn process_instruction( if current_signer_keys.is_empty() { // Config account keypair must be a signer on account initilization, // or when no signers specified in Config data - if keyed_accounts[0].signer_key().is_none() { + if config_keyed_account.signer_key().is_none() { error!("account[0].signer_key().is_none()"); return Err(InstructionError::MissingRequiredSignature); } } let mut counter = 0; - for (i, (signer, _)) in key_list - .keys - .iter() - .filter(|(_, is_signer)| *is_signer) - .enumerate() - { + for (signer, _) in key_list.keys.iter().filter(|(_, is_signer)| *is_signer) { counter += 1; - if signer != keyed_accounts[0].unsigned_key() { - let account_index = i + 1; - let signer_account = keyed_accounts.get(account_index); + if signer != config_keyed_account.unsigned_key() { + let signer_account = keyed_accounts_iter.next(); if signer_account.is_none() { error!("account {:?} is not in account list", signer); return Err(InstructionError::MissingRequiredSignature); @@ -60,7 +58,7 @@ pub fn process_instruction( if signer_key.unwrap() != signer { error!( "account[{:?}].signer_key() does not match Config data)", - account_index + counter + 1 ); return Err(InstructionError::MissingRequiredSignature); } @@ -74,7 +72,7 @@ pub fn process_instruction( error!("account {:?} is not in stored signer list", signer); return Err(InstructionError::MissingRequiredSignature); } - } else if keyed_accounts[0].signer_key().is_none() { + } else if config_keyed_account.signer_key().is_none() { error!("account[0].signer_key().is_none()"); return Err(InstructionError::MissingRequiredSignature); } @@ -90,12 +88,12 @@ pub fn process_instruction( return Err(InstructionError::MissingRequiredSignature); } - if keyed_accounts[0].account.data.len() < data.len() { + if config_keyed_account.account.data.len() < data.len() { error!("instruction data too large"); return Err(InstructionError::InvalidInstructionData); } - keyed_accounts[0].account.data[0..data.len()].copy_from_slice(&data); + config_keyed_account.account.data[0..data.len()].copy_from_slice(&data); Ok(()) } diff --git a/programs/config_tests/tests/config_processor.rs b/programs/config_tests/tests/config_processor.rs index 0a76b1062b4540..78fbfb02a3ee4e 100644 --- a/programs/config_tests/tests/config_processor.rs +++ b/programs/config_tests/tests/config_processor.rs @@ -10,10 +10,12 @@ use solana_runtime::{bank::Bank, bank_client::BankClient}; use solana_sdk::{ client::SyncClient, genesis_block::create_genesis_block, + instruction::InstructionError, message::Message, pubkey::Pubkey, signature::{Keypair, KeypairUtil}, system_instruction, + transaction::TransactionError, }; #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -364,3 +366,26 @@ fn test_config_updates_requiring_config() { .send_message(&[&mint_keypair, &config_keypair], message) .unwrap_err(); } + +#[test] +fn test_config_initialize_no_panic() { + let (bank, alice_keypair) = create_bank(3); + let bank_client = BankClient::new(bank); + + let mut instructions = config_instruction::create_account::( + &alice_keypair.pubkey(), + &Pubkey::new_rand(), + 1, + vec![], + ); + instructions[1].accounts = vec![]; //