From 870ec89e9a89dabedde055407df5208691eb2984 Mon Sep 17 00:00:00 2001
From: gabriel klawitter
Date: Fri, 14 Sep 2018 15:55:14 +0200
Subject: [PATCH 1/4] Simultaneous platform tests WIP (#9557)

* look into commit changes
* look into commit changes ii
* all on test
* build only_releaseable_branches for platforms
* allow failure for check during development
* windows test typo
* fix sh for windows
* remove check stage again
* debug macos platform
---
 .gitlab-ci.yml         | 43 ++++++++++++++++++++++++++++++++++++++++++
 scripts/gitlab/test.sh | 38 ++++++++++++++++++++++++++-----------
 2 files changed, 70 insertions(+), 11 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a36c39db7fd..6e1de5f20a4 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -60,6 +60,46 @@ test-rust-stable: &test
   tags:
     - rust-stable
 
+test-darwin-macos-x86_64:
+  stage: test
+  variables:
+    CARGO_TARGET: x86_64-apple-darwin
+    CC: gcc
+    CXX: g++
+  script:
+    - scripts/gitlab/test.sh stable
+  tags:
+    - osx
+
+test-linux-android-armhf:
+  stage: test
+  image: parity/rust-android:gitlab-ci
+  variables:
+    CARGO_TARGET: armv7-linux-androideabi
+  script:
+    - scripts/gitlab/test.sh stable
+  tags:
+    - rust-arm
+
+test-windows-msvc-x86_64:
+  stage: test
+  cache:
+    key: "%CI_JOB_NAME%"
+    paths:
+      - "%CI_PROJECT_DIR%/target/"
+      - "%CI_PROJECT_DIR%/cargo/"
+    # No cargo caching, since fetch-locking on Windows gets stuck
+  variables:
+    CARGO_TARGET: x86_64-pc-windows-msvc
+  script:
+    - sh scripts/gitlab/test.sh stable
+  tags:
+    - rust-windows
+
+
+
+
 .optional_test: &optional_test
   <<: *test
   allow_failure: true
@@ -139,6 +179,7 @@ build-linux-ubuntu-armhf:
 
 build-linux-android-armhf:
   stage: build
+  only: *releaseable_branches
   image: parity/rust-android:gitlab-ci
   variables:
     CARGO_TARGET: armv7-linux-androideabi
@@ -149,6 +190,7 @@ build-linux-android-armhf:
 
 build-darwin-macos-x86_64:
   stage: build
+  only: *releaseable_branches
   variables:
     CARGO_TARGET: x86_64-apple-darwin
     CC: gcc
@@ -161,6 +203,7 @@ build-darwin-macos-x86_64:
 
 build-windows-msvc-x86_64:
   stage: build
+  only: *releaseable_branches
   cache:
     key: "%CI_JOB_NAME%"
    paths:

diff --git a/scripts/gitlab/test.sh b/scripts/gitlab/test.sh
index 98d71e9e590..ad9e0b43c1c 100755
--- a/scripts/gitlab/test.sh
+++ b/scripts/gitlab/test.sh
@@ -4,25 +4,41 @@
 set -e # fail on any error
 set -u # treat unset variables as error
 
-rustup default $1
-if [[ "$CI_COMMIT_REF_NAME" = "master" || "$CI_COMMIT_REF_NAME" = "beta" || "$CI_COMMIT_REF_NAME" = "stable" ]]; then
-  export GIT_COMPARE=$CI_COMMIT_REF_NAME~;
-else
-  export GIT_COMPARE=master;
-fi
+set -x # full command output for development
+git log --graph --oneline --all --decorate=short -n 10
+
+
+case $CI_COMMIT_REF_NAME in
+  (master|beta|stable)
+    export GIT_COMPARE=$CI_COMMIT_REF_NAME~
+    ;;
+  (*)
+    export GIT_COMPARE=master
+    ;;
+esac
 
-export RUST_FILES_MODIFIED="$(git --no-pager diff --name-only $GIT_COMPARE...$CI_COMMIT_SHA | grep -v -e ^\\. -e ^LICENSE -e ^README.md -e ^test.sh -e ^windows/ -e ^scripts/ -e ^mac/ -e ^nsis/ | wc -l)"
+
+export RUST_FILES_MODIFIED="$(git --no-pager diff --name-only $GIT_COMPARE...$CI_COMMIT_SHA | grep -v -e ^\\. -e ^LICENSE -e ^README.md -e ^test.sh -e ^scripts/ | wc -l | tr -d ' ')"
 echo "RUST_FILES_MODIFIED: $RUST_FILES_MODIFIED"
+
+if [ "${RUST_FILES_MODIFIED}" = "0" ]
+then
+  echo "__________Skipping Rust tests since no Rust files modified__________";
+  exit 0
+fi
+
+
+rustup default $1
+
 git submodule update --init --recursive
 rustup show
-if [[ "${RUST_FILES_MODIFIED}" == "0" ]];
-then echo "__________Skipping Rust tests since no Rust files modified__________";
-else ./test.sh || exit $?;
-fi
+
+exec ./test.sh
 
 # if [[ "$CI_COMMIT_REF_NAME" == "nightly" ]];
 # ### @TODO re-enable fail after https://github.com/paritytech/parity-import-tests/issues/3
 # then sh scripts/aura-test.sh; # || exit $?;
 # fi
+
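The interesting part of this patch is the test.sh gate: the script derives a comparison ref (the branch's parent for master/beta/stable, otherwise master), counts the changed paths that fall outside a small set of ignored prefixes, and exits 0 before any toolchain setup when nothing relevant changed. Below is a minimal Rust sketch of the same gating rule; the function and constant names are illustrative only, nothing here is code from the repository.

    // Sketch of the change-detection rule in scripts/gitlab/test.sh:
    // run the Rust test suite only if at least one changed path falls
    // outside the ignored prefixes (mirrors the `grep -v` patterns).
    const IGNORED_PREFIXES: &[&str] = &[".", "LICENSE", "README.md", "test.sh", "scripts/"];

    fn should_run_tests(changed_paths: &[&str]) -> bool {
        changed_paths
            .iter()
            .any(|path| !IGNORED_PREFIXES.iter().any(|prefix| path.starts_with(prefix)))
    }

    fn main() {
        // Only docs and CI scripts changed: skipped, like the `exit 0` branch.
        assert!(!should_run_tests(&["README.md", "scripts/gitlab/test.sh"]));
        // A Rust source file changed: the suite runs.
        assert!(should_run_tests(&["util/network-devp2p/src/discovery.rs"]));
        println!("gating rule behaves as expected");
    }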
From 57d2c8c94a432a375ffc70dd0a03d837f84d5f21 Mon Sep 17 00:00:00 2001
From: gabriel klawitter
Date: Fri, 14 Sep 2018 16:16:36 +0200
Subject: [PATCH 2/4] move dockerfile for android build container to scripts
 repo (#9560)

---
 docker/android/Dockerfile        |  61 ---------
 docker/android/cargo-config.toml |   9 --
 docker/android/libudev.patch     | 216 -------------------------------
 3 files changed, 286 deletions(-)
 delete mode 100644 docker/android/Dockerfile
 delete mode 100644 docker/android/cargo-config.toml
 delete mode 100644 docker/android/libudev.patch

diff --git a/docker/android/Dockerfile b/docker/android/Dockerfile
deleted file mode 100644
index 1dbf15fac7d..00000000000
--- a/docker/android/Dockerfile
+++ /dev/null
@@ -1,61 +0,0 @@
-FROM ubuntu:xenial
-LABEL maintainer="Parity Technologies"
-
-RUN apt-get update && \
-	apt-get install -yq sudo curl file build-essential wget git g++ cmake pkg-config bison flex \
-	unzip lib32stdc++6 lib32z1 python autotools-dev automake autoconf libtool \
-	gperf xsltproc docbook-xsl
-
-# Rust & Cargo
-RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
-ENV PATH /root/.cargo/bin:$PATH
-RUN rustup toolchain install stable
-RUN rustup target add --toolchain stable arm-linux-androideabi
-RUN rustup target add --toolchain stable armv7-linux-androideabi
-
-# Android NDK and toolchain
-RUN cd /usr/local && \
-	wget -q https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip && \
-	unzip -q android-ndk-r16b-linux-x86_64.zip && \
-	rm android-ndk-r16b-linux-x86_64.zip
-ENV NDK_HOME /usr/local/android-ndk-r16b
-RUN /usr/local/android-ndk-r16b/build/tools/make-standalone-toolchain.sh \
-	--arch=arm --install-dir=/opt/ndk-standalone --stl=libc++ --platform=android-26
-ENV PATH $PATH:/opt/ndk-standalone/bin
-
-# Compiling libudev for Android
-# This is the most hacky part of the process, as we need to apply a patch and pass specific
-# options that the compiler environment doesn't define.
-RUN cd /root && \
-	git clone https://github.com/gentoo/eudev.git
-ADD libudev.patch /root
-RUN cd /root/eudev && \
-	git checkout 83d918449f22720d84a341a05e24b6d109e6d3ae && \
-	./autogen.sh && \
-	./configure --disable-introspection --disable-programs --disable-hwdb \
-		--host=arm-linux-androideabi --prefix=/opt/ndk-standalone/sysroot/usr/ \
-		--enable-shared=false CC=arm-linux-androideabi-clang \
-		CFLAGS="-D LINE_MAX=2048 -D RLIMIT_NLIMITS=15 -D IPTOS_LOWCOST=2 -std=gnu99" \
-		CXX=arm-linux-androideabi-clang++ && \
-	git apply - < /root/libudev.patch && \
-	make && \
-	make install
-RUN rm -rf /root/eudev
-RUN rm /root/libudev.patch
-
-# Rust-related configuration
-ADD cargo-config.toml /root/.cargo/config
-ENV ARM_LINUX_ANDROIDEABI_OPENSSL_DIR /opt/ndk-standalone/sysroot/usr
-ENV ARMV7_LINUX_ANDROIDEABI_OPENSSL_DIR /opt/ndk-standalone/sysroot/usr
-ENV CC_arm_linux_androideabi arm-linux-androideabi-clang
-ENV CC_armv7_linux_androideabi arm-linux-androideabi-clang
-ENV CXX_arm_linux_androideabi arm-linux-androideabi-clang++
-ENV CXX_armv7_linux_androideabi arm-linux-androideabi-clang++
-ENV AR_arm_linux_androideabi arm-linux-androideabi-ar
-ENV AR_armv7_linux_androideabi arm-linux-androideabi-ar
-ENV CFLAGS_arm_linux_androideabi -std=gnu11 -fPIC -D OS_ANDROID
-ENV CFLAGS_armv7_linux_androideabi -std=gnu11 -fPIC -D OS_ANDROID
-ENV CXXFLAGS_arm_linux_androideabi -std=gnu++11 -fPIC -fexceptions -frtti -static-libstdc++ -D OS_ANDROID
-ENV CXXFLAGS_armv7_linux_androideabi -std=gnu++11 -fPIC -fexceptions -frtti -static-libstdc++ -D OS_ANDROID
-ENV CXXSTDLIB_arm_linux_androideabi ""
-ENV CXXSTDLIB_armv7_linux_androideabi ""

diff --git a/docker/android/cargo-config.toml b/docker/android/cargo-config.toml
deleted file mode 100644
index 7373a7e143d..00000000000
--- a/docker/android/cargo-config.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[target.armv7-linux-androideabi]
-linker = "arm-linux-androideabi-clang"
-ar = "arm-linux-androideabi-ar"
-rustflags = ["-C", "link-arg=-lc++_static", "-C", "link-arg=-lc++abi", "-C", "link-arg=-landroid_support"]
-
-[target.arm-linux-androideabi]
-linker = "arm-linux-androideabi-clang"
-ar = "arm-linux-androideabi-ar"
-rustflags = ["-C", "link-arg=-lc++_static", "-C", "link-arg=-lc++abi", "-C", "link-arg=-landroid_support"]

diff --git a/docker/android/libudev.patch b/docker/android/libudev.patch
deleted file mode 100644
index ba7e849b2e6..00000000000
--- a/docker/android/libudev.patch
+++ /dev/null
@@ -1,216 +0,0 @@
-diff --git a/src/collect/collect.c b/src/collect/collect.c
-index 2cf1f00..b24f26b 100644
---- a/src/collect/collect.c
-+++ b/src/collect/collect.c
-@@ -84,7 +84,7 @@ static void usage(void)
-         " invoked for each ID in ) collect returns 0, the\n"
-         " number of missing IDs otherwise.\n"
-         " On error a negative number is returned.\n\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- /*
-diff --git a/src/scsi_id/scsi_id.c b/src/scsi_id/scsi_id.c
-index 8b76d87..7bf3948 100644
---- a/src/scsi_id/scsi_id.c
-+++ b/src/scsi_id/scsi_id.c
-@@ -321,7 +321,7 @@ static void help(void) {
-         " -u --replace-whitespace  Replace all whitespace by underscores\n"
-         " -v --verbose             Verbose logging\n"
-         " -x --export              Print values as environment keys\n"
--        , program_invocation_short_name);
-+        , "parity");
- 
- }
- 
-diff --git a/src/shared/hashmap.h b/src/shared/hashmap.h
-index a03ee58..a7c2005 100644
---- a/src/shared/hashmap.h
-+++ b/src/shared/hashmap.h
-@@ -98,10 +98,7 @@ extern const struct hash_ops uint64_hash_ops;
- #if SIZEOF_DEV_T != 8
- unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) _pure_;
- int devt_compare_func(const void *a, const void *b) _pure_;
--extern const struct hash_ops devt_hash_ops = {
--        .hash = devt_hash_func,
--        .compare = devt_compare_func
--};
-+extern const struct hash_ops devt_hash_ops;
- #else
- #define devt_hash_func uint64_hash_func
- #define devt_compare_func uint64_compare_func
-diff --git a/src/shared/log.c b/src/shared/log.c
-index 4a40996..1496984 100644
---- a/src/shared/log.c
-+++ b/src/shared/log.c
-@@ -335,7 +335,7 @@ static int write_to_syslog(
- 
-         IOVEC_SET_STRING(iovec[0], header_priority);
-         IOVEC_SET_STRING(iovec[1], header_time);
--        IOVEC_SET_STRING(iovec[2], program_invocation_short_name);
-+        IOVEC_SET_STRING(iovec[2], "parity");
-         IOVEC_SET_STRING(iovec[3], header_pid);
-         IOVEC_SET_STRING(iovec[4], buffer);
- 
-@@ -383,7 +383,7 @@ static int write_to_kmsg(
-         char_array_0(header_pid);
- 
-         IOVEC_SET_STRING(iovec[0], header_priority);
--        IOVEC_SET_STRING(iovec[1], program_invocation_short_name);
-+        IOVEC_SET_STRING(iovec[1], "parity");
-         IOVEC_SET_STRING(iovec[2], header_pid);
-         IOVEC_SET_STRING(iovec[3], buffer);
-         IOVEC_SET_STRING(iovec[4], "\n");
-diff --git a/src/udev/udevadm-control.c b/src/udev/udevadm-control.c
-index 6af7163..3271e56 100644
---- a/src/udev/udevadm-control.c
-+++ b/src/udev/udevadm-control.c
-@@ -41,7 +41,7 @@ static void print_help(void) {
-         " -p --property=KEY=VALUE  Set a global property for all events\n"
-         " -m --children-max=N      Maximum number of children\n"
-         "    --timeout=SECONDS     Maximum time to block for a reply\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int adm_control(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm-info.c b/src/udev/udevadm-info.c
-index 0aec976..a31ac02 100644
---- a/src/udev/udevadm-info.c
-+++ b/src/udev/udevadm-info.c
-@@ -279,7 +279,7 @@ static void help(void) {
-         " -P --export-prefix       Export the key name with a prefix\n"
-         " -e --export-db           Export the content of the udev database\n"
-         " -c --cleanup-db          Clean up the udev database\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int uinfo(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm-monitor.c b/src/udev/udevadm-monitor.c
-index 15ded09..b58dd08 100644
---- a/src/udev/udevadm-monitor.c
-+++ b/src/udev/udevadm-monitor.c
-@@ -73,7 +73,7 @@ static void help(void) {
-         " -u --udev                                Print udev events\n"
-         " -s --subsystem-match=SUBSYSTEM[/DEVTYPE] Filter events by subsystem\n"
-         " -t --tag-match=TAG                       Filter events by tag\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int adm_monitor(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm-settle.c b/src/udev/udevadm-settle.c
-index 33597bc..b36a504 100644
---- a/src/udev/udevadm-settle.c
-+++ b/src/udev/udevadm-settle.c
-@@ -43,7 +43,7 @@ static void help(void) {
-         "    --version             Show package version\n"
-         " -t --timeout=SECONDS     Maximum time to wait for events\n"
-         " -E --exit-if-exists=FILE Stop waiting if file exists\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int adm_settle(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm-test-builtin.c b/src/udev/udevadm-test-builtin.c
-index baaeca9..50ed812 100644
---- a/src/udev/udevadm-test-builtin.c
-+++ b/src/udev/udevadm-test-builtin.c
-@@ -39,7 +39,7 @@ static void help(struct udev *udev) {
-         " -h --help             Print this message\n"
-         "    --version          Print version of the program\n\n"
-         "Commands:\n"
--        , program_invocation_short_name);
-+        , "parity");
- 
-         udev_builtin_list(udev);
- }
-diff --git a/src/udev/udevadm-test.c b/src/udev/udevadm-test.c
-index 47fd924..a855412 100644
---- a/src/udev/udevadm-test.c
-+++ b/src/udev/udevadm-test.c
-@@ -39,7 +39,7 @@ static void help(void) {
-         "    --version                            Show package version\n"
-         " -a --action=ACTION                      Set action string\n"
-         " -N --resolve-names=early|late|never     When to resolve names\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int adm_test(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm-trigger.c b/src/udev/udevadm-trigger.c
-index 4dc756a..67787d3 100644
---- a/src/udev/udevadm-trigger.c
-+++ b/src/udev/udevadm-trigger.c
-@@ -92,7 +92,7 @@ static void help(void) {
-         " -y --sysname-match=NAME           Trigger devices with this /sys path\n"
-         "    --name-match=NAME              Trigger devices with this /dev name\n"
-         " -b --parent-match=NAME            Trigger devices with that parent device\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int adm_trigger(struct udev *udev, int argc, char *argv[]) {
-diff --git a/src/udev/udevadm.c b/src/udev/udevadm.c
-index 3e57cf6..b03dfaa 100644
---- a/src/udev/udevadm.c
-+++ b/src/udev/udevadm.c
-@@ -62,7 +62,7 @@ static int adm_help(struct udev *udev, int argc, char *argv[]) {
-         printf("%s [--help] [--version] [--debug] COMMAND [COMMAND OPTIONS]\n\n"
-                "Send control commands or test the device manager.\n\n"
-                "Commands:\n"
--               , program_invocation_short_name);
-+               , "parity");
- 
-         for (i = 0; i < ELEMENTSOF(udevadm_cmds); i++)
-                 if (udevadm_cmds[i]->help != NULL)
-@@ -128,7 +128,7 @@ int main(int argc, char *argv[]) {
-                 goto out;
-         }
- 
--        fprintf(stderr, "%s: missing or unknown command\n", program_invocation_short_name);
-+        fprintf(stderr, "%s: missing or unknown command\n", "parity");
-         rc = 2;
- out:
-         mac_selinux_finish();
-diff --git a/src/udev/udevd.c b/src/udev/udevd.c
-index cf826c6..4eec0af 100644
---- a/src/udev/udevd.c
-+++ b/src/udev/udevd.c
-@@ -1041,7 +1041,7 @@ static void help(void) {
-         " -t --event-timeout=SECONDS        Seconds to wait before terminating an event\n"
-         " -N --resolve-names=early|late|never\n"
-         "                                   When to resolve users and groups\n"
--        , program_invocation_short_name);
-+        , "parity");
- }
- 
- static int parse_argv(int argc, char *argv[]) {
-diff --git a/src/v4l_id/v4l_id.c b/src/v4l_id/v4l_id.c
-index 1dce0d5..f65badf 100644
---- a/src/v4l_id/v4l_id.c
-+++ b/src/v4l_id/v4l_id.c
-@@ -49,7 +49,7 @@ int main(int argc, char *argv[]) {
-                 printf("%s [-h,--help] \n\n"
-                        "Video4Linux device identification.\n\n"
-                        " -h Print this message\n"
--                       , program_invocation_short_name);
-+                       , "parity");
-                 return 0;
-         case '?':
-                 return -EINVAL;
-diff --git a/src/shared/path-util.c b/src/shared/path-util.c
-index 0744563..7151356 100644
---- a/src/shared/path-util.c
-+++ b/src/shared/path-util.c
-@@ -109,7 +109,7 @@ char *path_make_absolute_cwd(const char *p) {
-         if (path_is_absolute(p))
-                 return strdup(p);
- 
--        cwd = get_current_dir_name();
-+        cwd = getcwd(malloc(128), 128);
-         if (!cwd)
-                 return NULL;
From 753fd4bda36f8bdd1ce19b0ca7e8b87bde47926d Mon Sep 17 00:00:00 2001
From: Nicolas Gotchac
Date: Fri, 14 Sep 2018 22:18:03 +0200
Subject: [PATCH 3/4] Improve P2P discovery (#9526)

* Add `target` to Rust traces
* network-devp2p: Don't remove discovery peer in main sync
* network-p2p: Refresh discovery more often
* Update Peer discovery protocol
* Run discovery more often when not enough nodes connected
* Start the first discovery early
* Update fast discovery rate
* Fix tests
* Fix `ping` tests
* Fixing remote Node address ; adding PingPong round
* Fix tests: update new +1 PingPong round
* Increase slow Discovery rate
  Check in flight FindNode before pings
* Add `deprecated` to deprecated_echo_hash
* Refactor `discovery_round` branching
---
 ethcore/sync/src/chain/propagator.rs  |   2 +-
 util/network-devp2p/src/discovery.rs  | 350 +++++++++++++++-----------
 util/network-devp2p/src/host.rs       |  42 +++-
 util/network-devp2p/src/node_table.rs |  10 +-
 4 files changed, 248 insertions(+), 156 deletions(-)

diff --git a/ethcore/sync/src/chain/propagator.rs b/ethcore/sync/src/chain/propagator.rs
index 102a3171246..82b592c4ba0 100644
--- a/ethcore/sync/src/chain/propagator.rs
+++ b/ethcore/sync/src/chain/propagator.rs
@@ -200,7 +200,7 @@ impl SyncPropagator {
 			let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE);
 			if !appended {
 				// Maximal packet size reached just proceed with sending
-				debug!("Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len());
+				debug!(target: "sync", "Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len());
 				to_send = to_send.into_iter().take(pushed).collect();
 				break;
 			}

diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs
index c7782b247ca..8ef244cbd11 100644
--- a/util/network-devp2p/src/discovery.rs
+++ b/util/network-devp2p/src/discovery.rs
@@ -42,9 +42,9 @@ const PACKET_PONG: u8 = 2;
 const PACKET_FIND_NODE: u8 = 3;
 const PACKET_NEIGHBOURS: u8 = 4;
 
-const PING_TIMEOUT: Duration = Duration::from_millis(300);
+const PING_TIMEOUT: Duration = Duration::from_millis(500);
 const FIND_NODE_TIMEOUT: Duration = Duration::from_secs(2);
-const EXPIRY_TIME: Duration = Duration::from_secs(60);
+const EXPIRY_TIME: Duration = Duration::from_secs(20);
 const MAX_NODES_PING: usize = 32; // Max nodes to add/ping at once
 const REQUEST_BACKOFF: [Duration; 4] = [
 	Duration::from_secs(1),
@@ -80,15 +80,29 @@ impl BucketEntry {
 	}
 }
 
-pub struct NodeBucket {
-	nodes: VecDeque<BucketEntry>, //sorted by last active
+struct FindNodeRequest {
+	// Time when the request was sent
+	sent_at: Instant,
+	// Number of items sent by the node
+	response_count: usize,
+	// Whether the request has been answered yet
+	answered: bool,
 }
 
-struct PendingRequest {
-	packet_id: u8,
+struct PingRequest {
+	// Time when the request was sent
 	sent_at: Instant,
-	packet_hash: H256,
-	response_count: usize, // Some requests (eg. FIND_NODE) have multi-packet responses
+	// The node to which the request was sent
+	node: NodeEntry,
+	// The hash sent in the Ping request
+	echo_hash: H256,
+	// The hash Parity used to respond with (until rev 01f825b0e1f1c4c420197b51fc801cbe89284b29)
+	#[deprecated()]
+	deprecated_echo_hash: H256,
+}
+
+pub struct NodeBucket {
+	nodes: VecDeque<BucketEntry>, //sorted by last active
 }
 
 impl Default for NodeBucket {
@@ -115,13 +129,13 @@ pub struct Discovery<'a> {
 	id_hash: H256,
 	secret: Secret,
 	public_endpoint: NodeEndpoint,
-	discovery_round: u16,
+	discovery_initiated: bool,
+	discovery_round: Option<u16>,
 	discovery_id: NodeId,
 	discovery_nodes: HashSet<NodeId>,
 	node_buckets: Vec<NodeBucket>,
-	in_flight_requests: HashMap<NodeId, PendingRequest>,
-	expiring_pings: VecDeque<(NodeId, Instant)>,
-	expiring_finds: VecDeque<(NodeId, Instant)>,
+	in_flight_pings: HashMap<NodeId, PingRequest>,
+	in_flight_find_nodes: HashMap<NodeId, FindNodeRequest>,
 	send_queue: VecDeque<Datagram>,
 	check_timestamps: bool,
 	adding_nodes: Vec<NodeEntry>,
@@ -141,13 +155,13 @@ impl<'a> Discovery<'a> {
 			id_hash: keccak(key.public()),
 			secret: key.secret().clone(),
 			public_endpoint: public,
-			discovery_round: 0,
+			discovery_initiated: false,
+			discovery_round: None,
 			discovery_id: NodeId::new(),
 			discovery_nodes: HashSet::new(),
 			node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(),
-			in_flight_requests: HashMap::new(),
-			expiring_pings: VecDeque::new(),
-			expiring_finds: VecDeque::new(),
+			in_flight_pings: HashMap::new(),
+			in_flight_find_nodes: HashMap::new(),
 			send_queue: VecDeque::new(),
 			check_timestamps: true,
 			adding_nodes: Vec::new(),
@@ -175,15 +189,6 @@ impl<'a> Discovery<'a> {
 		}
 	}
 
-	/// Add a list of known nodes to the table.
-	pub fn init_node_list(&mut self, nodes: Vec<NodeEntry>) {
-		for n in nodes {
-			if self.is_allowed(&n) {
-				self.update_node(n);
-			}
-		}
-	}
-
 	fn update_node(&mut self, e: NodeEntry) -> Option<TableUpdates> {
 		trace!(target: "discovery", "Inserting {:?}", &e);
 		let id_hash = keccak(e.id);
@@ -224,13 +229,20 @@ impl<'a> Discovery<'a> {
 	/// Starts the discovery process at round 0
 	fn start(&mut self) {
 		trace!(target: "discovery", "Starting discovery");
-		self.discovery_round = 0;
+		self.discovery_round = Some(0);
 		self.discovery_id.randomize(); //TODO: use cryptographic nonce
 		self.discovery_nodes.clear();
 	}
 
+	/// Complete the discovery process
+	fn stop(&mut self) {
+		trace!(target: "discovery", "Completing discovery");
+		self.discovery_round = None;
+		self.discovery_nodes.clear();
+	}
+
 	fn update_new_nodes(&mut self) {
-		while self.in_flight_requests.len() < MAX_NODES_PING {
+		while self.in_flight_pings.len() < MAX_NODES_PING {
 			match self.adding_nodes.pop() {
 				Some(next) => self.try_ping(next),
 				None => break,
@@ -239,8 +251,12 @@ impl<'a> Discovery<'a> {
 	}
 
 	fn discover(&mut self) {
-		self.update_new_nodes();
-		if self.discovery_round == DISCOVERY_MAX_STEPS {
+		let discovery_round = match self.discovery_round {
+			Some(r) => r,
+			None => return,
+		};
+		if discovery_round == DISCOVERY_MAX_STEPS {
+			self.stop();
 			return;
 		}
 		trace!(target: "discovery", "Starting round {:?}", self.discovery_round);
@@ -263,12 +279,10 @@ impl<'a> Discovery<'a> {
 		}
 
 		if tried_count == 0 {
-			trace!(target: "discovery", "Completing discovery");
-			self.discovery_round = DISCOVERY_MAX_STEPS;
-			self.discovery_nodes.clear();
+			self.stop();
 			return;
 		}
-		self.discovery_round += 1;
+		self.discovery_round = Some(discovery_round + 1);
 	}
 
 	/// The base 2 log of the distance between a and b using the XOR metric.
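The hunk above ends at Discovery::distance, the Kademlia-style metric used to pick a bucket: the base-2 log of the XOR of the two 256-bit id hashes, i.e. the index of the highest differing bit, with no value for identical hashes. Here is a self-contained Rust sketch over raw 32-byte arrays; the free function and its signature are illustrative, not the exact code in discovery.rs.

    // Base-2 log of the XOR distance between two 32-byte hashes.
    // Returns None when the hashes are identical (a node never measures
    // distance to itself), otherwise a bucket index in 0..256.
    fn distance(a: &[u8; 32], b: &[u8; 32]) -> Option<usize> {
        for (i, (x, y)) in a.iter().zip(b.iter()).enumerate() {
            let xor = x ^ y;
            if xor != 0 {
                // 255 for a difference in the very first bit, 0 for the last.
                return Some(255 - (i * 8 + xor.leading_zeros() as usize));
            }
        }
        None
    }

    fn main() {
        let zero = [0u8; 32];
        let mut one = [0u8; 32];
        one[31] = 1; // only the lowest-order bit differs
        assert_eq!(distance(&zero, &zero), None);
        assert_eq!(distance(&zero, &one), Some(0));
    }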
@@ -285,14 +299,20 @@ impl<'a> Discovery<'a> {
 	}
 
 	fn try_ping(&mut self, node: NodeEntry) {
-		if !self.is_allowed(&node) ||
-			self.in_flight_requests.contains_key(&node.id) ||
-			self.adding_nodes.iter().any(|n| n.id == node.id)
-		{
+		if !self.is_allowed(&node) {
+			trace!(target: "discovery", "Node {:?} not allowed", node);
+			return;
+		}
+		if self.in_flight_pings.contains_key(&node.id) || self.in_flight_find_nodes.contains_key(&node.id) {
+			trace!(target: "discovery", "Node {:?} in flight requests", node);
+			return;
+		}
+		if self.adding_nodes.iter().any(|n| n.id == node.id) {
+			trace!(target: "discovery", "Node {:?} in adding nodes", node);
 			return;
 		}
 
-		if self.in_flight_requests.len() < MAX_NODES_PING {
+		if self.in_flight_pings.len() < MAX_NODES_PING {
 			self.ping(&node)
 				.unwrap_or_else(|e| {
 					warn!(target: "discovery", "Error sending Ping packet: {:?}", e);
@@ -308,18 +328,17 @@ impl<'a> Discovery<'a> {
 		self.public_endpoint.to_rlp_list(&mut rlp);
 		node.endpoint.to_rlp_list(&mut rlp);
 		append_expiration(&mut rlp);
+		let old_parity_hash = keccak(rlp.as_raw());
 		let hash = self.send_packet(PACKET_PING, &node.endpoint.udp_address(), &rlp.drain())?;
 
-		let request_info = PendingRequest {
-			packet_id: PACKET_PING,
+		self.in_flight_pings.insert(node.id, PingRequest {
 			sent_at: Instant::now(),
-			packet_hash: hash,
-			response_count: 0,
-		};
-		self.expiring_pings.push_back((node.id, request_info.sent_at));
-		self.in_flight_requests.insert(node.id, request_info);
+			node: node.clone(),
+			echo_hash: hash,
+			deprecated_echo_hash: old_parity_hash,
+		});
 
-		trace!(target: "discovery", "Sent Ping to {:?}", &node.endpoint);
+		trace!(target: "discovery", "Sent Ping to {:?} ; node_id={:#x}", &node.endpoint, node.id);
 		Ok(())
 	}
 
@@ -327,16 +346,13 @@ impl<'a> Discovery<'a> {
 		let mut rlp = RlpStream::new_list(2);
 		rlp.append(target);
 		append_expiration(&mut rlp);
-		let hash = self.send_packet(PACKET_FIND_NODE, &node.endpoint.udp_address(), &rlp.drain())?;
+		self.send_packet(PACKET_FIND_NODE, &node.endpoint.udp_address(), &rlp.drain())?;
 
-		let request_info = PendingRequest {
-			packet_id: PACKET_FIND_NODE,
+		self.in_flight_find_nodes.insert(node.id, FindNodeRequest {
 			sent_at: Instant::now(),
-			packet_hash: hash,
 			response_count: 0,
-		};
-		self.expiring_finds.push_back((node.id, request_info.sent_at));
-		self.in_flight_requests.insert(node.id, request_info);
+			answered: false,
+		});
 
 		trace!(target: "discovery", "Sent FindNode to {:?}", &node.endpoint);
 		Ok(())
@@ -448,20 +464,31 @@ impl<'a> Discovery<'a> {
 		entry.endpoint.is_allowed(&self.ip_filter) && entry.id != self.id
 	}
 
-	fn on_ping(&mut self, rlp: &Rlp, node: &NodeId, from: &SocketAddr, echo_hash: &[u8]) -> Result<Option<TableUpdates>, Error> {
+	fn on_ping(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr, echo_hash: &[u8]) -> Result<Option<TableUpdates>, Error> {
 		trace!(target: "discovery", "Got Ping from {:?}", &from);
-		let source = NodeEndpoint::from_rlp(&rlp.at(1)?)?;
-		let dest = NodeEndpoint::from_rlp(&rlp.at(2)?)?;
+		let ping_from = NodeEndpoint::from_rlp(&rlp.at(1)?)?;
+		let ping_to = NodeEndpoint::from_rlp(&rlp.at(2)?)?;
 		let timestamp: u64 = rlp.val_at(3)?;
 		self.check_timestamp(timestamp)?;
+
 		let mut response = RlpStream::new_list(3);
-		dest.to_rlp_list(&mut response);
+		let pong_to = NodeEndpoint {
+			address: from.clone(),
+			udp_port: ping_from.udp_port
+		};
+		// Here the PONG's `To` field should be the node we are
+		// sending the request to
+		// WARNING: this field _should not be used_, but old Parity versions
+		// use it in order to get the node's address.
+		// So this is a temporary fix so that older Parity versions don't break completely.
+		ping_to.to_rlp_list(&mut response);
+		// pong_to.to_rlp_list(&mut response);
+
 		response.append(&echo_hash);
 		append_expiration(&mut response);
 		self.send_packet(PACKET_PONG, from, &response.drain())?;
 
-		let entry = NodeEntry { id: *node, endpoint: source.clone() };
+		let entry = NodeEntry { id: *node_id, endpoint: pong_to.clone() };
 		if !entry.endpoint.is_valid() {
 			debug!(target: "discovery", "Got bad address: {:?}", entry);
 		} else if !self.is_allowed(&entry) {
@@ -469,40 +496,45 @@ impl<'a> Discovery<'a> {
 		} else {
 			self.add_node(entry.clone());
 		}
-
 		Ok(None)
 	}
 
 	fn on_pong(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
-		trace!(target: "discovery", "Got Pong from {:?}", &from);
-		let dest = NodeEndpoint::from_rlp(&rlp.at(0)?)?;
+		trace!(target: "discovery", "Got Pong from {:?} ; node_id={:#x}", &from, node_id);
+		let _pong_to = NodeEndpoint::from_rlp(&rlp.at(0)?)?;
 		let echo_hash: H256 = rlp.val_at(1)?;
 		let timestamp: u64 = rlp.val_at(2)?;
 		self.check_timestamp(timestamp)?;
-		let mut node = NodeEntry { id: *node_id, endpoint: dest };
-		if !node.endpoint.is_valid() {
-			debug!(target: "discovery", "Bad address: {:?}", node);
-			node.endpoint.address = *from;
-		}
 
-		let is_expected = match self.in_flight_requests.entry(*node_id) {
+		let expected_node = match self.in_flight_pings.entry(*node_id) {
 			Entry::Occupied(entry) => {
-				let is_expected = {
+				let expected_node = {
 					let request = entry.get();
-					request.packet_id == PACKET_PING && request.packet_hash == echo_hash
+					if request.echo_hash != echo_hash && request.deprecated_echo_hash != echo_hash {
+						debug!(target: "discovery", "Got unexpected Pong from {:?} ; packet_hash={:#x} ; expected_hash={:#x}", &from, request.echo_hash, echo_hash);
+						None
+					} else {
+						if request.deprecated_echo_hash == echo_hash {
+							trace!(target: "discovery", "Got Pong from an old parity-ethereum version.");
+						}
+						Some(request.node.clone())
+					}
 				};
-				if is_expected {
+
+				if expected_node.is_some() {
 					entry.remove();
 				}
-				is_expected
+				expected_node
 			},
-			Entry::Vacant(_) => false
+			Entry::Vacant(_) => {
+				None
+			},
 		};
 
-		if is_expected {
+		if let Some(node) = expected_node {
 			Ok(self.update_node(node))
 		} else {
-			debug!(target: "discovery", "Got unexpected Pong from {:?}", &from);
+			debug!(target: "discovery", "Got unexpected Pong from {:?} ; request not found", &from);
 			Ok(None)
 		}
 	}
@@ -544,29 +576,32 @@ impl<'a> Discovery<'a> {
 	fn on_neighbours(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
 		let results_count = rlp.at(0)?.item_count()?;
 
-		let is_expected = match self.in_flight_requests.entry(*node_id) {
+		let is_expected = match self.in_flight_find_nodes.entry(*node_id) {
 			Entry::Occupied(mut entry) => {
-				let result = {
+				let expected = {
 					let request = entry.get_mut();
-					if request.packet_id == PACKET_FIND_NODE &&
-						request.response_count + results_count <= BUCKET_SIZE
-					{
+					// Mark the request as answered
+					request.answered = true;
+					if request.response_count + results_count <= BUCKET_SIZE {
 						request.response_count += results_count;
 						true
 					} else {
+						debug!(target: "discovery", "Got unexpected Neighbors from {:?} ; oversized packet ({} + {}) node_id={:#x}", &from, request.response_count, results_count, node_id);
 						false
 					}
 				};
 				if entry.get().response_count == BUCKET_SIZE {
 					entry.remove();
 				}
-				result
+				expected
 			}
-			Entry::Vacant(_) => false,
+			Entry::Vacant(_) => {
+				debug!(target: "discovery", "Got unexpected Neighbors from {:?} ; couldn't find node_id={:#x}", &from, node_id);
+				false
+			},
 		};
 
 		if !is_expected {
-			debug!(target: "discovery", "Got unexpected Neighbors from {:?}", &from);
 			return Ok(None);
 		}
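The PingRequest bookkeeping above accepts a Pong only when the echoed hash matches either the hash of the Ping actually sent or the legacy pre-01f825b hash that older Parity peers echo back. A distilled Rust sketch of that acceptance rule follows; the miniature H256 alias and the accepts helper are illustrative stand-ins, not the types from discovery.rs.

    // Simplified stand-in for the 32-byte packet hash used in the patch.
    type H256 = [u8; 32];

    struct PingRequest {
        echo_hash: H256,            // hash of the Ping packet actually sent
        deprecated_echo_hash: H256, // hash old Parity versions echo back
    }

    // Acceptance rule from on_pong: a Pong settles the in-flight Ping if it
    // echoes either hash; matching only the deprecated one flags an old peer.
    fn accepts(req: &PingRequest, echoed: &H256) -> (bool, bool) {
        let matches_current = req.echo_hash == *echoed;
        let matches_deprecated = req.deprecated_echo_hash == *echoed;
        (matches_current || matches_deprecated, matches_deprecated && !matches_current)
    }

    fn main() {
        let req = PingRequest { echo_hash: [1; 32], deprecated_echo_hash: [2; 32] };
        assert_eq!(accepts(&req, &[1; 32]), (true, false));  // modern peer
        assert_eq!(accepts(&req, &[2; 32]), (true, true));   // old parity-ethereum peer
        assert_eq!(accepts(&req, &[3; 32]), (false, false)); // unexpected Pong
    }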
@@ -591,65 +626,74 @@ impl<'a> Discovery<'a> {
 		Ok(None)
 	}
 
-	fn check_expired(&mut self, time: Instant) -> HashSet<NodeId> {
-		let mut removed: HashSet<NodeId> = HashSet::new();
-		while let Some((node_id, sent_at)) = self.expiring_pings.pop_front() {
-			if time.duration_since(sent_at) <= PING_TIMEOUT {
-				self.expiring_pings.push_front((node_id, sent_at));
-				break;
+	fn check_expired(&mut self, time: Instant) {
+		let mut nodes_to_expire = Vec::new();
+		self.in_flight_pings.retain(|node_id, ping_request| {
+			if time.duration_since(ping_request.sent_at) > PING_TIMEOUT {
+				debug!(target: "discovery", "Removing expired PING request for node_id={:#x}", node_id);
+				nodes_to_expire.push(*node_id);
+				false
+			} else {
+				true
 			}
-			self.expire_in_flight_request(node_id, sent_at, &mut removed);
-		}
-		while let Some((node_id, sent_at)) = self.expiring_finds.pop_front() {
-			if time.duration_since(sent_at) <= FIND_NODE_TIMEOUT {
-				self.expiring_finds.push_front((node_id, sent_at));
-				break;
+		});
+		self.in_flight_find_nodes.retain(|node_id, find_node_request| {
+			if time.duration_since(find_node_request.sent_at) > FIND_NODE_TIMEOUT {
+				if !find_node_request.answered {
+					debug!(target: "discovery", "Removing expired FIND NODE request for node_id={:#x}", node_id);
+					nodes_to_expire.push(*node_id);
+				}
+				false
+			} else {
+				true
 			}
-			self.expire_in_flight_request(node_id, sent_at, &mut removed);
+		});
+		for node_id in nodes_to_expire {
+			self.expire_node_request(node_id);
 		}
-		removed
 	}
 
-	fn expire_in_flight_request(&mut self, node_id: NodeId, sent_at: Instant, removed: &mut HashSet<NodeId>) {
-		if let Entry::Occupied(entry) = self.in_flight_requests.entry(node_id) {
-			if entry.get().sent_at == sent_at {
-				entry.remove();
-
-				// Attempt to remove from bucket if in one.
-				let id_hash = keccak(&node_id);
-				let dist = Discovery::distance(&self.id_hash, &id_hash)
-					.expect("distance is None only if id hashes are equal; will never send request to self; qed");
-				let bucket = &mut self.node_buckets[dist];
-				if let Some(index) = bucket.nodes.iter().position(|n| n.id_hash == id_hash) {
-					if bucket.nodes[index].fail_count < self.request_backoff.len() {
-						let node = &mut bucket.nodes[index];
-						node.backoff_until = Instant::now() + self.request_backoff[node.fail_count];
-						node.fail_count += 1;
-						trace!(
-							target: "discovery",
-							"Requests to node {:?} timed out {} consecutive time(s)",
-							&node.address, node.fail_count
-						);
-					} else {
-						removed.insert(node_id);
-						let node = bucket.nodes.remove(index).expect("index was located in if condition");
-						debug!(target: "discovery", "Removed expired node {:?}", &node.address);
-					}
-				}
+	fn expire_node_request(&mut self, node_id: NodeId) {
+		// Attempt to remove from bucket if in one.
+		let id_hash = keccak(&node_id);
+		let dist = Discovery::distance(&self.id_hash, &id_hash)
+			.expect("distance is None only if id hashes are equal; will never send request to self; qed");
+		let bucket = &mut self.node_buckets[dist];
+		if let Some(index) = bucket.nodes.iter().position(|n| n.id_hash == id_hash) {
+			if bucket.nodes[index].fail_count < self.request_backoff.len() {
+				let node = &mut bucket.nodes[index];
+				node.backoff_until = Instant::now() + self.request_backoff[node.fail_count];
+				node.fail_count += 1;
+				trace!(
+					target: "discovery",
+					"Requests to node {:?} timed out {} consecutive time(s)",
+					&node.address, node.fail_count
+				);
+			} else {
+				let node = bucket.nodes.remove(index).expect("index was located in if condition");
+				debug!(target: "discovery", "Removed expired node {:?}", &node.address);
 			}
 		}
 	}
 
-	pub fn round(&mut self) -> Option<TableUpdates> {
-		let removed = self.check_expired(Instant::now());
-		self.discover();
-		if !removed.is_empty() {
-			Some(TableUpdates { added: HashMap::new(), removed })
-		} else { None }
+
+	pub fn round(&mut self) {
+		self.check_expired(Instant::now());
+		self.update_new_nodes();
+
+		if self.discovery_round.is_some() {
+			self.discover();
+		// Start discovering if the first pings have been sent (or timed out)
+		} else if self.in_flight_pings.len() == 0 && !self.discovery_initiated {
+			self.discovery_initiated = true;
+			self.refresh();
+		}
 	}
 
 	pub fn refresh(&mut self) {
-		self.start();
+		if self.discovery_round.is_none() {
+			self.start();
+		}
 	}
 
 	pub fn any_sends_queued(&self) -> bool {
@@ -663,6 +707,16 @@ impl<'a> Discovery<'a> {
 	pub fn requeue_send(&mut self, datagram: Datagram) {
 		self.send_queue.push_front(datagram)
 	}
+
+	/// Add a list of known nodes to the table.
+	#[cfg(test)]
+	pub fn init_node_list(&mut self, nodes: Vec<NodeEntry>) {
+		for n in nodes {
+			if self.is_allowed(&n) {
+				self.update_node(n);
+			}
+		}
+	}
 }
 
 fn append_expiration(rlp: &mut RlpStream) {
@@ -738,13 +792,13 @@ mod tests {
 
 		for i in 1..(MAX_NODES_PING+1) {
 			discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
-			assert_eq!(discovery.in_flight_requests.len(), i);
+			assert_eq!(discovery.in_flight_pings.len(), i);
 			assert_eq!(discovery.send_queue.len(), i);
 			assert_eq!(discovery.adding_nodes.len(), 0);
 		}
 		for i in 1..20 {
 			discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
-			assert_eq!(discovery.in_flight_requests.len(), MAX_NODES_PING);
+			assert_eq!(discovery.in_flight_pings.len(), MAX_NODES_PING);
 			assert_eq!(discovery.send_queue.len(), MAX_NODES_PING);
 			assert_eq!(discovery.adding_nodes.len(), i);
 		}
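The rewritten check_expired above walks each in-flight map once with HashMap::retain, and expire_node_request then applies the REQUEST_BACKOFF schedule before a node is finally evicted from its bucket. Below is a compact, self-contained sketch of that retain-then-backoff flow; the miniature node and request types are deliberately simplified stand-ins, not the real ones.

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    const PING_TIMEOUT: Duration = Duration::from_millis(500);
    const REQUEST_BACKOFF: [Duration; 4] = [
        Duration::from_secs(1),
        Duration::from_secs(4),
        Duration::from_secs(16),
        Duration::from_secs(64),
    ];

    struct Node { fail_count: usize, backoff_until: Instant }

    fn expire(pings: &mut HashMap<u64, Instant>, nodes: &mut HashMap<u64, Node>, now: Instant) {
        let mut expired = Vec::new();
        // Single pass: keep live requests, collect the timed-out node ids.
        pings.retain(|id, sent_at| {
            if now.duration_since(*sent_at) > PING_TIMEOUT { expired.push(*id); false } else { true }
        });
        for id in expired {
            let evict = match nodes.get_mut(&id) {
                Some(node) if node.fail_count < REQUEST_BACKOFF.len() => {
                    // Back off before the next attempt instead of evicting outright.
                    node.backoff_until = now + REQUEST_BACKOFF[node.fail_count];
                    node.fail_count += 1;
                    false
                }
                Some(_) => true, // too many consecutive timeouts: evict
                None => false,
            };
            if evict {
                nodes.remove(&id);
            }
        }
    }

    fn main() {
        let now = Instant::now();
        let mut pings = HashMap::from([(7u64, now)]);
        let mut nodes = HashMap::from([(7u64, Node { fail_count: 0, backoff_until: now })]);
        expire(&mut pings, &mut nodes, now + PING_TIMEOUT + Duration::from_millis(1));
        assert!(pings.is_empty());
        assert_eq!(nodes[&7].fail_count, 1); // backed off, not yet evicted
    }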
@@ -821,23 +875,29 @@ mod tests {
 		assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200);
 
 		// Requests have not expired yet.
-		let removed = discovery.check_expired(Instant::now()).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now());
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert_eq!(removed, 0);
 
 		// Expiring pings to bucket nodes removes them from bucket.
-		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now() + PING_TIMEOUT);
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert!(removed > 0);
 		assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200 - removed);
 
 		for _ in 0..100 {
 			discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
 		}
-		assert!(discovery.in_flight_requests.len() > 0);
+		assert!(discovery.in_flight_pings.len() > 0);
 
 		// Expire pings to nodes that are not in buckets.
-		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now() + PING_TIMEOUT);
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert_eq!(removed, 0);
-		assert_eq!(discovery.in_flight_requests.len(), 0);
+		assert_eq!(discovery.in_flight_pings.len(), 0);
 
 		let from = SocketAddr::from_str("99.99.99.99:40445").unwrap();
 
 			discovery.on_packet(&packet, from.clone()).unwrap();
 		}
 
-		let removed = discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT);
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert!(removed > 0);
 
 		// FIND_NODE does not time out because it receives k results.
 			discovery.on_packet(&packet, from.clone()).unwrap();
 		}
 
-		let removed = discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT);
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert_eq!(removed, 0);
 
 		// Test bucket evictions with retries.
@@ -868,12 +932,16 @@ mod tests {
 
 		for _ in 0..2 {
 			discovery.ping(&node_entries[101]).unwrap();
-			let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
+			let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+			discovery.check_expired(Instant::now() + PING_TIMEOUT);
+			let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 			assert_eq!(removed, 0);
 		}
 
 		discovery.ping(&node_entries[101]).unwrap();
-		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
+		let num_nodes = total_bucket_nodes(&discovery.node_buckets);
+		discovery.check_expired(Instant::now() + PING_TIMEOUT);
+		let removed = num_nodes - total_bucket_nodes(&discovery.node_buckets);
 		assert_eq!(removed, 1);
 	}
 
@@ -1066,9 +1134,11 @@ mod tests {
 		assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap());
 		assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap());
 
+		// `discovery1` should be added to node table on ping received
 		if let Some(_) = discovery2.on_packet(&ping_data.payload, ep1.address.clone()).unwrap() {
 			panic!("Expected no changes to discovery2's table");
 		}
+
 		let pong_data = discovery2.dequeue_send().unwrap();
 		let data = &pong_data.payload[(32 + 65)..];
 		assert_eq!(data[0], PACKET_PONG);

diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs
index f16701b6a8b..c43aab4bd8a 100644
--- a/util/network-devp2p/src/host.rs
+++ b/util/network-devp2p/src/host.rs
@@ -59,8 +59,9 @@ const TCP_ACCEPT: StreamToken = SYS_TIMER + 1;
 const IDLE: TimerToken = SYS_TIMER + 2;
 const DISCOVERY: StreamToken = SYS_TIMER + 3;
 const DISCOVERY_REFRESH: TimerToken = SYS_TIMER + 4;
-const DISCOVERY_ROUND: TimerToken = SYS_TIMER + 5;
-const NODE_TABLE: TimerToken = SYS_TIMER + 6;
+const FAST_DISCOVERY_REFRESH: TimerToken = SYS_TIMER + 5;
+const DISCOVERY_ROUND: TimerToken = SYS_TIMER + 6;
+const NODE_TABLE: TimerToken = SYS_TIMER + 7;
 const FIRST_SESSION: StreamToken = 0;
 const LAST_SESSION: StreamToken = FIRST_SESSION + MAX_SESSIONS - 1;
 const USER_TIMER: TimerToken = LAST_SESSION + 256;
@@ -71,6 +72,8 @@ const SYS_TIMER: TimerToken = LAST_SESSION + 1;
 const MAINTENANCE_TIMEOUT: Duration = Duration::from_secs(1);
 // for DISCOVERY_REFRESH TimerToken
 const DISCOVERY_REFRESH_TIMEOUT: Duration = Duration::from_secs(60);
+// for FAST_DISCOVERY_REFRESH TimerToken
+const FAST_DISCOVERY_REFRESH_TIMEOUT: Duration = Duration::from_secs(10);
 // for DISCOVERY_ROUND TimerToken
 const DISCOVERY_ROUND_TIMEOUT: Duration = Duration::from_millis(300);
 // for NODE_TABLE TimerToken
@@ -478,10 +481,10 @@ impl Host {
 			let socket = UdpSocket::bind(&udp_addr).expect("Error binding UDP socket");
 			*self.udp_socket.lock() = Some(socket);
 
-			discovery.init_node_list(self.nodes.read().entries());
 			discovery.add_node_list(self.nodes.read().entries());
 			*self.discovery.lock() = Some(discovery);
 			io.register_stream(DISCOVERY)?;
+			io.register_timer(FAST_DISCOVERY_REFRESH, FAST_DISCOVERY_REFRESH_TIMEOUT)?;
 			io.register_timer(DISCOVERY_REFRESH, DISCOVERY_REFRESH_TIMEOUT)?;
 			io.register_timer(DISCOVERY_ROUND, DISCOVERY_ROUND_TIMEOUT)?;
 		}
@@ -533,6 +536,18 @@ impl Host {
 		}
 	}
 
+	fn has_enough_peers(&self) -> bool {
+		let min_peers = {
+			let info = self.info.read();
+			let config = &info.config;
+
+			config.min_peers
+		};
+		let (_, egress_count, ingress_count) = self.session_count();
+
+		return egress_count + ingress_count >= min_peers as usize;
+	}
+
 	fn connect_peers(&self, io: &IoContext<NetworkIoMessage>) {
 		let (min_peers, mut pin, max_handshakes, allow_ips, self_id) = {
 			let info = self.info.read();
@@ -1014,16 +1029,23 @@ impl IoHandler<NetworkIoMessage> for Host {
 			IDLE => self.maintain_network(io),
 			FIRST_SESSION ... LAST_SESSION => self.connection_timeout(token, io),
 			DISCOVERY_REFRESH => {
-				if let Some(d) = self.discovery.lock().as_mut() {
-					d.refresh();
-				}
+				// Run the _slow_ discovery if enough peers are connected
+				if !self.has_enough_peers() {
+					return;
+				}
+				self.discovery.lock().as_mut().map(|d| d.refresh());
 				io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e));
 			},
-			DISCOVERY_ROUND => {
-				let node_changes = { self.discovery.lock().as_mut().and_then(|d| d.round()) };
-				if let Some(node_changes) = node_changes {
-					self.update_nodes(io, node_changes);
+			FAST_DISCOVERY_REFRESH => {
+				// Run the fast discovery if not enough peers are connected
+				if self.has_enough_peers() {
+					return;
 				}
+				self.discovery.lock().as_mut().map(|d| d.refresh());
+				io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e));
+			},
+			DISCOVERY_ROUND => {
+				self.discovery.lock().as_mut().map(|d| d.round());
 				io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e));
 			},
 			NODE_TABLE => {

diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs
index 7d1380907ba..b48219d8adc 100644
--- a/util/network-devp2p/src/node_table.rs
+++ b/util/network-devp2p/src/node_table.rs
@@ -385,7 +385,7 @@ impl NodeTable {
 			None => return,
 		};
 		if let Err(e) = fs::create_dir_all(&path) {
-			warn!("Error creating node table directory: {:?}", e);
+			warn!(target: "network", "Error creating node table directory: {:?}", e);
 			return;
 		}
 		path.push(NODES_FILE);
@@ -400,11 +400,11 @@ impl NodeTable {
 		match fs::File::create(&path) {
 			Ok(file) => {
 				if let Err(e) = serde_json::to_writer_pretty(file, &table) {
-					warn!("Error writing node table file: {:?}", e);
+					warn!(target: "network", "Error writing node table file: {:?}", e);
 				}
 			},
 			Err(e) => {
-				warn!("Error creating node table file: {:?}", e);
+				warn!(target: "network", "Error creating node table file: {:?}", e);
 			}
 		}
 	}
@@ -418,7 +418,7 @@ impl NodeTable {
 		let file = match fs::File::open(&path) {
 			Ok(file) => file,
 			Err(e) => {
-				debug!("Error opening node table file: {:?}", e);
+				debug!(target: "network", "Error opening node table file: {:?}", e);
 				return Default::default();
 			},
 		};
@@ -431,7 +431,7 @@ impl NodeTable {
 			.collect()
 			},
 			Err(e) => {
-				warn!("Error reading node table file: {:?}", e);
+				warn!(target: "network", "Error reading node table file: {:?}", e);
 				Default::default()
 			},
 		}
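Patch 3's host changes split discovery refresh into two cadences: the pre-existing 60-second DISCOVERY_REFRESH timer only does useful work once the node already has enough peers, while the new 10-second FAST_DISCOVERY_REFRESH timer fills in while it does not, so a peer-starved node retries discovery six times as often. Here is a minimal Rust sketch of that gating decision, with the io/timer plumbing elided and a deliberately simplified Host; it illustrates the rule rather than reproducing host.rs.

    use std::time::Duration;

    const DISCOVERY_REFRESH_TIMEOUT: Duration = Duration::from_secs(60);
    const FAST_DISCOVERY_REFRESH_TIMEOUT: Duration = Duration::from_secs(10);

    struct Host { egress_count: usize, ingress_count: usize, min_peers: usize }

    impl Host {
        fn has_enough_peers(&self) -> bool {
            self.egress_count + self.ingress_count >= self.min_peers
        }

        // Slow timer (every 60s): refresh only once the peer set is healthy.
        fn on_discovery_refresh(&self) -> bool {
            self.has_enough_peers()
        }

        // Fast timer (every 10s): refresh only while we are short of peers.
        fn on_fast_discovery_refresh(&self) -> bool {
            !self.has_enough_peers()
        }
    }

    fn main() {
        let starved = Host { egress_count: 1, ingress_count: 0, min_peers: 25 };
        assert!(starved.on_fast_discovery_refresh() && !starved.on_discovery_refresh());

        let healthy = Host { egress_count: 20, ingress_count: 5, min_peers: 25 };
        assert!(healthy.on_discovery_refresh() && !healthy.on_fast_discovery_refresh());
        let _ = (DISCOVERY_REFRESH_TIMEOUT, FAST_DISCOVERY_REFRESH_TIMEOUT);
    }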
From bbaac0c6a9a9e4ade59f15a21e5bff050050a9a3 Mon Sep 17 00:00:00 2001
From: gabriel klawitter
Date: Sat, 15 Sep 2018 13:43:19 +0200
Subject: [PATCH 4/4] while working on the platform tests make them
 non-breaking (#9563)

* while working on the platform tests make them non-critical
* ci: unify test stage job names and tolerate more failures
* ci: restore valid yaml
* ci: allow beta and nightly rust builds to fail
---
 .gitlab-ci.yml | 56 +++++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6e1de5f20a4..8c794fd6c50 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -49,18 +49,32 @@ cache:
     - export VERSION
     - echo "Version = ${VERSION}"
 
-
-
 #### stage: test
 
-test-rust-stable: &test
+test-linux-rust-stable: &test
   stage: test
   script:
     - scripts/gitlab/test.sh stable
   tags:
    - rust-stable
 
+test-linux-rust-beta:
+  stage: test
+  script:
+    - scripts/gitlab/test.sh beta
+  tags:
+    - rust-stable
+  allow_failure: true
+
+test-linux-rust-nightly:
+  stage: test
+  script:
+    - scripts/gitlab/test.sh nightly
+  tags:
+    - rust-stable
+  allow_failure: true
+
+test-darwin-rust-stable:
   stage: test
   variables:
     CARGO_TARGET: x86_64-apple-darwin
@@ -70,8 +84,9 @@ test-darwin-macos-x86_64:
     - scripts/gitlab/test.sh stable
   tags:
     - osx
+  allow_failure: true
 
-test-linux-android-armhf:
+test-android-rust-stable:
   stage: test
   image: parity/rust-android:gitlab-ci
   variables:
     CARGO_TARGET: armv7-linux-androideabi
   script:
     - scripts/gitlab/test.sh stable
   tags:
     - rust-arm
+  allow_failure: true
 
-test-windows-msvc-x86_64:
+test-windows-rust-stable:
   stage: test
   cache:
     key: "%CI_JOB_NAME%"
     paths:
       - "%CI_PROJECT_DIR%/target/"
       - "%CI_PROJECT_DIR%/cargo/"
     # No cargo caching, since fetch-locking on Windows gets stuck
   variables:
     CARGO_TARGET: x86_64-pc-windows-msvc
   script:
     - sh scripts/gitlab/test.sh stable
   tags:
     - rust-windows
-
-
-
-
+  allow_failure: true
 
 .optional_test: &optional_test
   <<: *test
   allow_failure: true
   only:
     - master
 
-test-rust-beta:
-  <<: *optional_test
-  script:
-    - scripts/gitlab/test.sh beta
-
-test-rust-nightly:
-  <<: *optional_test
-  script:
-    - scripts/gitlab/test.sh nightly
-
 test-lint-rustfmt:
   <<: *optional_test
   script:
     - scripts/gitlab/rustfmt.sh
 
 test-lint-clippy:
     - scripts/gitlab/clippy.sh
 
 test-coverage-kcov:
-  stage: test
-  only:
-    - master
+  <<: *optional_test
   script:
     - scripts/gitlab/coverage.sh
   tags:
     - shell
-  allow_failure: true
-
 
 #### stage: build
 
 build-linux-ubuntu-i386:
     CARGO_TARGET: i686-unknown-linux-gnu
   tags:
     - rust-i686
+  allow_failure: true
 
 build-linux-ubuntu-arm64:
   <<: *build
     CARGO_TARGET: aarch64-unknown-linux-gnu
   tags:
     - rust-arm
+  allow_failure: true
 
 build-linux-ubuntu-armhf:
   <<: *build
     CARGO_TARGET: armv7-unknown-linux-gnueabihf
   tags:
     - rust-arm
+  allow_failure: true
 
 build-linux-android-armhf:
   stage: build
     - scripts/gitlab/build-unix.sh
   tags:
     - rust-arm
+  allow_failure: true
 
 build-darwin-macos-x86_64:
   stage: build
     CARGO_TARGET: x86_64-apple-darwin
     CC: gcc
     CXX: g++
     - osx
   <<: *collect_artifacts
 
 build-windows-msvc-x86_64:
   stage: build
   cache:
     key: "%CI_JOB_NAME%"
     - rust-windows
   <<: *collect_artifacts
-
 
 #### stage: package
 
 package-linux-snap-amd64: &package_snap
   stage: package
     - build-linux-ubuntu-amd64
 
 package-linux-snap-i386:
   <<: *package_snap
     - build-linux-ubuntu-i386
 
 package-linux-snap-armhf:
   dependencies:
     - build-linux-ubuntu-armhf
-
 
 #### stage: publish
 
 publish-linux-snap-amd64: &publish_snap
 
 publish-github-and-s3:
   tags:
     - shell
-
 
 ####stage: docs
 
 docs-rpc-json: